From 4b6175d68606d3d4dd638c9bd8cb3069101d5417 Mon Sep 17 00:00:00 2001 From: win gutmann Date: Thu, 26 Mar 2026 17:49:14 -0400 Subject: [PATCH 1/2] major grafana updates --- .../console-2026-03-26T21-06-47-084Z.log | 2 + .../console-2026-03-26T21-07-25-658Z.log | 3 + .../console-2026-03-26T21-11-17-962Z.log | 2 + .../console-2026-03-26T21-11-55-773Z.log | 1 + .../console-2026-03-26T21-12-23-787Z.log | 2 + cache-context-dashboard.png | Bin 0 -> 37142 bytes log-sentinel-dashboard.png | Bin 0 -> 80630 bytes observability/local/docker-compose.yml | 30 + .../dashboards/claude-cache-context.json | 844 +++++++++ .../dashboards/claude-token-cost.json | 1574 +++++++++++++++++ .../dashboards/claude-token-usage.json | 893 ---------- .../dashboards/contextstream-deep-dive.json | 432 ----- .../dashboards/simsteward-log-sentinel.json | 541 ++++++ observability/local/log-sentinel/Dockerfile | 7 + .../circuit_breaker.cpython-313.pyc | Bin 0 -> 3049 bytes .../__pycache__/config.cpython-313.pyc | Bin 0 -> 3032 bytes .../grafana_client.cpython-313.pyc | Bin 0 -> 3326 bytes .../__pycache__/loki_client.cpython-313.pyc | Bin 0 -> 10175 bytes .../__pycache__/loki_handler.cpython-313.pyc | Bin 0 -> 4429 bytes .../__pycache__/models.cpython-313.pyc | Bin 0 -> 6028 bytes .../__pycache__/query_cache.cpython-313.pyc | Bin 0 -> 7343 bytes .../__pycache__/sentry_client.cpython-313.pyc | Bin 0 -> 5970 bytes observability/local/log-sentinel/app.py | 64 + .../local/log-sentinel/circuit_breaker.py | 51 + observability/local/log-sentinel/config.py | 25 + .../local/log-sentinel/detectors/__init__.py | 0 .../__pycache__/__init__.cpython-313.pyc | Bin 0 -> 196 bytes .../action_failure.cpython-313.pyc | Bin 0 -> 3914 bytes .../__pycache__/agent_loop.cpython-313.pyc | Bin 0 -> 5879 bytes .../__pycache__/base.cpython-313.pyc | Bin 0 -> 1032 bytes .../claude_session.cpython-313.pyc | Bin 0 -> 3692 bytes .../__pycache__/error_spike.cpython-313.pyc | Bin 0 -> 2092 bytes 
.../__pycache__/flow_gap.cpython-313.pyc | Bin 0 -> 2892 bytes .../incident_anomaly.cpython-313.pyc | Bin 0 -> 4586 bytes .../__pycache__/mcp_health.cpython-313.pyc | Bin 0 -> 4560 bytes .../plugin_lifecycle.cpython-313.pyc | Bin 0 -> 4987 bytes .../resource_health.cpython-313.pyc | Bin 0 -> 3968 bytes .../sentinel_health.cpython-313.pyc | Bin 0 -> 3486 bytes .../session_quality.cpython-313.pyc | Bin 0 -> 3700 bytes .../silent_session.cpython-313.pyc | Bin 0 -> 2058 bytes .../__pycache__/stuck_user.cpython-313.pyc | Bin 0 -> 4322 bytes .../__pycache__/token_usage.cpython-313.pyc | Bin 0 -> 3801 bytes .../__pycache__/tool_patterns.cpython-313.pyc | Bin 0 -> 6283 bytes .../websocket_health.cpython-313.pyc | Bin 0 -> 2856 bytes .../log-sentinel/detectors/action_failure.py | 103 ++ .../log-sentinel/detectors/agent_loop.py | 133 ++ .../local/log-sentinel/detectors/base.py | 16 + .../log-sentinel/detectors/claude_session.py | 98 + .../log-sentinel/detectors/error_spike.py | 46 + .../local/log-sentinel/detectors/flow_gap.py | 48 + .../detectors/incident_anomaly.py | 107 ++ .../log-sentinel/detectors/mcp_health.py | 119 ++ .../detectors/plugin_lifecycle.py | 140 ++ .../log-sentinel/detectors/resource_health.py | 97 + .../log-sentinel/detectors/sentinel_health.py | 86 + .../log-sentinel/detectors/session_quality.py | 96 + .../log-sentinel/detectors/silent_session.py | 53 + .../log-sentinel/detectors/stuck_user.py | 93 + .../log-sentinel/detectors/token_usage.py | 105 ++ .../log-sentinel/detectors/tool_patterns.py | 130 ++ .../detectors/websocket_health.py | 71 + .../local/log-sentinel/flows/__init__.py | 0 .../__pycache__/__init__.cpython-313.pyc | Bin 0 -> 192 bytes .../flows/__pycache__/engine.cpython-313.pyc | Bin 0 -> 5203 bytes .../flows/definitions/capture_incident.yml | 25 + .../flows/definitions/review_incident.yml | 34 + .../flows/definitions/session_health.yml | 30 + .../flows/definitions/transport_controls.yml | 24 + .../flows/definitions/walk_driver.yml | 33 + 
.../flows/definitions/walk_session.yml | 33 + .../local/log-sentinel/flows/engine.py | 85 + .../local/log-sentinel/grafana_client.py | 49 + .../log-sentinel/investigator/__init__.py | 0 .../__pycache__/__init__.cpython-313.pyc | Bin 0 -> 199 bytes .../__pycache__/chain.cpython-313.pyc | Bin 0 -> 12150 bytes .../__pycache__/knowledge.cpython-313.pyc | Bin 0 -> 2602 bytes .../__pycache__/prompts.cpython-313.pyc | Bin 0 -> 5749 bytes .../local/log-sentinel/investigator/chain.py | 232 +++ .../log-sentinel/investigator/knowledge.py | 47 + .../log-sentinel/investigator/prompts.py | 142 ++ .../local/log-sentinel/loki_client.py | 176 ++ .../local/log-sentinel/loki_handler.py | 66 + observability/local/log-sentinel/models.py | 100 ++ .../local/log-sentinel/query_cache.py | 102 ++ .../local/log-sentinel/requirements.txt | 6 + observability/local/log-sentinel/sentinel.py | 377 ++++ .../local/log-sentinel/sentry_client.py | 100 ++ .../local/log-sentinel/tests/__init__.py | 0 .../local/logs/claude-session-metrics.jsonl | 1 + token-cost-dashboard.png | Bin 0 -> 117274 bytes 90 files changed, 6349 insertions(+), 1325 deletions(-) create mode 100644 .playwright-mcp/console-2026-03-26T21-06-47-084Z.log create mode 100644 .playwright-mcp/console-2026-03-26T21-07-25-658Z.log create mode 100644 .playwright-mcp/console-2026-03-26T21-11-17-962Z.log create mode 100644 .playwright-mcp/console-2026-03-26T21-11-55-773Z.log create mode 100644 .playwright-mcp/console-2026-03-26T21-12-23-787Z.log create mode 100644 cache-context-dashboard.png create mode 100644 log-sentinel-dashboard.png create mode 100644 observability/local/grafana/provisioning/dashboards/claude-cache-context.json create mode 100644 observability/local/grafana/provisioning/dashboards/claude-token-cost.json delete mode 100644 observability/local/grafana/provisioning/dashboards/claude-token-usage.json delete mode 100644 observability/local/grafana/provisioning/dashboards/contextstream-deep-dive.json create mode 100644 
observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json create mode 100644 observability/local/log-sentinel/Dockerfile create mode 100644 observability/local/log-sentinel/__pycache__/circuit_breaker.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/config.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/grafana_client.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/loki_client.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/loki_handler.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/models.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/query_cache.cpython-313.pyc create mode 100644 observability/local/log-sentinel/__pycache__/sentry_client.cpython-313.pyc create mode 100644 observability/local/log-sentinel/app.py create mode 100644 observability/local/log-sentinel/circuit_breaker.py create mode 100644 observability/local/log-sentinel/config.py create mode 100644 observability/local/log-sentinel/detectors/__init__.py create mode 100644 observability/local/log-sentinel/detectors/__pycache__/__init__.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/action_failure.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/agent_loop.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/base.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/claude_session.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/error_spike.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/flow_gap.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/incident_anomaly.cpython-313.pyc create mode 100644 
observability/local/log-sentinel/detectors/__pycache__/mcp_health.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/plugin_lifecycle.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/resource_health.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/sentinel_health.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/session_quality.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/silent_session.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/stuck_user.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/token_usage.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/tool_patterns.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/__pycache__/websocket_health.cpython-313.pyc create mode 100644 observability/local/log-sentinel/detectors/action_failure.py create mode 100644 observability/local/log-sentinel/detectors/agent_loop.py create mode 100644 observability/local/log-sentinel/detectors/base.py create mode 100644 observability/local/log-sentinel/detectors/claude_session.py create mode 100644 observability/local/log-sentinel/detectors/error_spike.py create mode 100644 observability/local/log-sentinel/detectors/flow_gap.py create mode 100644 observability/local/log-sentinel/detectors/incident_anomaly.py create mode 100644 observability/local/log-sentinel/detectors/mcp_health.py create mode 100644 observability/local/log-sentinel/detectors/plugin_lifecycle.py create mode 100644 observability/local/log-sentinel/detectors/resource_health.py create mode 100644 observability/local/log-sentinel/detectors/sentinel_health.py create mode 100644 observability/local/log-sentinel/detectors/session_quality.py create mode 100644 
observability/local/log-sentinel/detectors/silent_session.py create mode 100644 observability/local/log-sentinel/detectors/stuck_user.py create mode 100644 observability/local/log-sentinel/detectors/token_usage.py create mode 100644 observability/local/log-sentinel/detectors/tool_patterns.py create mode 100644 observability/local/log-sentinel/detectors/websocket_health.py create mode 100644 observability/local/log-sentinel/flows/__init__.py create mode 100644 observability/local/log-sentinel/flows/__pycache__/__init__.cpython-313.pyc create mode 100644 observability/local/log-sentinel/flows/__pycache__/engine.cpython-313.pyc create mode 100644 observability/local/log-sentinel/flows/definitions/capture_incident.yml create mode 100644 observability/local/log-sentinel/flows/definitions/review_incident.yml create mode 100644 observability/local/log-sentinel/flows/definitions/session_health.yml create mode 100644 observability/local/log-sentinel/flows/definitions/transport_controls.yml create mode 100644 observability/local/log-sentinel/flows/definitions/walk_driver.yml create mode 100644 observability/local/log-sentinel/flows/definitions/walk_session.yml create mode 100644 observability/local/log-sentinel/flows/engine.py create mode 100644 observability/local/log-sentinel/grafana_client.py create mode 100644 observability/local/log-sentinel/investigator/__init__.py create mode 100644 observability/local/log-sentinel/investigator/__pycache__/__init__.cpython-313.pyc create mode 100644 observability/local/log-sentinel/investigator/__pycache__/chain.cpython-313.pyc create mode 100644 observability/local/log-sentinel/investigator/__pycache__/knowledge.cpython-313.pyc create mode 100644 observability/local/log-sentinel/investigator/__pycache__/prompts.cpython-313.pyc create mode 100644 observability/local/log-sentinel/investigator/chain.py create mode 100644 observability/local/log-sentinel/investigator/knowledge.py create mode 100644 
observability/local/log-sentinel/investigator/prompts.py create mode 100644 observability/local/log-sentinel/loki_client.py create mode 100644 observability/local/log-sentinel/loki_handler.py create mode 100644 observability/local/log-sentinel/models.py create mode 100644 observability/local/log-sentinel/query_cache.py create mode 100644 observability/local/log-sentinel/requirements.txt create mode 100644 observability/local/log-sentinel/sentinel.py create mode 100644 observability/local/log-sentinel/sentry_client.py create mode 100644 observability/local/log-sentinel/tests/__init__.py create mode 100644 observability/local/logs/claude-session-metrics.jsonl create mode 100644 token-cost-dashboard.png diff --git a/.playwright-mcp/console-2026-03-26T21-06-47-084Z.log b/.playwright-mcp/console-2026-03-26T21-06-47-084Z.log new file mode 100644 index 0000000..c4778cd --- /dev/null +++ b/.playwright-mcp/console-2026-03-26T21-06-47-084Z.log @@ -0,0 +1,2 @@ +[ 885ms] [WARNING] is deprecated. Please include @ http://localhost:3000/login:0 +[ 23600ms] [ERROR] Failed to load resource: the server responded with a status of 401 (Unauthorized) @ http://localhost:3000/login:0 diff --git a/.playwright-mcp/console-2026-03-26T21-07-25-658Z.log b/.playwright-mcp/console-2026-03-26T21-07-25-658Z.log new file mode 100644 index 0000000..6b1e95a --- /dev/null +++ b/.playwright-mcp/console-2026-03-26T21-07-25-658Z.log @@ -0,0 +1,3 @@ +[ 292ms] [WARNING] is deprecated. 
Please include @ http://localhost:3000/login:0 +[ 15683ms] [ERROR] Failed to load resource: the server responded with a status of 401 (Unauthorized) @ http://localhost:3000/login:0 +[ 49060ms] [ERROR] Failed to load resource: the server responded with a status of 401 (Unauthorized) @ http://localhost:3000/login:0 diff --git a/.playwright-mcp/console-2026-03-26T21-11-17-962Z.log b/.playwright-mcp/console-2026-03-26T21-11-17-962Z.log new file mode 100644 index 0000000..1b67327 --- /dev/null +++ b/.playwright-mcp/console-2026-03-26T21-11-17-962Z.log @@ -0,0 +1,2 @@ +[ 602ms] [WARNING] is deprecated. Please include @ http://localhost:3000/login:0 +[ 18682ms] [WARNING] is deprecated. Please include @ http://localhost:3000/d/claude-token-cost?orgId=1&from=now-7d&to=now&kiosk:0 diff --git a/.playwright-mcp/console-2026-03-26T21-11-55-773Z.log b/.playwright-mcp/console-2026-03-26T21-11-55-773Z.log new file mode 100644 index 0000000..a28d881 --- /dev/null +++ b/.playwright-mcp/console-2026-03-26T21-11-55-773Z.log @@ -0,0 +1 @@ +[ 648ms] [WARNING] is deprecated. Please include @ http://localhost:3000/d/claude-cache-context?orgId=1&from=now-7d&to=now:0 diff --git a/.playwright-mcp/console-2026-03-26T21-12-23-787Z.log b/.playwright-mcp/console-2026-03-26T21-12-23-787Z.log new file mode 100644 index 0000000..c11747f --- /dev/null +++ b/.playwright-mcp/console-2026-03-26T21-12-23-787Z.log @@ -0,0 +1,2 @@ +[ 636ms] [WARNING] is deprecated. 
Please include @ http://localhost:3000/d/simsteward-log-sentinel?orgId=1&from=now-6h&to=now:0 +[ 59082ms] [ERROR] WebSocket connection to 'ws://localhost:3000/api/live/ws' failed: Connection closed before receiving a handshake response @ http://localhost:3000/public/build/3855.c53eb219979d7cb3b2d4.js:1312 diff --git a/cache-context-dashboard.png b/cache-context-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..000cc302b989f9e58a4fbd9976fe49aa327efa42 GIT binary patch literal 37142 zcmb5VbyQoy`|eAXwiGMHCB=)oOIn;_#R|nz9Euf}04b%o6?a0>0>#~u;_j}&AxLn7 zC0xGeoZr3c-h0kj_x?56d+qh^cV>2G=K0JsVIS2M@n2BAz`?=6S5}hK!ohiXf`jv5 z<=MmgJ4%FfWH>lPILdPGb-Xh6m!2ExoG$~95c`h=>;?Yzvy`kn3=Pjo3D(RDQYo;s zLTZm`+oC@#l`P%q9>#kd;)BPVL`oD52m9@;lB{A2lu^_T->crFaCXaGmTRd ztCXis5N*UAgR?kv=40&Om#J0s1_w>iN%dV=a3UcgK~O}5_vFoPOAYFd?2B-UvS+Nag+-W%}U3doi)kkHs0|Y-pEs zwNgKDPMrk#h%1zA)xEt8r_wY|Rm$I2k*oCm>)oYy6vsDBM6ivK#Q#sGHX%=`VsP?L ziAa;bxpig>?93fMj5Xn$?$2~LdEa0BO4BwvuYNU{wbqjl@~qkMV_`^6 zDUd&c`nSo&CVYc&g(Ae3(ke8i!`3f++G3t?!ncDU9rr%eN)pXJtwQw6lJfH0+WPi( zUzg;Fv+7LpEt9{ccEIsyjti2e8ptg6vp8SoroJC`&Q3~Pt@g0e>QAU00o%c8Km4SY zbPInZ~|8D(B*8o;LNZDFtGXU&|mm2ZeC_0687hV!XewUyM*!aWt)^ zO|-YT@f7S4K5c&9#`W?6$^-arH9UiZ8OJl~GKB7T zS4{cc>66Clv8cTC-OWu`e%O{32r_1#_Jg^`qe~*-RPGdIjP)`LX zMu}wClN+bPf+^~VcgOS3u1?9vpfS?^Q2qHzb48n^oJWj`kp;l9F>OwLnw)$p)|e!9 zX#c0=yT1+g{+HQqaw)TWTYWqGmSXdUWcJs~A<0FZI$uMT6n_qCgs2Kk2A_VwIV{z@ zos(@`w=AgjH7wwmM(%h${6?O09AuStumE&`?3ne4ii-|s@RjU+-kextY5K!|;dHfq zbLYQ}iSfvrQ4ZEfrBy(ED)jKbJWd+X>dSVezmMxdgF4ed0^Uo%glXW`Sgg>;`fjFeE4#KE`LC8Afr6?-x$1 z%o&wrz0-~)98x_i{0)mH3n7!lzOH<%h~|cOCE8L)3tFZ^b~>avJO6?pG&BJLjQpcN zhWgk$il|z>ZxM`Mt}SYsR?&bM^&^J|+pQ(nYb+i8DhAh_ee^h<OD(Gq`Gtw)LZHr7h`-cd+F=3mSotgxKnlg~D#IsTGLRVTPKWrW zkmKY9RMjeXVhd~lge+$)biwf z^Xmpmy8kR>v>ns{o-H+24cDnti5~=n!ML@?bxgcgdjs&8yDQDJ_K7wfhNbpjO(s&FAlCerX+G`@{3|x<++;-&`3_3aoPM96=KWCQndGhBcL<}epe-}NRj=@lMGiN%>eDc zrsS9y9!#s>O<{e8%T&LRo0&m%qjSxyr@hhHTf)juu@H}uwwFPN7(7<*(S3f)KHnIS 
zXI?MByRG^}Kf>wk?Fw&cNZQsc=^AGbEE}OV7)LFA3 zHKw5olxgNR)21AP_NPP+;ZRuQ~ zKG2vzR$pCt3!aS@gmZ5V7u&okYs1U&GrW5k`j~9mya~3=&1wQHuyAJh=A?yKPNcn zE7<}>{ZR1}k_LL5c$s1{I$6nyfHNK$8Ub+1rLQ7qGSsY=NLXE;BGL! z(tkcDa=`nA@=8qz-5FoGXeJA7EQR4qME1Z2{Em>JhQYT}+OqV7$n=Q*3E-X`t@v?Cah54r0+PLJ{ zS@-BV3Y;BELvS>iL>jWF3SSu>tu-A?W;2|Xs||8k*TX^T9E5IqpEqLD?$D;bkxY= zX6!&Acs5I1o^S{-T<^)0-_519d2qL(xc;gi0OoVtIU;fqIt}K+%MzT|1s^2yz#Hu| zB(_(Rt%B5_OPshj9ekQi)My2a+Pr0w=(fopTVvBRu9`;GD@Pe^nPs&f9c8Ft;Je<=1!>MsA-ys($wzli?P9(uB) z^U}l#fwj#%rY#p?G4}8!??#Bs=SvD*4UnkTZfYT4gYu21ra)UB~mP zdgMZn3O6J+Yvt#l*R?5hH-Sq+9p4vm+DtK#y*}rm_B!p6&1SA&$%};o*V9Nr5jjPT zR3Kz5ns!0SuCH^=*v6^*Y4Sy7a9Kh(TN?W;N@&0L={gYT!~+<}IvKk7$WrQ;G* zUg6>?UcEjsGX5>$Nms|y0%%3$$hXSz$wqGi2Tn5NacHp$1Qq#1MQz9kMwS-61P%lVR-Cb_0cpBT>(HU4Q7?OD7L zJuRd?_z|HsM)iTeWN$rqx#qJA4{L0($vBjz2Pj)AGCr#XwV&U?MSFEaBN?qI0uRVn|BMMHoV%we%kUTutDHH^~vtq(n zKDBLW@?6j27}OqKbzN$#wMNrVoDnRfO^l)rYku#YhS?*IsjH_mInm*J6v!jVyZC$e2g=S{kGo(`s3yI+Kwjl(vtscl#Pm*C%emi1iqy0}S&oiv-y0zr2LzUF z;&!qOpu2k*f`sE=6TX4m6dDh*CXK%ubRRd=L)+i7Vi3JSkPLNyf6tpu4~^Q5eX)MX zQClP4fb@LUvP?Dytq*im9Dhhqitw#{6=jX8?AXiq@jqJw&2LWYW9SZ3X6LqeBS4uk zQY7KnQC`o~PmbLA69(j^6Qh`S^#LfUa{tMYny+O_s*8^a80G8*MA#;#0h;57I?S{M zc~eyPi(T;=gY{vN+Gfk5fyVBjMHhL$t1|;(%@NZV?`aImRw>F^CEFqff)m#iWT4?E z8yTEUe9hlpx=CjG{~#xoDqc2X&;5SB~Pw*s)s4f1tF%T7QcqXfu9-LiEW zw8qY$r-q62dnid7)Ho%zmzAtLmW}_bDNM<(mi6Cfnb+Q=c|EV$@m-1C421Y#TF+^B z9iF$f@g@903HBjqlFg>t3c5z)P*`@CNmjlY3bG_8oCM;Z@+?{kU5Ca*} zN}1hK9G|;#b?&v@8PFKDT2DBdFOcZHyZ$idb-aK0qXeFRXdrhf4eukR0sME-#`jJ(? 
zSQbG+j(S3j?e%Y$ns;qWUk{fLLytKmFfeu3mzeC|K)00EcOVfpoxJR@k3%{JMf zJtdNeIDEwS;C?Yt7?~L$1Ms=dF9f_4QfC1G(fQk3>K5p9&aZ8*BU}_btbX5CWvCqd z)5Kh-2Pu~@qmZX=Rq5)1#kbyw*sn2)cbD@Zle>v=UjHqpn^e_!@OjfTdTb_{E3#>G zcR*mIz%mIT3ENzZHtyMN0r;N;n3_HO^n)eQyhq#C#|A;0;y+w-$KFMz6HQzHEtg}6 zTd=L-;A2muE_a{@Bj5AU?1ZO|8?~wbt-Kscpu*8u{3Oo)dR80376-+F=t4cpe zGW0nLVT@DE0r=Q%$PUUn7J_1jYZdvlp=%XH5SQZ@pi}H&*hw zl0?mX*3$)Dxo;QIK|d6U!*9q#o)D_0+?mxJn>uv=K8!|h$qnwBEAn_r^7_nA3=|@D zNs*MKWo>!M?BV^bW+0!b%ag<>Aq2FIV!DvHknZui8HH%vUH(#N-Bce%j|taO6P-`hnrOm5Hs!3VOSHO*;}s zw|mxySoULztsKW@tjRzSZTevA>$);q5%>P-ddG~`lSQWxCkp3LNk8ZN8`{tG$z3O6 zuEvE9(#LHovZLO+wOEdzteS>ZBY$h!{;UM^JGkY#Gu})`OYgrk)ocsRGEOSecDH1J zW>a5}hKl`IW$-lHm+;MF8<5PB&M>2vgXWMd{^yTKgZ}MD2+C3quHP^+Vyn^vrsWlRN8~AvQDbtH=GW4l?P=yeL#gAn#c;=X z@+{wtiPSHDuID}I+uYCpm5M)_xIJ?wsYQuSsnV-nRC)L)5HB2cEx~9&22OG*ER@F2 z`2IaRBjc~hkW6~OY+1jc^=pc9h6(J}^Zhx)qyzm96KHS&W!*=)^yOLU_Dq6buoFT% zUm>?a`{rjCzlXD(ChKb`1g+CfVRrU6vl*7;($n3^R#a(DUNqXo7M|-@o&v+01q(|q zT$o)-eVu{d4>n84)1@*~6#i@y7UevwrG^k^^$Hm)ru0dj8nRrbr~TQ{K0`ON^$LFT zEOC-*tj8$TsmsEvJOy%#h2a$?5s`n2@$ekWnBH>W`A})^Jb(eVyUxtyUm;Rr9T{@| z3^S5VM&R=zePbTD+5UQ@1|3_eGVc+-kjo?RY|eXc{l?KlA+>js?RjugHz>?Y5Y^`N})u4HYhfGD_)A7{XE=jaQh=QVQRt`!je<%8GA$Y zB(k+*j5R@1i#b#U2exL}(X<-(y{h&7-J@x)+UC7v{g?{pb)(uRh5k|&VeDa_3ddgI z85Q9DD;~9wUB_F{Oit3U2}h>4tzBSuGmYtrgq%fp9--`4+R$dq@#s5b|vv4)cXU0|}ft3oaPkPu;l2WE2aFqPO zVn*m02Rj=P4e49fs4vCy27zTV3tno*lyBY6Nt#j$HQBgFW3zl&OOM%w1Se=(7qv{@ zAXnKriR?rlwi6?x!n!}|be+2h<5yFL(E57<`0r`!zt%EIJr|kWE8*)58H84zsln!{ z;8Zj9NIGjaA=2quCw#3cfuq1ZEz6q9ELPu@Azj4zwJUzQlUCQ=O;l7No~wg75Bbth zq9MSR@WkqR>O(5eXaAv?r}u*Je78jQVL~*Ws#5Q#M37#+@!t=2U$o@13fN2EeU4*9 znl8krJ~0~_M#!D@+*bUF8^Ch^U30m#Xn&?Q9@5(T`998lOaPqu)6N;l>E^z@8E&hM<^#45d?(YMfhFq~(iAWmCPau#U z;v+~uMFaJ6|I^rmCR4YPox0fftDBp$kaYATKc)K4wzkS_UU*wW#IwbP`?SCOYNgmIE8(hu`7-Y4os!jx4ZIoo`< za!bdA@sY#Km!=~Bk0WPp75Z_w+kky4gNG2V(lnFFi28Dp!_ybz$C zmcJC*z%;Sgy{2|8*sBXn(Plws!M?Y+2Tg`ZnZ=N`hXw@d$?#Q|t81`LcXL zk94oU%_xGLC!b`8OzL8E=UogUnH!W1tPy?RGbW<>) 
zmJ~erv6Zq=rR$c6M+fO|ah+wrzi+ZA$H3%5G#2W=b7vK50rT&;T~hjNHN6=Pne;%> zzh@MO>_#iv*}Ewco2QmyPo9{$UsKri4-8zB*|D?tD+A=Y{QPu#6VFd)Xzi&*q@AZq z82J%u`mUji$8?}Nk1WUyajh6wq8nvr!Gd;10n@1;DIB?aHLGFER_hqhZwpMMEu zgi_Rq^Ly|s8bUU=M=tWuxwFqqTRqcfZvc64+iY+5tG&0gi?5;x1i5`NP@dUoGGVDv z(`#9{#RwE0iX9DqLTO~58SMCIiPz($8ln9J#ZvJ$6zn}(7mmVDGoBamuapQ`l4+vE z;OWcuNL|cMP9OeWM}hxrY|JdgkW~{7LFEUvwE^s6V;7Ug0d>=3M~Zh+OJewGNmD)g zr>FwRIN)Jx!UB~jQ`T_=_Lr#ZCTo@Fb}1Z>f6jr0N(?gn7@cGn0>3%72?+O(prQtf zc(gADaRHowe(8JEvW3GC-p+#ETNo{F+2~L9^K@}ftBuI6wEh?^RDniQKzGvDS;8p2 z#{yI1&VG4W!drgE_SaC3;C?mE@n$}NJc;W%MU1j zP%*_R{!Uxx=28ZoVk@8!eD1v+%Ay`~9LrvQ*J6%H{kl5as3El@;^;G&I--RvjB?$7 zv@R(>B!&x{b7pF^p>OF2ilJ#@hoEMsqhVsZwQPJ-mN?-i@Dv@~qZTiP##e}-Qv z4RPtsZUetqj$wy5PJ+@A5n%h&ncr$b-L*Vw8A8Im?+Vxk#BStIwgPKQ4QmCDk9Ss= z-RuMi9hNSKuIfBko$c4=3t8}!*~{`AZ?+4}w^$p9PeB@y)SK$;!-c+SE$carZhqrXAM}(hwxNnFgCtBRr-C58HL}=_UQf%)|(uT~L zKR*5Fa}Xi59TmgW0mz^vU16a=53=@e{GG*l5uHX%vWsMKWaDW{NNZ;*j?O8FaklNl zIt~OFT~w~kua0PC>ZuZ@(8RSTndH$=kgyQACcl#RZkwBgjwI9!&zGMslBEJjoRN!kEtNqhaydPH(jkp5Sk%InRg};B7 zJl|m%vwbQ)O(WrvhskQZnjK7XNb#GUfiaGE-{NhT8{M40Jmo78uSS2}sCs6tXJ9|_ zW?8et*qdv{f5AEHD!sJ@ee~_{b4P2>Wt^XPUdaelB97D(Sd~-q@`lOr7G2yxKZR5z zD46k4Ra>lZw*7PboUr`+_njwU^tG&nv|RD(DyqvZ9s!2fu}8SBO8*8)orXk4VBWeJ z>9!XQqzm!eDo%~VJ3DMOs1Mg~%?FS)Yhn!11RvIB?Dd7fKP!%Qu(ISOOXhma<)QrQ zB#CEeT>)R}iI{z08qZ6^8h?j{PtO&3KD^Y-0)Bu#Q&TcX-kG}od6S^)y7)l@pv?7J zj??F~BhPu?<}jbSjL%8VGv?>?#efQJDI?^ zZ5P}*;Xns2AxA_Qv%c`Cogo8B`F7ySO^q0e{Jq)lR+W`BBOmnRomAHR#>`a`ekkRQ zH7DunK5(Vm-C|3wQSX*?vuc(Y7av`tG+%PFq||fSrDYj;;Vj6*$+Q}D74LhctH*6r zx^|6p_VS}^GBeZn^g!M9J>vlJE|)Z>+{~#<{6?v{^4v$(9Hh!%>P^4M5Qce z<+22TYvg`*&16O5!GOb4Q&CUT5}j^iDZn$W&Bsu3o`UL$(SH2QNCG08sZ~{I+5Idf z`&7aid_Nh%Ib9(pb<{5wlG_<(O{8l+POkQ}yqK4id-_U>uG;wPEL6P^X3_j*z&bR<<-|hZVt)6~dh0aAUo3j=*MG5q%)i5} zI%S>&f)K2mRW_mkRk>8Q;CT&;+OcY>TZ_5?=R#3A+#N5KEyn95;^w1>UW66B<~uL) zf}HoMm6v_sBjbv0g}JKj$59%_T~I;4gOm3H3+~JggRBE>r$ZsMVXK#z#N# zOw1yqmCs#mElpr>W4v9Yo+lH=Y2R~=g3FI@8P12QAYx!btQ_UK9APme7@2fGvCW0j zp7HuZ3SfUq6b&7is(L 
z!hrn8Cdk;HVN~;Kk7YU{UEGOYtTj_rB@*u=bndJ@EpU)CJ*Ir_BKcDOI2|#o(GR(C zR8$*^USf@EPVqgmylqH5%dh0*l4YsQYV)Bf(Bn~o{Vz_M)FmB7l#621XE9phg6{J zU;~cxX4nW!)aEa@v5yT>AXl#2TCQ^aQ71F2up{uRs(PwG7Q|(vGYnit3pe#o)ln7X zGX7K`W3Je2TaIb5oMB#CLKRw2*3GLZOJ%GxulW_~-6t^=P$qBypy{^%sfc(U3EZ$e{;$ z2%1RmAnqu-*Af{}c>_Ep`x#^YFSFHNO+^%`BEb}o%-~I(Pnjj}M!&H^HPTMi59hkb zJX5v4e_AM0%nQC^j`JbWB3j$)XJjq}E_V5D?cvP?k+`&V<; zl7H9?UZ|>!QbMW;Pe@De#8c;{UhrQuT9#Tq6>>iR-Lcd|b^Y{AV2;-=r&O)~hdJU9 z4dr*T&4@_`WhciZ?BuBr#&6?sY^fPnxSH?unHR!U=HS(JOTOV+6I;~lv5U8I-WL!9 zPuXs#IZNkN#)5Gk(%;f?!2>-6b+EMwTmd_CnRfGD?Bsj~25iW%w#9RLdQ5P9=FHeG zOML$Cwz<4G;Qkf|U?33+LIK#$e{+~uZq7-H_Cv~)njhVVQ0gJ!j6p7@1; z(*DkFX(c@2Gt&?Y*?CK__r%`H_6bq#l6S>mrY~o*14IRc4AsXpo7YT%Hd&LzxWD=%syf0?CKy~3UDP+!$Fny#=!eom$>#<)6=V$WR)1ScS7~Pw z3YxL|+RtPCVrj`}PFbA=o|e$)wmPWEcgna<&c!8hJBHVX>qJa7d0qGXs{vt=$;BYd zrath5luNGHQbFC%{iezml@|V^s(5_SbxsTVXSlv@{ngi!)*#lMQt>e7n@oXul#hn3BMaDhZZPgboyfStNmed4aI9CUs3v zsy4R^O!qWOhx3iN-@>c@^x6XYuoT5h0$ItSnpG+2xwM$zngBrxgYmMxH)5A-K z3dN@b8^VK#-Kg}M zP7s72C&j1L*vps|Dm2Z|`C1}b%v~!j=5VVeK>L;BRO9~7r5RiH4bySL5{$5BL&R1c z8CLN6tJF)e__{LS{F1_j*-X)RgdL0A%NSZ0H)tcA>ln%h`ScD`39)yY5hr;?vy!d4 z01L`gmGnD)JV8SGVk@_6F?R6%;`Jfy4=l_}BI-@2&N#(^DzH2f1R2;(kTY=h9q=sJ z60B63%xOY48*H6d;okc8$cL9_3PHTW%;`Q$zIUb=<@47wE5+Uvr9HLN-5DfP< zsF=L4ZY@r0`C;g34QCv+(AI2|;}8)+r{6q57_~nb4?O$FZ|sw|rvZt8+6>jjRkY<# zIGCpkN8?-YSY1S-($kmq4{i$1w%dD%k5v>U5;CuS0N}PKiB&=V2P+Gs;a;Se+^qdo zl!&_Tt>7suPx#AkB)_g-Ou!vr1#i#O1%PAp&B}PtOe#^USB-8Fcp5LUpRYGOaH&4s zsLIY~D)+FYWB1ASko+xZ^gzGqu^k}d7jK~k>)$X!W<5gty<;%?+-X~=?YJben#6lQ zg4zRx(HlQ99Fj--m-9z`d__z)eA_t$s&;lA*Eb!8aV@0&%Kli=#m@sQI8fyOIv1%~ z8uiY3&(j{m#I~#-`wHBw9BSaVX9rYP0M^>S5Qxyey-saoLZ3lW1WGUH`gERlVk}K5 zssmfBZ{4TA#W{N!J}`8P^@-%2HntG>)am73yO^n1_Gd^XROwdsZ#wchx>F5Y_t&I7 zl2yw|r0BcjRw?(d*>qz)_m$p!pOB*`5!=ytZe(E@fehQ+seDG`-!?C!q|_63$;{Rz zCx;tI+DU$74B@>k*9-IOmfW%K$)J(;tYIBF>zu-S;?4jq5(uG2`(EBUXO#x9?se9R z7a31k(C}yd==+zOXA=zg z3lb>~kyaz|KL}D=Mhs@TIF`J9MYURij8$SBjNNUjHP&mLrRk=lgZ 
zDLWkFq{g>VSWS|vGjc3=b*-lWcR*gN-gNjCw*%0I-m`dN0W`Z9@E1e>T}{}&wj7x5 zne0x93O>0JqFxf_gz$U!Fsygv1g6h89$s~YNDQh|@q4tS`VLsVzE)f=2`|yJKWWlm z|D~?!@2=tEvKKZgAw4VN-?1-(=CziuO)zHrl+x6n_ZPvx+tJyX2_{KZ+#Qc!kY4g1 zsNFE>B>>xQFOv>_bC};EtfROgW%DdUY*b?NnfG8uX1@C0yiWUR2MNUQ+GdJOd6*p+ zI{wOf^W|prii65rZgz*H)3ZdleqVHE37kAOr(j>WEkAew`P_gSczlt+iBiM9IUu8+ zYc@`9-mV51kf{PB&c{}@=N#C8)z;f1wBgQAaHFk&0_MA?PP`L>+liC0)A7*ZysVd5 zqyiqOQ}eL*v6)G^;k-|J>~3HZm0#L^QVz&kjc(;sO(YL=rM8nSz8u+C7-hTL3Z%hMSN0Ytdq? zpzA+yI~1iVzP&!J*k6;L0JxtAw6bQ9t3CtUhaJ!UDH%}49K3BjO)-Rs8=YA5j-A6= zO^plJZxBAc&Vc#kW!Z9WaI4d{1%a=f5AcxgOH0>#;GzAf-m8&NobB!(H5mrucco@l znw}np&kMglbBcpzX-~}Px_OWMEUQPj7&Mo_Z6>c5XZ)A5}J>3 zcrmMSw=kRV?|x@RY^vAGD3A2@1q0u{U)t;q`>zM)%VJyUtiPsTkS(#h_Qd)F-Z-5r zmZJ9AVyo+$6Y5?gDn)d^I6>Y=F2!6=5En!5+HutZ=J+`8G%D)r?ICF*LxT6#O{ums zM)mr&h(z?AEVNbrVyIAy(9LFtRe0{~^w%7TJus4>l+-QXZOR@lg?C0(r?sUod;Q=Z zGH2uCi)@nCpURa6SQQcaT~rLwz-ejbs_+}v@rN-a`Umk*-uY53dGu$v_bmFK)J8OB z6t^Puo-;SNFOmGmSdKFNzhmiar~l@>ey;O>`nms$KjYwV1n58dpWtm+NeT6T9M%mt z&KVZ}JsST1e-Qp3#Qnc~#s7|=@3{YsRmzW!jy9r;S@b>=H#RoT)cVaEQ(5o%&BJ4L zN!hSu0$E(lPEEB77MjUyS#Sy#F&ouCE_;fz6KHa;DpE}lquFDB`Z7cO6C}jMYwtn} zp@;eT%tJ$J8`1Pc&sti1eQt;$tcMrrt5Fn!pIS;D;9O_VR0jKNw3-u^ovE0Hl#iXs z-bG*g779|S0bt6AzGjS-7vs|?(!;Mf-=*In{x=%{|MPk$E9>azaZ8e~?zSGR zIL>Yu#{3rxu$m*Bfkzp-pO5L{2G7+|TQ~EiQ8tcLhYTEDGSxMQK?(ekU-utrkf@ya zSq&A`-(|I!q41$=r`QVQ0Z_8-rJ9%bIbsIR8?aa@Nj@qyh&4)SS|K51EvFl~cA2&F zc7zMtf`a77d@p!yQ3;uz}e^RwJ!y?lV&$g_M8k}23k_k(%PR2+< z>J(~sD_PX%A(M?9!XpMp3L|9VGH*19o`vNP8DDvBA#OQDuqChv?9d||xBjY11*SUE z=RY4vug^z&$EDb47+U#i%yb_5o5gdpUhEvB8ayg9jve zJJHo)ok%yvSMIxJyU_6&y|Isozdr5B6WL$KUhTvsAB0(6Ns}QBlf0E_T@V7Wjk3<$ z3FcZ`Amx3^tnd`d18-1`-<3x5-5Pl9A^qy9>_*}?7S+5r;4WpjGeLr_aF!9RtPn#7 zz2LFj=Rj`)qhfy%s(9%dhlplN4LNNMX`W2d6K|}Q9#Q*+s280b$P(PN zz$WP1H6=zRi>^eC?%+62Y^rR}bNzKUQ(KxV0b+_wzm$CjVCC|_eppuApUdTP(z`0l zBaEK;-TQkzSh+h0rW4p{w}l%S!F@f{e`&a8zLmxF(ewCq+{%n z!Z@&DCMddIkNa-X{IY0C_P5etJq*1Cm(%&gU~?^glbiBI$f>l@Mmw`<+A z$J|jtL217y=q}eE)E%4fZpB|(7c>^*aZH-$SjTMCrhDz`-*mUrWX}8zF+ptey;&k= 
zVTq*5=~cZt4?(!RhG|y&f(3ZIxsdoGB7`0(1Wa&;%10UqB(WF+9uKyX9~l3bWe=MHD$bT|FVIHh^BV% z#Zx>4p9OuERW_B+?7bO-OYxqH@$+^ARXaAj0&zX^t=L)^$DH^^-^mv%O79 zL9501(D`p}RFifVT*fofp8uZ*Qd-@&Rjl&d_gPE~A2#Erx67+W{ zh7Ry-rfU!F6e{}Ym@HDc9*las2A4fIZk1m?6v2&gC0j?n=-@t9!p zt3F(e-RLGNKNx;clgH>uPg3sk1gpD&{bYlpZAE7nPFZlt{hgS|UR!j*Dc z@xvVNrfs;Htf+A9L-VCh=vCW6wH)O-CbKYhmHNbo zq=({$afq-U?C|kV3&MUEXDQLBc2TT#<(8_ki~q{CXDahB->KKPs#(-E^KHtn4En6m-KQ`_qV-i(6hU<02|ox5walwV-33@xF*IF~c4W+fd@s$tNx0)dIl6DK;H z6_!V?Vz7b@JMZV~EK&5Iq2xOYQSfhNYpXwF38js%Pq!uEkjLQIA5N*`KJw05M=9G< zBN-gUP%{3oX(yMIuxcJXprnKT8#eLE5K)Inf0ibXk%mFL&bX~ba`sZ`^X2)>t^(ra z>Q8X~s+r8Uhv9_ic@;4(^W2V(!c3(BVvUr;?T_A@8{0o?E)vJ?3^db-#ILb6t^Se2rzIqRcs_V9y_w31?g*XlvD)lgnVbr}%Q5 zZQyA+C->Wd@9CdX8y2fmajYyHVG9>Qk%3E`DEJZT^-0fsUpd!+%kB?KS zk&K)9BN~Rn6BXNm34rCRj=7p?KHH7`mgm~$TfYwB%+oB^-n&j9^~}@0^_VfDTy~75 zv6babA=(JTIgafr1Yisw`em2EeFsxSOW-Q%5PG()5%0m?f(%!~X}Yi}3Ee*`V zRK`f)h-EC+KvZb5U;};Vk)NvZLqc(!4l3&1PqGnG-fP?~{iUt@`!sGo;!s&?>bxcs z#z@N}R zN(^+T0rRo>wX{qQ-Q2GFqCyy8DFBsLLwPf<@NC26dLHVwb$A`_zK!)<5<3>mx+W4h zxw#+#A7^xbOWbm51%MWhj^)jscb~rRC-6<0_TOk68BXr(T1K~=EyVOL<$ft~vIf;k z?3c99gUmbtkz^8Kf>R-GA{u3vd&v4bAA{RpDWKUL-pnPnGXMy)vR*=~z>am>A3{;J z_-xJ{zi*F&*4G0op&3j5bsJcuHZ;-y{COkzwRGblPi7=+b;%Z8KYArG?o1z43+_3X zSK%~oU;HNG-i;b@q-ex^>t5$kloE<!foAxQ)m1kXBx=-E%V z)A^V&q}#;>!G^k-x%s^g@R9g`DFBeAK0~6oxVTGdvlGI3XB+%K^uurH;qWkbi-fHm zavQ9xYbh)1SH9I0?|CzHr0|l8V|fY($8U7+zbT?`)-wP9m-zo5%t8NCZP0lTDKuWu zI0EEjWw!`T_+!^!o`pcqDT0UDmF}4SVpkp@et@$4>!@daSL=d$1HEOv59gddx)wjeAG_;B6R zA@b&J??T|2-u6H&828DO$5$7g$0FRmC;gc>7yCwH`KQ*+tbV-S5zZnXQWa&Is7XHg zS09{<-n*{89X6^}T3ugHx&HXM8;7{7{dO^EEAWbC$p0X>hMfWbbITej$R&n&es%&- z^y|!IvHvdZE#p+7u%sws0IJ)z8nlp@OV*?Tx~<~5{}dW#7Lyd-BCBIY11rVXORQ&5 zd_v-E<3R8MM6%Aow-bE3_N_`dijoz6mpFyT8s$WZA=G^X=gIdp5@ws>y0XGBv-s?NnQI>`9`J*j8$^$E3FoUUHYKklP>Rx~1hS?w(XJ6#Uhy=k3n><+0RsBw9C<=4?tA6+p5 zggcLrJgD6_h%8|V5%FCVJxo+=ahMcJZC<`UO>Tc;XL3Jms)N+8Ru4+1{YM;+;t0{q zk2m+dnWsEA+bihXx8ee!El0z1JIOOGG>4Xole&~QcQ6k-yGLftemRc4Dhp_AaQR%J 
zs@<&x)cAHi7WJWt5)d_eiOhxR!r`??r6i^0$k+|qJ5ptCKf$}G@_X;wJEz5@uS1L9 zAoRAdlqO8KWb74|x}592-QnzC=q_PLSP>3D*cl7+lOS(PNyL%jCrCw8t%JO{>(WgL zAbo|y{w>?v@>f1;zV@4}dvpnD$xP`r*Fn!c!cW+1Kcb{IH{Bu71^@1!e6T$B^6ZxP zyAVV*XZCsBLDFhYcw! zR6lwKfpC5vp0?If`aO z^m_hT&?|}cXR$gAuf^qI6>INpWtF4Uccrb4`_Tx)das0vBGW#Xo9ZL`1Tse>oD-nICEuD{~bX5<#SMl|2gA+o^#2aQ@l6y&p{vykFx(ljmE5y z0*|j_PP1+NhkEOzXAZs{g&KlRYw;6|mcqL~NO}cva7OLwa}5N7ujn*fs$s~>pE++Z z_75aPnL;NmNHdxnR!0u0OI~ZVv~F)6#X27|SpwcMR zgvmwHYS)#fh+c&OeE?pC+0|V5Dc&}qTYU4M!VVqdUFDLyJb%TGr^n$a%QdB5Kz)mp z$Qx`-jDO2Mc#Zg9$#9=U`Z0V!deDN!fB!(1=coMrwxmC%5py!^f7x9<2f0a|)^UOP zTtq!`&~Q6{4Z?aOy9d9kxD!!Jx?(9C` z33-YyR%?o)Rz0v@+vSO81xMwSV`We6%IBMRHe+nsINiF2qX?gR>m3-tb~0aD0eu_H zS9`xZJ8y)%=2s{i9vD!*+pVpSyy--bEJ*t>eOaVI_8g`N_bcdM9N_I^t$m#(&8@oC?vT?OD{YQV z$n(9NAy48@RoJB^qr_GaYDazhMIC{Q?s`1dt~z~qi0!6hBSjvn2^e zQ{5PCHU9ANd0QJ#Z5;_I94(J5#>w$|qF%?vMKb0;=*|yX zxb>g!Rfh=Xz+d=-SGpq0~^2}E}w(0-i z?Jc9CdfWD4LO`Vyqy_{f1Zn9;K)Sm@nxVT(=@>esySqz5QaXkjI;0zhuK)IT-}hS2 zv)+2v`{DUCA7<@+UHgi&kK;VRSzs)nu~A8-=DC{I_SL&4j|2a4YFrfTOLnhJ4~nqW z-hdsCBnKtmH^#>5Wn*{D zUCL^@fBd^-;!-;kiXRe!abssLa)h_3D%}io8sS@JX~}8{w|fT-DFMRvM&7~`v9mC8 z2NTD5Ute-XO>GK)!0m z-Q-ifa;DGX9{J%(GvzgZD)4T z5s-@XHj8Npb_chEy@7^tXI}H4;7kI!EXhwy7vUSv5D>Di=vSWCn@a~MoH&8a^pEW^#3I^nvG>~;%AEpB-)X0vfKP3;iYL$O4J-Ox6R;AC#$e<*) z7A%n(G|f}c7#smlwC}sE&HQ-*gDlw+qf3y*Tl@ftEhB9nk1`MDCdoWc&iZuZ^k9T~jZSHPDob_%(|hR<#)b^OqUcF_E~gF zrNqf-7U^Nl9~43=cL!dGcx0k@nt4@KM$=3RXkV^HnL%qxBtz=Z6pyYSBP_@I^((Zerf6TpWqGbz(s=gKXHx0K6jSY{SUQdFI;gGO^g#_ zj*?B&emZO~cnU6L7`Z+znyvfi?Zr?!yOZv=DeLF`k#Qq;`c;9QCb;BY^y%cZH!6~d zoIZ(wM9VXbUwlCD=&)CFfhjgbbBv#(W=O*bgY3*N`02jpD*w0<_)q9zp{X6A_{=ug{g$qqCgK&?YlD*>)Hp=&O*UZ;T*?iSa=CgWozG=ErB>Ah$lqJi7w)FWa zrtkXLDSJnJ%gR-kFTX=+*#Or?@7ni|j@|v&EZhCwGb`w_$Us_P-$mEtdNP=){cJ7f zCn$T-bJ+WZ@(&s1xcdF_q`1pkw0bf%Z>OR!4B=i5n+1*Pj9BIO7QQyE&DdpHcc-UF z7ttr;dXGlyi*{ztzhYbyJEr}yhz=rJfH0|Ad)t{w3iwAt`@}3Vc=@ift)8w>4|K9R zMEBHwR{cq2DzINrr(K56BKy;5t3&*7}B`9;8O|;_o{UPyR*(FXwolM+ZZY%_Z 
z-TS^fYO-?*QeR+V$2MNP-JSl~sP_?5$qb!d5ftqqirNH3L1Af;>I-om!6-u#&REvs zt(!_|-fh}vw@v&9k>t8<pOiiUC-};Zh?;+ zMzPyP8IhZIF5*=#Z|h~8_2txa)dOD8?SaekFJiyw-qIC!f%x4`owI!c;%G1+j06D+ zk%rjE2FKE+=dsQw5>jK@Wx%_50>x;6pDLW9|2Gfm|F75aKfyBm-?}URelQfsw1lec z?d<&CJpBs`>FgZecXD=imI};Kcf8=48JAK1Ckq2ySKvRucVlzFa4XAp1{wd&oe(Op zS;4cx&%OWWS*ib?GIHbKnA96tUEL{Cp|$-K)5B|JxHt@^AaM#C4D-O;rE5uh?V}{C zY0bKe?*3V$DypiwEZZ7APcK&t!x5J@LYw`5;KDaD36Er?sAXDsafyE}v)vX0m>E3i z3%PYs;@gS)v+Z3+Eh z59_7eBeP`yAMl%x)?#d3N(=h`1f%6FGUd5@b!Ywsm4+pX`=|E?GY4thUm;J#5!5TCYebnf9|rh7JI_S!w% zRD49IS||?QmFVRV%$<3^P2f}gQJN{g`?a#!hiiRxR~$>iFu1klW1I+@ajqGCGQW?( zq;wL&!@2RSG*(2Ubk^GL%fDx)^IeTqS*X_I*CGXXpCLNQR!DEeV%m8j-hg(J*cUTR%4!T^3-IHi&)&lxErk9%t?4I(D|TS3aY5M2eY)->oF8RdUi>XXNwi4Dp3^&Zrh zDa1_cSnIA1){|J44mMH7RUe7Mh>uB03tzEbsM~(|H~EOZfrI`s>LLkubMv8ffqk5& zL%+kqd9vUw%*pL~60eXd_`Vr}(1{<*{LeFXK! zL!imWN~T4p1cCSIPg~UXHj2sv1KkH!v+WvZN$G4KG4&hxxjF0>*=mlXuX4B*vLxXyS9S zjjs$XKxQ0zScDC`yz=tcrU|iVdLuiA7ba{fYW8_ydt-Q1)}{`wXB2LCqkpy4jNK!loH>l!(g3N(9agPp7l z*Zr3f1qAde*q9}4D;~ZC-${g!2g}HN+3=ieWGUddCR5A$dGCq+&7A92nU^)IiC|CZ zb#J|{sa?`Y_HBx)qvdT2=_$WAhCL$!7k{LurIT;fIeKkk#fkIMJ)n-pW2tp!ncBE1 z7_5;TSE}-yCs{(*#{;_6z?yA1E3+$7_Q{#(GD6ERX@z0d~bg8 z`&n#Ua{db=s`O#{KQDJ}uhuArA;u=$-1RA+g-3CLDOHgGYz+5LX^df+ME7=vC!-?; zljuZ`i$NExZ9`Xrck5d}nm(~yOdp2ptqkTNW#%!Q9#B%Fmkww|`N0AXI0Q&}J(D}m z`s3r%<{4+>br(M0@5@zaRVOq2;TkZhpJq4itkMmF*7~M#m=2sImlhm-)k-oIAl%Fw znmZk)TlfJv(QDw&hPulqB}}L&H(jAX-M4U(af<c~6S`9*pcUeDBzpjq3@k`3v zFOFY$I<_OKj?#iF(Ri!qRelh5eF87zgt{)))GPDPb3~!Clh>WUjQV=3;OqS-OpC|G z{w(Js;v-^pxSCJX;ZS;M-(Lh^75z{jNEoYJ(#ulbrq)yAAl8J5Ar#$Jed|C{uX>C5 zeDPQ1l^M-puHOA@#@i<;kh;_I;$GfjH48VDIAIGj*!rx+%rLD)-Ue1ze8*ADN<3pN zsqEJKp?u+FttDxF3HayE+>(2@)?c4&RME5NL%D7y;lh%W`eXft46=P{opD0|$)o95 z0xE27TL5;tNj=nc&Q#aRJT@zsH}g_>yHZxKzB4r_dz$!i`z6IzWvn&8O!aT>z-zmW zAk}k`z-CWQYd%@vb2@9dT7HFH_t5oNtM5 z8YR?KBTHwZRI`QR{7i)a|DuArQu%WL=YafJbMp(Cz0HsG^d-#(QmK1s;V9;QfO*wK z1t1W+?w56kT|56VLp}?_-_NXKEdzu8R|3KR=kV_TuJJ&a- z_io)Ly+|F9`4o@8RzF|qG?dK0+|TG;k6w>tjAtT`XK`v>fXEYtR(<5qn%%Wi^a-T6 
zFkfg%WkN-LYEGe`j7f2ZfStB9!z zQ@M~wx`T)r`^CQc^z2qW%P!wV7XJ!8jq9IL{NkJDpJtBN|CQMeMaO@1T%m-fO+ht5 zTRmB~P_5_%LT5P;R{xyjrZXCEaR0;LzWDJTfxAP}$nI4mHeH%F^>Ph0~;<<(2 z0yPVwXGh>j3k}#*&6&CBtv&La9~*FK=~BZ-6Owc*7~51##)+RJd^Y2fV`&6zuFVZ) zMMXJ;PLBbrW^B|`vKM&0*Jtrs>+kj$+l$PHaFWJn3d9oHi}dstzZQWx-puTQR8aHh z$8+A-nMf5cw3Ijbs~kD*F!4d?a9ow}HRH=lek^=VjRB4SNg%RW0l1#65Jq6m=1 zjN)5fnUsv-@7K^dZ<&GZ)__9v_xkHrmK3-!t`MsOQKaI}=0gMv=*_~64Bd7S;lLr& zGgIKSFsCsX4Nx*qAyyf=*elp8DA>c7MOTiK7G_ma3Qj!0mv=8|_n)vU5t%;JF30nosj&muEfXOi5v`*_p@9 zBs|PIViDl{i#T+gLXKn6tUoj{0@pqyTg1H}uz;xRhFkh!L{wHPvH1KYgrz~Q!RD7k zqoN#(peHZ(LZZjk$9ab@^nlhk&y7_eH>bBQ`CU{#TwMCQ3|sMe4@rz+wXIA7sZDtZ8%!+-6Fp7su+xNE^qK7 z6}Yzq@<3fGE*&R$UdjI+S@b;|eEf;8vyZ)65eTG*bZcfbFD*^YHNNs zbrS3q-0S9WO`_P^_mPKRzGvwE{ngpBrPB|D&Lvdf-0@s>J+q&DL(DVYE}T0NbFbxn z7E{2V1lg85zhTiAU)g!Dr&vM4C~nMI6quZ$Oji$(_b?0#Y9;#qtfOrdD>Q3izLPm2 za#ES8d@R(e1n zMZQ>dH>D&M`si4Hf+5h3a0CJpedB%oF?WL4FfoiZ-o{oW%<}OxnsF7x zlH*R9ooH=MI&ya;cCO$GReO}hMgLySjz8=G(Sh9t4m~kD@8_d!^L8ToEkyYMWpHaNy+|PI7OrCoC?6y;~p!OfWKAu|%ndi?D7Gbxq zEdQ&I`2u0F7%AQT>FUQ)Znm&?cCkdR!l&ddNM`EewTQA_xgKpTx-Y%kx^(VAu!K3* z>e=xOxhxty(2k4b?tR01Rm8L>vh*h-m;$aO8(QdrhbpIC6--3 z@C8lckn?rl^LX7nYW*Hg6OhM`DGjN2&x&GJKQ(wGI`a80#{QuPFa4OK)RHL z@mHjXSTn~P4)yI>6SCg~^DqBAMjr5U?ba%(Yp+O(Se}DYO09#QQr0i)eMs6w%MWlfk$4ZdAFQdkJWz zm{hP{xaKDZRYBBHao~RP%IPf}*qBNPuCM;C%IH&g{f$V3#++6jc#`na)SPr-Mk1W8 z0&z4>|HajF$9B)L#fCQxOR}V^z@4aX|E!s=o^-R}9J<&y0gX(wz9uga8uSiG!*?(j z!9(=pMK;9jR$_W z6Tn5t5Z)3$Zpu0o8yKKW#k<_Vl}B|N*jY$%eEn=}O*E528fbvJdMXnr|nNAT^` zR7r<)woma7(XN(5yAt&Kf&}8CDR6DC0^SiUbA})6*cOL;WM#9&Fbj8%^2(vH9`_7r zTDt1Dqt?G`-64JMFaNbWiEH7y7sL9uZH`H58S;&TAR4jwCx*FkdWnU+pVV&gW-6s1 zJ5^%8g%{PRowtyh>IK~Wjdkmm`wlH}7yT)ESn>F4xf8Yy$suv{{O_K7PKE8+t-rH4 zpUGUQOz)Lc%<4V0Y3zNlhsQfKk!;0!2VBcu(vFpZD|&Yo5gIWKG5r#VjVwE+ZQ~(A z?@r1#>@XR$M9yp(o*bihWpD+UjQ@}bUOG6A%9(Q=*@CFt(#8MhInuy-Rmt08! 
zMG^lEEp|q{@>DgRVw-*kLs%pQR$zv{YsO}%YQlvtL#TIFRXU{n7P7L}n5H%%!@AbY zydrsYb?|ZV;TNvM+qS$&Qmp*l1S$gB(B)MS{l>`;XfB&wd3WL3Dx4>bqdK5n45reUlv#Sd#lzUH~qR`HHV!r_GQIUX=R;rUE7oV1|=hh zq4?|Ob8~GckMv(zoPF?HQ8TwiFQZXKM-4o z&$zBfuRG$=1_l9%R#OFrJ^kp0C1AmQi-Q=x>qY zZGMQyTZ>$7yEA3^3u5@~-r$XZ^3|Ya&|+|!K}O!kZ`6NIdv!uf zAriFFT;ZaZ!o<{DK3F`E$hNKH!+h`zEuFTHv_N5z;`JBzFCBz^LCp0^P^Chtn@n6< zlu-y0Azwv>P2kN=OHJjstqimdPkhHg4h6QBSVTOH;=IHVX zWKT5d2%T9wBK4?1(DX=$sr80$eaFIVn2px|F?TSBA)y!ko2=F0=kQnt2Kn3akrOVH zUH0Ua?aVo^kK1-aqH?N^BH?JN&b1Sz z7ydIJ``ttZD*B(hFHcO?M9@~JOKe%n>kCAU)svC0oiyPkvp+*XC2he#)`%jCWUO?* zmVzV$0>`K~Y6MezPV+)dCOE|Q z7U6s7a`Bt@N?$NYw7H=;oLr4fIRil)e9fps=#feI`bBnxv@tDnlRBb9l4k~3gM$Sv zfMa6XIFUxB6 zjpNN!!BZ&<4#*NMN-Y*z>;sSaS=mLJR!lYaZ!o)5(ZC_q_Bc|TvFq@&@5&~mxw?6y z90zV=pKL@6hqOTx-h^Mj0#2Gn=+imz?tA9+THS0_!Q|9sVT@edpgUP&+IS2%ecZMg z1()otsV5WTgOOsoax<-zBLudUsU-HvzT%Q$)M|f;1DbVAyV^J9BN9v2H#$C|YR;$4 z4jmu$3gkFsL5<quIhp2*`g$B&ZCu&Iucp>@?xP4>y$w`WdQG-o&t(d^91eq(TQCQjt~jkLOmr*n!IZCl;W?mE~)w+luabxuNec1IpxeE5lr@d>B`flpLcrU zbqUz3&H><3< z78+o&jlVK_-_Ng-;h85`3zI0a&Z41{OChI~g zLK4dDycw&7y6Uz`hqH0!nxwAR;*lWFvQLF}S9{fDzwfgSC|lKVpRU&`E-g<_4T#LP z?`M}typV3sXROCSF!c^$cU>=cLOc@keY>~a#hGAS*K=|;xzERFyI4!{4B2rXVg3ZZ z|CHA3ah)P(tNI|kGt}I~xj)|G$l#3Zph%R0($7ykxaJ^d(`Z_t8;vij?q4 zD($fB>Jq|3MJLW8=7jRVQ|jB(?1`M15W9TaSB^MMl_I-=C0i}{NC=7k7MPv7yX@Hd z@B88XZa=FT^Rwq?CO8g45yJKJgYXGyS@yB+51ovxDXJ|7wR$UU(-~z;8?M1pa9JY$ zm6g8bf)g1{SU#;wo?1SpzTbDu&ck30WV_T}7;o^L#4z@3l3uPgiGia({W9;D!Wn|J zA})B8vc(0zqRP1WR59^b67xV`S7Di>yUrHoy2Gf=m~sE}eBtD%^jR)fB!oshz(BLU z3~NoIh}wN4lbX3qH^!vW*o5tR8tGDNRUAY*4_u)1`wn!i@&u<>M=w&WB7N<$udVV; z;xN4}y*{+IV!NuUu<71#N4=GdlkM4{nM3%;Kx>5RB&P;t#IFnl1m1ok_L|e==tf z=sffXKAE@eV9&_*XMtU~UZ#$Vt}XMm{AB~#d~4)=qYAJ`e|*Q5l$}#9&1<+XE8&A> z%g%fU849$~P4knS;aFxzW6B?Y7c)TBG}#iy&fD<1B-vWpzL4}e4eRP8U<X0-QD9MHkUCGLFGfW5Y?NP*Q(9aOU z|4eN~+_Fe>v@cHY(ShXd?`rhSs)sJdIW4bc$h4{k9?B&O*%=a<);-1DUm-NT1!S2` zRPU%r8j4IAcxIm?aTEH@uUJnfqxX^WLhGUT=}){4J;k`(L~ktJ-AlF+{C$C8+%u$t zoAGaD$S4PdSiuoq&rkrv7#X~$DkCtZgw@^2m$7<-EAp+kp 
z1^*Zn1j}v#ER_9kv%jqRW)_Z{ezsA5iLeCZkO{4#c$ymsWzeQph%JWLEC>XX`8qQ+ zBvf6zmqUe-mU3E)4h%7?U-7zXH+Lv2%S!JCO1VVHm7k~c#UpgC3jckBqsdW&&ekBC zh5w_pQ6;E@P(CA&kxr%*rLe&`OL~N1Lf6S<0Z%#|Pc&(RfHl57K@;t4dPL?NU%v-C z;^M=<>}FKeXpMEcI)h;F$W~(?2vL8P*eF?>a#Af}sI`J!js4XQqNuFV%%gBPo9GLx z8-a-;U8V?{riEWhw1m&^4?+Jq(*|$K`hh4NYOFVD1e+%wl?k z>n8QKOuThAP*2nSVE*Qbl@7F2-D-a#Hnyc=e%TCRp@?UmE*+Co{7%v=O!(K>q6vxm1 z+~|FF8*s!^PQ`$!mE{IZQu~w8FbA1`YISO>;ZM{-;*&!W=&xdtm@&d zrJO54-=Ko7GV_Q83@=j()e%$@rvKI;^5RQZ$s&)pWxusJ6R%DZ_SbPh%wWXQ6pQ38 zPff;@d}VyLEuaO2lRHsN;XERv?{$1DkAaJm;62OWfJrP{Yhz9brTclHlmM2A>tk1u zL-?xj&l~OzH?jDj46WERtPBcA*$&^zQQG?nGK*E}>t5}kIoGBfiA~y)2rmk6!bcc* zw0|w%#M*J$WG5C6Aa*hU#vw+mVuRB8iKGdCl3nl@t~ehkP5mrZ(yA8wLmqU0$7ZVd zR+}%Ed)lZjt$#DYZ06*87_Cf!oDdYG*Nx!+47hL1bN23!MZ@8aZn>5I>kEEDF10N|X8ici-DiH}W{w|Ai@Ip?)9XAdFq1!)? z=V=&@8MRl^IkzBR3|B*8V=*ho3)&VbbyVAmCDj=7SP8yA%bpGuNEn0Lo8hjai8okl zBrxXirR=u|EdY#|nXpfSWVkyi1rPpXdx!4O*VlkCQ#n~zq*Hijbn4>Hi{`eWC4i+q zc@jM##q)vF z&RaQHv~ttYv-UyOx4Ml;Xqo@_#F6EGuQi>>wE5FH0|)S(b1l2Z`c(eUDW!QQ;lCr( z%U16B^2Y0#Rb5r(cBGcbP%keG_UZ@-*dn5mJPsqEre*$aXs9oUtu%~ujLcBN3KqE) zubfaf(afqtI8-&Bo%kR;DTvB%Pe_}Md_e^{Aau^{GYAPl=tKY_&xCdYt#8{dOoah%Zgy<&jp=9_hS_8Y>D`65w0NQHa*Ep)#5L9ANG=)w8dXynmkCPOqfRPSQ7UMd~p<`B5Hvkm_@M1^$al_$M!a?khPxe4qHe29+D3b z+mT{4eu#ipVO|Y-F8f|=crBq=wN8to1~>L`qG1Ol#h{?#mpbuw;pu#8M_XoPryvEN zacl~aXd2`v-qQ)MCBoXErsCt0!YC^`O};G9(aMq?g7;Y4MwqU39DxyWI^;MscK-?$d%qkke>*w-6~7A9mnPIIeU zwdU}wL8fe?SSEaOBFhN6lA4P2`3f)qP!lneN^KAQN+ww^@Y+M9Z@qtU&MSPk80pOJ zWuBDxKt30PvgLkakgCiz8plFmuKH)RFC-N=8RNiU;k@#lhfkp#@Y0TKGT84vNvn;{ zw9Nb>E2Whw@nEJcPq7wmLV1Y_CT09|hS>%u&9+rLE=D0an`{Thq+E;1HyIz5)i~lv z_itKu0P@AwJGqnb66aavaA_u0XP)G#UNxH6PPZ){qiLeI!(W-lIa4%M_7}!Jy>h`o zz#su4o!>->(=bS&-()Q#SanDt><<+)Y!}rD0YUx~=ih}%&o*BqqsG)d>9pN@+uXhQ zUT_zYflE_k%Zs8roz;F~?K1Q2J~e6UkEP&R^Mu^;2ZZ-@-r?vWtqBNAYl8kBB!pNhQ@#?h$W2UxUiOh6S!HB#ncRdik zAZa@ssSZ-h?%GCKh_VFn{wEgDEbV}W|K}|e)x+oW95s_rQW&Qxg%+X({r>S%0Ya8% zQ3w(S{o7A=TH5AreOyKdJo788hCgZpJ#Vh0Spz-~myTFogD|eyB1&9RoSj|9t>y6y-_txJ}Q}@6=YY#fJ 
zBXzV4@}2bwp$qt2?jf@pjq^Wm9rg9!KlE*0-KnrA$c?E%Pd=hEhn%3HF;`7_@Hbvq zwnh)#_Y&28DLgusltFrt2_35K0l)cuOZ8MZE#6u}s#!1|U ze<|h8INW+KfwHYDwWEop5_Vf_TJ|t3=?E5n_rM8t5;eWgYgL;84!<5)5WjC2Aul>q zXHucWoc@?F)}mq-YW2Qqjn{2n(^yvp)=QgW-Z=*orX?j~xo!OAVS5g%|eidL=y zCW+WahKn_XLyLdKP_MGbvjedhiN89hG_()oUmR71oOu%tWdn03?yy+FG`bhfQ@ZOS zr;}V@H%Xj#ngJa8t1ZxZS*mc z!t}NjZys)2bGLFo)nZXmwK5H=@ataImU_qLJ?!8{B8zLY0m$0F;zK7+$gJPsafVW7 zi)&@F$h49OO2#LUv^xWE9~f?1NT*7)?7^WL6M4`&ekzwY&W@|LK&t`Yk3aRwOWWqZ z_g#A1UmN=L;QkW+m^lNUchx?kW?X7~uwlqS&$eZI`>KwEfFnl*nfc_O!P3<%&F%*o z*%O(x_EPUw`oEK2=QB1i+_W`i*j|~h1q=|87=7IQ@b#gV(n93&ZP>%cef|m`@BZ$0R zMB(KQ@1knwXj|MeYn^=rhawRi)w=O&cD2py9*|h6CH&_YnA35(QeDJncTbZ00Nwa)2Gj5vJ_|F$1lGjj3e(PVl%IX zO%-e;&wM>uWi>-6#(Ow4%AVbQ8zgb(E0#dS&{b`CrH5MH)#T6go&%ZJ%|Wnj*=ydfH#=1fKV|;&P%pLa&E0k&OdWebC{AV?< zitj_)gF)QUdO4CYM;ep14E40>dervS?5+nk^u1I;|1w_ua;HJoZ8n~=JM$y{CQ z?Cl<>_NoyyjmSPpM(bu_&g8^o(S8roDKHpRT+64p&|h>T<1P43;&f}KfSMQkX2 zTV4_3epr_PY0F_N=X1t8L+Jee&nmUI*1{=;4Y_Zq zrHG~82opaxQBkCO9l~Oq-WeK%bOx!b&;9VNIZQ;y%pc6D#dCUR*W!92(taQ@&+(?- zIM2VZtwl4{yQ-7#vl3y zrAmQT{#uc%1wa=8*^4HuBymQJx9oIT`Em@-^=mKO>o6QI3ir3rALqG3ymEubPuY&H zBgkT_nHsV8S%}Y#L%J1Oe{N8@i-#{nRW|7?O=h`ZCf=-U7pw0F%aWFWXH-pQWAz-c zV<+AY?dum(rO$j`F`TTBW=|SZoYp>a@YZQrSEf?FV!cT~ZOxn|p+FcX{wG=3)*6Wd%0@u#wiiv0 z8t=>KSKr0VZixNavekkl`ecMzE-8^t1+z?OtrZ@XSx>8{i!Mik>K&A|jSG_7bX#=k{$P zuWiU*NoUw0=kH2;q47KVIl-i@Rzk&=2eQE^x2v(de~xO=JucOM9N(t#Df4dj^ay^v zE*+VCmR7m1&?|?J{4SG&UuKxxRcGm4m?(<|i<>V!t4oBIG8>rvGu$0_{mk-;x8vsY zG~DYW0dR!~#hcErG=`(6GocVO!J9CvXC!jgLg{*?`_t1_>?2zK%URGppkfCvlff)1 z{i{#{tRB8i*}QXbyhFe`#X6bJv~|JZ?GTb(QT1(Td#*XTBD-uB1e(w%{C9jL0v~1J zekuzZnZ!eCT~n-33MbL88IkE zur!D+wXzT&FN2{U5^(hDq8GB=-XiRKute2u$j9H`1Z##7!t+F4h#%0*gZc~QGn z8BLAM50(O$91I-53>1njw5XW2u#V7&>!Jv|eyge}t1d1GYVI1)cvnHUm_zvgW#QS=_F zT|w+r`aAhCCd}#DbR957d~|oZDbrI)OWlDXWNNzLL*@G`34;k(n1GHDmJNZ4>iKj$ z->uQcyR!(V3kc~R&L0)G8sWlOObB7gY% zQqJ4u{lfy?L&qW2qS(kq$$>ulZj=J83f+%JmTN@)%NeW^FxLlg2k8~NLQ%ySoyQ7hp0Ogi?}aW<$`4#<$>Peg#YHA`!B7? 
z|CcL??If9Yoq4`}mH-Nx5l|x(X!2tG;YyXI>;TT`U;K1`|G>Zi`jY;6*Vbbn+x)kD z9B3eO`@e_S@_1`WsZ^YsOOezM5SAbyAd6H&r>Cbs;)UsrOz&%mumVk_{l#+sGD9G6 z0)6@#8X9!4lHdu;S+nMK{|%G1BPGa+DQZUW{My?b^l$!)JRj4PM|vPa2|WEjYJc-@ z$k-ff{zoRcl444gwL) zsAGfD+McwkZ@wHe`dG32ZPTJ?veM7EB7AE&W4TX?X)Y+7TLfY zT$ZiOhzdz4dkgSGh$Q|i-m0-&QOu$idS$y*C)oXyN*dP;f{76g9&}*zxyoel9G-q# z_;Dz9t3N>EO|p37lVs*hnisLm6Gie1A>hbq$-tHIDUIjwYI zg0^a}GLeg9>(U0@gLRZ3>I%&_+1J*NYbU8!p;mjWlC1qGF0BB6rQgB&ctHx>t zrgfLm!{!%v_opM5a=yXi*-EYSIxw1Y1!22W_ulrpNM6`*Ev$#t;xgfc~N@kwVF+X?DHsH9=14>zIeu_u`&r5t$aOegNz3J*~>cJ#V3IZh&cbImUz+#pupNu^c*eG3Kd(-Gt2cYea zMiWutA8k2RS`4YNbuuV&Q-FC^U@8VA0 z*!tjXph1VW!$qtYLy%sq4X5ze+Ht|3Wmo^TlI0xWRWIo`AEKDIh|- z_K@k5y?n0?c;8OMacn+6H?Mi6(#?pooOo#u#id3*CkNRikH>jqklpM#jKg3JX*Nv< zK$_T@Wq;c$rq0Yq*7v#@NMA8&;;lDxwV{g&^)%I79RKXgWXq{F^m^!d=!^51*vY9S zby)Em-bWT&@z&tujF$^)$jP5=_jt^gO2b$uU-k;gaHgZ zq5}ry*gqJ%k4f3et3Rd6XeM$65;5gTUq*XQ)I`+0GwD}*Y6|GtMrsSXbMr(GRWPTI z{o>w?bI}`DAROiiXhqGL ze0O;_C$A@3mJEDiiQk&OURD?MkS7OsSZ<9c7m$T>k#!nBDA)y*zSP4S_e z)g;^OkKk#ZN%UcN0}I*Q8~cWwa+FQJVULeWHjq;O;ZZ5HLfz!ri;+21UMH%3x} zkfeZSAs1`p(@I`R>;)@SyVxO%QcIrn_(PVojE9Z7PtrkSI_%Ez&mRNux;Kw`o87MceF;6m3s=7&?(#pOX?y%Nxv@+0H>@u-zgvwBq$u3nVla3C( zVfeBGVxCr}J&n-6^e9UOGxpB*vrestaN9B_gGgUE018h`4HEzd9p>AlBOS#*ESUnm{H z?lmqoI)AQlUvfOJ+6P`+4boO-@S*Ung06?dP=V}Hqhva)$|n3_KEGOWKf*G*)(U{p zxi{A8+|16f;;KQ?>x?f8C0j=<7dy*hnoU=?{;)c&?jK`CX>+jqJaA}9S`2WVk#sw8|U|z zFl)(lTR$Ab*xP)*dCu4pRqx)+j4qk;cVb7vpScDBZG*S1wRO51C1Zl!9q zRM~2}N~|cZw|Zwwx0H6hwT*@(;tJ9TU9Bn|(xTK$qV-bkBCWTOAZe@AOCqf(B@)F7 zL4<}x)XV<-ckj&JKknR_o&EEine#j|XU?4SJZGNg^990(Pg!9IL^5m1q#Y2OU5Y#Q z?;y3Y>u?cFwS2N|=RZ9CA^plWRR;bS&_C+BYH-eSFB&{}+A2-|gwYbp=BKmZ&J;ZT3EEH5d?3DFX~WkVxb#qV91$FZG@f$TESc zfb5<-qng04?w_;8?|+L@e;{LSYIyIiK*i3meoZTl`STsS2(>ZnxNbq3eb$W&1yFmlNaPHcC!2YK!_fc^-m-H zv$?@1*xb#jmz&-+Iz(5IXpi0`85`eZP9HGaYF<@K1l~@9YdQmFXDxG|tu31d5fo7M zPn32bby`90g6frIWW=aOL+`yk5k%IWJ`x4tMVsN@|I&WRv$)v=d-XsAA0xM<21PP97xY-rVR&Nc~iR=TxEW%LMemeGHQf60^t?GGP) 
zV^bKoza>dl7tjQekr>W3XgU^-AGiw{jK@WkgYe-=Gkerdkw=>Mu4IDJ0~j$xY1Z>s zZcuNmbi5&3y%&~LeA=#|GSl%r)hq_G(}=ninUb8=Bo>j6#+ESV0U$e-1Q;Vr3pP;C)421>XQOqcn^ zhy0CD3Jbph--$9AY2b|(oPo)^%fSej#!fHAGmjiO#e3o7EpYMjNSVaU%dt<0!bnfX zKM^m=>7C5g{&G}Vq$uLh`D1FI_FBW1#>YjM{0bS8FEjx} zcOg70rKbUkW}DYcYEgaYNc5}wN|J)Pq`Lxe4&*=P)2;e;nc5|{ncl1JYND3X3ad_h`M^bbzw_=!XWi`(gFH;MD z)lOYO>H3vt{j5tdW##c0_i`$=aJ3p&n58u(ndd|<=n5#ocBHH(-xuE*pGx+r=&FhO xVrl#!0Vw`{zon+4vbFWR`kE%-YS*@_uT<>A|jxopwdLT^coJ^jb$&Yj1eJ4dxZfBx*wJ(jCi&YfdDXQ;3HBq)2kiP`GOQ}QvTLHlC8g7!Iy zFXsz>T$4&pvTLm8zEhP~u&CmAgHSs%(b{}t2K+tDqp|!OsjeRNZQA?P?NEHCtH;G- zEe?!y!Z;es;~C_*2v9vYuVYb921^ei3Q>-TvO7*=HXr)n(<(ALjT2H%*tZrqcCv zEVD%h?zRTU{k}+#6{S9F@km&^zsRgm#`@$cE84sOgx^g`d9*$*1i1aL@#R8YDD6IV znN>r$t^vA}p;77IXO9_jmc})C90!j+P8YxVI~2gHuldnqfA}xx>%ZB54Xx-cSm=FQ zP&{{WqC2B_T&8r@b9MC%j+rW5+Qa9O`h02d>xcL|mteGF8z0#%&i$LgyKq&-#o)LMPc0nw;Fs^+M(xR*#*ZKRlnYb1?c4a}`)(i}3`K z9PXvc6+}DyfRB`fs&7aR0~8Y0$G%5@?nTu?p60<-*^E6}q}H zSBw_`wp-VG=eEypZn-U9aqe4y_Qf)t9&HAnI1I|XjbMfidu^C@{}$^WV5$Wrf)16& z>flP$=Nqq?Fz%EiUVv(sh)PTA$pE!a3eCWY47;Op-c&9=-ZHB2*`)`$*))lqH+(g{ zUwV1WG3c<;Y&?_@H!(!G$%jP+DL;LB3f!K(1e2iZD7J{cI_Ro8kYh^hX2lk!Z$bt< zMGR&h(z|SL^=oIHeG+A}s?x&>SmipE8Rc{7a$D+5Mq0eyVS3!&K){|4QSOp{@C5xO z7V!)La>WPcv3bEU_K1vQQshZ`|=}Ek+02V)28Hm$x+!GV{lUk zeGVW0SM~>&1iWqLgz*BpC3W^2`d*hB!prki?GyqD-4Tn*AIfF2cQAO)UKh?X9c}cz9}?FPPmdqf*6u@BudnPh6-?IuqTg{83)bs@nnaaV zhgqyOc-0I)Pc`xDC1p7@lS=Db;a7O`t)DY}RMs7I8b*YQ#c9|?SqkoKC4}0;&df(+ zfY%62hVTG7A2w1jIAqEzue4G&BO^Flv-;bDOf0k@Kd!uRZDlzh+sy{61q!l^ws&>u#*uNZzH&h=$xz3>lc|Tpl5>rLm{suP?s$`> zMQHL*%I)#o7_Z6=K}cE{@6!7e1>bB2elj!Ro;HI3Gs9qVU$@|XMwe}GBu-3K5uZmG zVmn|M&H1fl>x7M;G%3`y^8tg_qHSJG2;v+p8h|=jF>KhP)o(WyfEYYM+RWYjLx_Pp zn_hDLsXnG51NNRcN$?^rUwiJwt9`h}P8-SF61wWJVxk^1BL134GOVON{q=h~%o1?& zQ&%j<75xXa%j=)T(Ck%$LWcE{%@V7D^LWAhh4r%D2+7V_rz!jz4ER_|WYbo$Wm2)UWHWIB1gycIbRi6cYQ68z=5|qx?Ka=yH(&$*AH3z)v zJ1xc9)tlEO7qL)8r0bHtlh<29j4+>z`~-7HQ2V-= z1#sRlaE_fhA-dSDq0@8Hk)D|lyH&Md(<6!AdGj*=y7Mddg`!RB!m>j_=*fCpcSL!v 
z=CgoScYgK5W>Efz$DV@e$_4mHc7r5EV8Z>-q32=man2?x*3FkJEKPE>=Yn*{Cm-Cg zr|u{uArDfhh6u@@`yuK}a_wOmnrgAc6fWPJ(b*G5N-w8HS&W+m2L3>1__>3l`+Aqp&sHTi1h)c=6Zz3?JJA*_X@S3*aAt8FZ) z^XX+*lhyMY5!rh~;o0v@?%Gg)Med7rnw%}S2+O{jg8`j7M-{y|W?8sJ_;J|MDC=PC z2N~$wBQ}@KH-cC9B+}{(+-Ng}M4s6MRE2>It-&GQs!tScpcL7)3>MQ#6YEd8yv)?p z-vDCwDNSkZ0eg9&UDB~x9_+b+PZT}o5VaFcX`O|#ypZ1`p(m3QA6IO1l2w@L9++@n z8QLRkiLWF!JQ%AnSnZ65GbKXieZOIi;t0K3i92FcdvV*3V4ZF0oW4=c%^_PmIasuF z`?ki3_35gu*QXwh0F(K2w$V}xLDik)b!@D$7jZ5{Z+TntF>QPN8_{U)?kH2qdDigSw3z6>MHg(aHG3Q}gaAYPr|Pg{jYL=@asZN>CWSh4n`lk9411TypT z?QE{K$+^Q;u*D(%##o`R8hNblSBza-Jc{JD6`0;J@D#i^0a07hnqgPCk|3xde!PIp z%=04v3Ci9zhi4l%7g%$AA-eDebi$5kYJeJlX0tcIb>z}JQCN7f>Od2O9&KyuG-zpo zcxnjQ`ZsTu!j!|-f`T=IA{1Yp$nGY+o<7_Q+!@F|2O+&n`Fh z>38j3_a0=YrldT7{P<#7vSGAW-k6AnOpT=;qxdez{&n<@61MwFG~v!&{eW$Y<`sdS zh3V>>5SF+gW&Jy74-FMIpn_Gp4&%kv2^O+FfQp4YAs2S`*2SGukE5{GM6EW|i|ni{=ak<|Z`@=d{o4UDvtE2d zGsxX=`M=H66^%F<4fy=Xs+tpN;QqDF->(AI#G^B1zFi!O;k?Yo;oY$B^6|!D?53Br9-aQh z$gnYYY@N^HPh0_r@G~qmVD5SRN{`Z))*I3Zwv9c2fN%S)0x8t@o@~T;76c~L^3U$R zaDgLmGX?whC!Od0k8A|Lhk@`L`R=vC!?dyq#=whKx+h1)JE-?EubZ$r zN9etS-y3?M)JKh$z|Xm-GaKIfT^&!qNACz8CO~eL^fqWgznCubVPBgtjhR{=AWikZ zadfr~TNRv$Blf@T;lB(!yM;Ci$eGK^b-8cE?;}H?{If>R5 zMZJ^1bkh>r&@irYn0ei}2Zg-e;MPa&s!btJF7_7E@tl|_Bs|HrGxJnCVKEvD$XYAs zul-$`aqCx|OpcE+?LkzU6a0qW_~&ouEPuO_Z&=A#n*oB^+Z!qn#_>??OUif<@{3C zha!Zv#9(A5EhW^|n)C3HoAE}XCw~5P0TG2PnVDdWxjMVcez{OsI{|;`{r1%CEohWZ zDE*S*!RA~vnS7Ku7@TBnb7^a@5Z8@*Cjx2DC>BY{D#xch6Ks6F zbD5PsZw}N++toWe@^?yXXZW;y$jWSh zbroH>*yXYm{z3L!?%_Lu=FLs?=zIq1@9)z}Fd|c3!M>#Sn6kg8Uy-xLhm4LomDnR0 zE+jr=CWx*Dup5{dLpi3c_B|VJ9Qm9C<%@gXd!vOo@#w`b$0w{{>c>OQpFb~SD}mom z*?80>O@{(6+a_uYdJoEW#Nf&ZyRBow9bhW&_J%>#>mgqn$j{P%Gq}WD*9Kk>M&@%C zHIj|xY<-dgZzev=+ExN+a`cV)m6TE~nxj}l@H`CO52Y!-JHTBT!2GQIZs2u8eHV;p zx@?Wk+U4XL>i~Fpo1M2)$WcqRz}yF--5a*0TpQ9+e^0(6EYr=g5Q}+N7pnbrZNP5m9~sR zx}Q7c`f|P;=)?d0%@&J!t=``De5&k8 zq~|val(&@xU*vJ=5)76Gd>QP&%=yQ9eOyC+clY#F#J27Q`}O)7v|W>|N^V8@6@fWF zVz|%VAJ?`WZIfTW;7*!V$FUi}X+2g8p^CwCovl@JyUlhjAsHAg-xJ?;0hU=#UF;?L 
zSh%dTw35=m*23@R{6ZO&a!5!0HPGf4jZp)9bX0^!*s)#na=#P()f1fRW2MoBqO_>m z#TKD&c48U$0RmtGm(@hx~eUoNW6Z!Sr?%^ic?BkJ29v5^N^4UQg z$Ut3&iHEZAwNnsbG;X=~t|1Aa4Gg8Pc}qIQr>+#%)bO~D&XL#@gwkD`X%FWBfT?*7 z@~?6*X~r3x#yPPwrJ#nB2)fz4Ka|@P&0D)>iM&pDxkl(Z6>MHR z($OK(EER1N5!pd32Z1l;2Zqd~)V@^VaF=ZS#haP-U!ntk=6u}iF&{>eYy1cA*PJ;@bQ3Yxk-GZ45pMPi0>x3sAjX;6I&=4 zIw$VjGxmIcZ;@OkDLZMez#?H)3vuqDt?smWd(Bf^-MJl70; zB}IE#(UFK-k7VYxPR2SCI(UkJbGW$S0Rcsn%I@g|ZLsC;MitUSg{-;Xdel)QNP{+{Sb2wr&W7+cNS*^Rp(Q#l6#STvkPZ>`zL!fqcaJ zu0%l&b3udc@sgJXyaokkPlnf;L)ofb*AhvK`1stDxZh^ww%XO0g$!_8i@`v)d;C3{ zgPQ!4PmQIPFt?_k5w=&a;$OBB3k{TaZoH@-`!464Y6kTpM)PlN&cV~=j!)jaRGqw7 zuwZl79#6&QV3!aC2n*qZT;|q()5YYmkd`3G275|wxTqzQJu#(0+$r>GIw+DO9?g_& z;_xr<=^WSL9OE+iW%G!m3(5qbZCG7`+M@z1%Fb#nWcFeVB=08aT2fhF`bdnu?v;ox zLLw;;7Pi%Aofccnu{D|d;L6;+*&pd{`ANjDWC!aKV3Oly-)3Q<*WA_75)U`8va`}A zHnq(l0Y9@Py$XpA+=LjbFa0!{!ja4_aGcVuuA@%6Qg$!o>anZQ%uiD|@`(g}v35?V zc&b?~nIx+Ao zZB1eENyw}Ye7hShyy{9@mS>L<#2+=bFM)HW=f z_{xSoP8>~^f@i%;9~>Ls(sDrinuX;kr@kbV4Opr~&MwUCH*pIx%3sP1HE=oiIm<+c zZYkoNNjt9|tF>o^-l$W|^5e&kZ$j8&Piu{q*8Mw7ty5W3usU|Z!A68C*wLLVLfxul z9oyzzj-{%Djn|Nqr@$i1wxFfG!6YJ7b>}aU~n|MRWPS4Zz zH=X{%F}Avzd`rz;d;x~X@AP9!^^pI;7sA#F{mzzx+Mi&8fp5{yxQ zswevRWOq-0*6t?0p^CJliOw+{NGrL2GB_@G`8he=4j4{e=Wz=ZZCJ0+n&&b{ezYXm7_stG4ID4 zf)ZfI=J^op`>?0v_KyY3TsJ;4by@m~33@17WHi#GswUvInf+rdad!eH;CH8>b~teG z0;L&czuE(BZq;2xG~ zp57N1VY_!1oIN`U1^OwH> zxq)m~ntwx2wiL8_cl$4d-)^dN*2&*r0IL7J`Cowkzk8`cX*~dRA2pl=WUhY>enpyI zVRFVJ=dt}xi-p6(ZLeP^o)15kQYnoDQqkxpTfWWeDHLY4i@EnksH>mm?iq*Bpj2NJ z%A1k(P>hC<65+}G50?g?|HlESC^dl&o0k*onYadQT^b(R+A^uoRXsspV2i(Z$0366 zw!S`x+Vv12uUN4&thcDR+{5PweX^j+*%7iUDTm9PSY(SFD(PIK@=cSI^0~icowlKX zuCsnx3+Wu#*=y6)jQw*8Hpyl<7XH>K8mP#&AX|=Xl`;S~;c(b;{Km@D3{01RN;diRa}FEek;zR)-95^Qb6(YiO*RKd++IqPLX) zpa)*!+LOTWVu(L-n1wtn5giY{#nJ_vnqfjH_0-qBZc?@(LE^HNt+bp0hXnG`SvK09l( zqaT#dhlz-O?ILL?2l^Wt?z8xsirAcqh{KqleyNiv^f*OiORH=&frt9@JCoUAvfPnq55gtmfN#oU)oq zBPT5Vyr8744K?}p!urPZ(N`z_O@W8ghn@_vqp}eAG6@LF5rMXw@@u)j9K9eooB2j# zM368g&;++`3gm2*5Ss&Spt<!M 
zqRG}LTwgwZpC?Jow9#vN!VQ)k`5tC2sQ5he^Yw(B6n}$-3=eU>z&|O7ki;-=auYiS zj$h!TxQ7qr<#%K@lD$3qS>EygOgVEcYJ;<1sj@T#dG}E4@GNAdyH`~PEw`k}4rO{} zQ&KrXSV#U14NS-(IC3_Z6H3c(b4jgvE68Yf9Gz&=%89PxL-2}sx^h6H?tXf2r6WRR z3j|-(Npwo#YLELm$MsdNkQ>-u`&^9*Xq?6GSQUyJOf?Nu;n}ACz2Gw{fdLn1$Mc1y zVR%$-Ej;f31T(rWth|#~!^ZAB3nY#~j;|D^$=#2xE_?P#&A0J~2fjVq*)gS&hlwNARnGAOyaJ)$^PC1}F5GckvwH042>8Iw zZum{BCSd318LAaf_zS(gf|LCHxi7jm&i3YPQRtAk@N&lXhevka1-`hf3*vMi-aPbv z9B9+=Ij?2N%5Ncmsdc1n09R07vmO2T@>f0P{?6jMrsW5h@h;@>uEu`4wviDD?n`Lr z(wotjT0u>REEsn04Yb82Xw=Io%SQ2q!aQH&{8UWVEUXCOU@0y3%_$+O9Dw9c<2Z|< zeUEW0Mtt^H5nk^8g59%0LA|HYtCf<1x4r;%wqU0M+RTefy9t)(C8ARh9R>Y@?62Q= z>Q!Im2@0gleE*0Z&8-~WZ31}%*q`wxT72+Sd%$s7!(+Go*Jc18e$t{vq|o6(f_s3? z#Y%iiQZr#8|2j-iL=h^_acFt|lsh9njtDW{?*#kPt&Dg*-e% z09F+wS$2-mxD)gG=ljb$2H*qL<{#TnAp=Gms2VTORI}fr;nWQa#{~aS)~jp+IjSn4 z_J;SR8oUq8s+rz@8e}e=tNdO<`*rjaA~{b?w#SGeK*IESRhdv5y0WjmT}o`1(F47Z zX{~hY;E-kTzp#K?%8>}g0vXE;6@~GKNc+)oYGb?Z;0s0)s%06;r_qRw2$|iPW>9n@!>iYXg~%-gg%0L*C)vh<+0#TGT!Dc`*}A}GUS;=@u(mIY3n|mpM=KTa1_epX zP2=8s-u;pDz(MX|I!}5rO1}$l&L%AJ79Q4!Uu1JOI4?&#g@0z?raW%Yl zD%bV?eiH97^ch@CR_FphuMoaItGLyC)fbaVe%6jDAhkPGj@u~BDk%6GhXCu}dZP}8 zBkC`;cPGmh2-{rPu<8NZ1oOQ|q<_^*1`?8W*zkoMx}T!IC2gN+i1l8AyF@YB(W znYz#yK2~7%TQ0u(XQ;CGteo*x{y%hOihl(BtwXk-EsKvd?VK{*F6hf5RUbc81s&X@ zK1gUdZHb4U_m5lOcJfU^>}1X#kf?9BA0MVVyL!~5S-SLA2rPe|Wij$CVyC56^@17V z%%Y#&V1y3LY=0KOggTL`&=*E0(BP%xgW1IIakizRvWa-9ex4WBpPTx~zi|B(1>V@0 zqH~r4-Lju#du{4wzzs!YLcsG;-=+K5Ia7;ntn~AC@txti6^Gt^2`2puDH(nNW2-C^ zyR$IQYWr)|Ogw4)beyL`!e60>$5{=LeA{cl!T~w{hiQVS<7c}XN1I%h=INTrI z2FXD#7RcU_8ry6Wb?>Lh=%;(-{!YDW^f#_OqBV{Su^NmAbS3==rE#T$InQ(LKEiU`Bf-7ebHo8#)K@?i zNI7xr8dhp!0WH6D@T>Hfq#JKOxf)fX;RSNt_Ark-ES@u_ScVKYNW82hE)vXa&p8mC z557`SIb1TUI^8Z_gM$wvN#{?yy_X-TuC3cxAcNM9y<#(ddAO|xYBb>;R%cLY0|!uw zlFROoncbUOhn%xQ=oV`qw{51!)S}(SF@F9pWU?hY@AU`!8c2@P2KzAt8--+S)9JuB zcJ?gpd7=xt!T?NGRARwfGXA_+SkSC~Jcr${MWH%3qwR_V+j%bg&2eU#oXMvry5B=n z-<>bB`7RM`u^oNROe}-fAS$3XXZFxMR;#25;y&`F*c1Hx3of%;wC5xz%>Iz2$?-G2 
zsHsnPTp%XK*O$qIr_NsMc_raotun`lWO=E{{p-H|@46pbH%|pn&o||7&VHM$H{9#K zbZY76mZK2umY7d07~ifk0r?j5bZF-)j6dZ#h!ZKFgDZWbn%c-nUQZ!ci$3t$GxExcF-p=JP;HmBjp$y?h4+DR16J`N$CF6qt7+}Ug8wk5tq zjd9kI3*iRLli2WO4w;X$v=)<3(>(Ib=0N?>_(u`JEXULM zLu|=qtLmSEIf|BsH#-*`I}*`vABnzdwRX--8vCOhlwRL&*&`ZLZ0F%AW|+TP8U)tO zu52J@t*&zrpew8v$x&ZDrX-ZmOV@NsF<99}M1ld%9kMz(pu}u(>Bqi8N4@R@YQb zJ$=v3Ea3O2O~zATiIa4r3V7OjX8t6*K}amw<@XR!2ePvJN_#YDe134iJm+;R#k(?KKY(m883W1 z;$_}M1lrQ6bN=SaF{_kC`NLD{6ZK&Ggc9~UYMy)C!~1yCi#b_AIc0_-*Q&9K-ak2)95 z%^O!A`=N<<U{S`Za8ml zv6ZKRJpdkhauzRix@aw?L;^d-W-pw=g0F3LK#T;zF41?Rj$GfINaSo!)fh;Vs~v-u zk;OM*g)-2bmo<V94WmgKFbDQ#_gMoqnDzL?1EZhI=1*ps2%Q%nCL)wb#=VfKlE{`Fh( zf9TDN*D!tD27`kcuS_q0$b>5MC+>s{l8uWKW>}toH3MtN4wQZA+CXNpQPaUV)70)N zFwA^zwO`3e8%=+ZIMlDIh95gD*UH%r(vrtT29!%1Dq+i;&Z5~d>y2?)r=@tq;iEq4 zlS@V+4pu?10yB@3D3dU^i6M=S?~=?C3c>Ujc``M<`hJ!5OMaZYPenD5ak>_5V#S=4 zC1;mBx4gjv8YKQkg55(P&vU&7Y<2~-CMO>{{YwV-%@~dy>9JT{5MZwGG1`4xzXV>Vdz&X;10>jS17yr*u2P7g@BwxsnFrA%x<}# zE@=y=m&M669M7-}OdM?!;0U`JTx;QB19pll7s)a8w#&3JO(kH{T2EU9f|jqZ>?#JJ z=M|)U7{hFWwda|SIU|4mB#C8ypR>7civNhzH@^+7V9}sn!eju#topum4R_-t4Cvkv zo*c4aADIy*3d;hv02?W%chiz~b3hYc-ndD!ga(c`+wE5VMt;tc^Ke1E$+go9H$Ku* zO9C>cPI;-%_Km8NLD`Fgwlyz* zYcTP9ZPjr~OIP0bfvpAhE?;rvN}ukn;8&fbZ&Bxojyz;atiQrX<#?FP$|Z1RSVfpu zdB@%c6p0UUo38RSN(xSsbRe$jcY-Y4M)##D$;iU38_tyHkDYqzK_d~!Y%ys4tX1`n zAjQ8%XKam5azFsLUXD?%Z1lDXxZOE_s;<_y4Yo_K!O){QmO6mgxL@=*Z_I$w5mAV)Q9KV}2n6J`CB zc{zLVwPNIfG7p?3UMwKGqKg~`;u}9`2%mYpqfP=qR8Ih96XK9EAJ1@9h;PUEn^=3e(j$vj zxMTss$T>)2Yf52pxM1?l4a=Y4HW(arTEk9T#fRWat(y8TF(99 zy|cVylc12zhWw#8=FLH+AOoGfFGIQWu_tXLr`F7giSGG}`}Q#^)R$m)wf#nSOL2;;NbJ*l(d6fq#(78&2{Q6EqASj;YsH^S9iG_;*#O{)F zBnUvxFQX4Xq;p|pu%m^^wBJ@3*8xILGR0?lbzL}U<1CecNb^x=jMQ(sUwl+ z;7R;-Z?kgwaHkCNHO~1Qn=7KsDM#L;jY{OhWd{22nxw4pLc;J;sCK{fp;50bO2>5v z@KH#ep(}~=e^zM_*H!Y)sx{ooteZE$*%%2923;`vi)DmRS82->sT}phlZGuShUTo; z|)xHH5 zXMD=({P^KrhM`3(c;L?V6z0WQb;mzqO5pvVC-c6mOihhd!>dTX^FFrWCp5!`TPKR|ZdjDfHs(qE6-Glw zOVfW9y!`@1ieg@YmN9h34j+=9MJspG~SbaI_K&6IX>$n_Q 
z5(-S>`KKahyyE1YAMuN(CHHw}wqAwx*T(SRwQz>@TBl%Ln1KO`@@Fz;JZ^mc5uTzg zr6%xVvmA9r;uE$-`@&1x!@gw^VZ6svUMRwHxLlN1oyW9|8~1*fAH`PpCj#I! zlvZ9EEEAw(wlWSeSB{nh9Ae$3fRlqsLD-}l;E(PA3JmLYai59Nybo8Aw>r@<7!?)O zTizWP4ro6APk2Nff0r7k1KC~rQpzUFXfxsJLbWrTj9+e(l*}dfk-IH#kE=y5;~+?@!(Gv8 zLB5)e6?TB-9?oTJ4H6;(OcUj(#&3)j&7DTKOfaK7$j9Zz##`i*@1k176=>12;JW~? z2B~kAm3dzlR%T}lyS89)C=`N#`ByvD#iDP}sPs0E)Y$5=Zz{;M5;UX5`U2G1BWQg} zj$cNdxJ_hpS@z(wGc=W6b1NPi#=rZC70}4DH{`j(P^e6T`UNobLDYhlQl{EQ&nb5p zWcC@rJba~P!%jw8O;;?OlRU52cAzL-X%SHd3Gwf@S)xvIcbN#tB_;Gd&8&(c@1;>< z<;x5ItOk~PIK|U=>$iD@`OXVcdUcOfuf;ihY&n@sdA)_dZi()S;^Z`%i}7*ijB%8rXZRJavxnkcD+;(`-glT}bZQnmO_{!`lo zpciO_6*T_rW(_-i=;CqRqlsLt0ok9KIi1+DZNg=}|GE1}ALHd~_Cek{R!)T8Z#wL(EXbY8#?g0Gf}wOb?C1&wEyNOEbzvHhdwUU4Fq zm`@b{>H{`nG|$u6?C6NaEo2V4nYU)DStF*keo&CP>{*UNuFd4aE?EEkS^73{t(fJy zX~xnT0AJmVHtsqu%tdG%{7DxK)0qz?PEp~Kq2-@7IvgOOs0gFis7dT0KFcNcaDfoh}{-OG`H>5{wF0ND1riT zoU}qZmj^|gn)O>4PgnOF;1#L=nIzcN}_wp!rH`gHX(9p=r5I)52L=Ebn8Hu+=l` ze{$3$t*Cg>(>TUd@8`LHBl1_&rUae;H_WbnhTZ=|lEnY#-Yk#lSfeFO?_Y^yvLF>8 z+j3OJaS?;MW`jIT8z~icGY6;u;~CU*kDbTU5xD&*PL|tn?rQJfeC4G<^7E%jBT&%J zM-_8TZQAMI5k|Jb%eBr|-A4=m>2>}xov+(nO~Q26V1KUa&Ayiq=S@iG91~+4<=~!J zO?oF;pZIMosbO9j>ZWX4tqcJ&PT#qQuCN;7?KxS8J&fC!!r#TqKeJ5y#3>!@7|pYg zUVJ|mreDT$q_Z(jmq-dYDJP$2GKZvUmlXJKF4Zm|7A9>WB*GVQmU+%`>b?YMIR%Bl zvjpF)A$?w2Tb0h%mZ(anlF3o8UoPdcnV&5Qy@Kyb?HpYaJ>mT|eWvoGTLjFug5L(l zrV+f+PCn{Musku`Sf<#htwrt(B;6h8=CYvstC?U2qEOsJ15;S?@)tN1bZewGuLH4M z`nT#0k4#3*RqiESNHXSedad=;0>XHbWN3Rrc1vRM4dr+#&wH^$<0@#i<(}${clS%i zCeg5sUZx#EWu-q*mu82-k_NKAbwrz}oNBnrz|z9`}7bELX|F_jezeV83-RFt;zP<|=F37H}YTub}wkCr0mR)@xdlv-{cFJN1pAom1 z8&6v#xds|O)^!tXquzP!?R8b){Nb+G!>-y8SuO&5V3MICs{t*_ADod=M_@&Z5_E-6n z1w{prj;acN$_$gNgAz9tLs88=v$lY4TRUD3z)hMy$)l?Q`c{3CWRm$V5;RGAv+g6c z2kW=!dcl-Ed4PhPd@!B}k$@;0ArX<))^ ztYL*tb(<{0e+99`o>K)oLz~gpj$G-()?^5Ji-Hz6jlU}=#{)H)CPTY-*Pnom5=r@Y z1*T5Nu6Cl>Ml0Z7LAHundARlV+xT3G%I_6ja!Lc)_&*2r_N`x7apI`~uiPQjR+G36 z2(l_R8^9!*khA~f_Fv~icr^6fcJGrGpPb}82|o;SW`S>6c$L6UGCPGD;(iPZAfzuaoQe#5 
z*+){I;;Es?Dze{RW+3Ngt}Ge5F${M~3i@{ZaUnpE*n64O)#hs78j{VZT}z0pPLz$j z6Q2PSD-GV-*fTUG`@Vq8g_f9KK4Q3fv`|j=8_Gjc97q3FbL+CcTcr#3?2jFzBIq~E|n;T5A zVHb>#=lfGZ#^GDsrXT(2EU}>At9BvJQnk0&n9|jo$qw54D_gRNy~zAO;jkz&IVA-z zLlTub7%4KP91)IrfU(t6YnA5Qe{lqsdDN57%3y6BRo#x%{2V zFDHz2Cr;|sg9DF7Xk!b9T-QU$SC5(ZGgd#io>ynRzZQOWKvA?VIony zbIQ55=1rQe#jOom_BlZx6Sl`$p{uuzdOzMU|EC2s8~kU-E7tV<(K+7BfB(+mY}&fX zCXuRq9?y7N|KG;9{}X<=&9%-E9^Ee4{@Z}g)%23$KT|7oPjaZ%fCWX2pAS2EcJm5; zf?E*0Un{xx{Xq^#pnDiWYUMz**PLRp>48Gnl9~O_rATEY&9ZaN)*}lp{8N*CmH4}6 zy^&1X;v4ZU39ja$HH#0do0L>##Cg=?@?xbnT&2>Y|APR;#c^rdtpUo!}o3Bb`Jb;AM zE_>V_$E>n2(9rW|R{92b@}7B&v&$c0X`ihj-0`wht-=+zsDA5`@dHQg9c}-XB`M+Y z0(HOgcQxW$jPhvlTV_4gUL{@G{8asi&Lu7RBYqTw`i&m4?$Db_;o(PiWoWyIuN$y` zy$Uf3?2hX@&GRsqK7|9KPK-YuzT}k)xJs&Qd3fr1WFMD)GgJib0{zXZAT61go$%t8 zUueaO&T#l0CNA0Fo6C_{zqi9uxVis}x3`RntKHT_V}y_dcXut^3jzdpcTaG4*M#8i zF2TKUFCe%(6jnGC?gWD7R`Q*_Pj{alJ#P2CqyN{BwQ9XG=VO!clRD`3ad|-JxlXxk zG9%-{KNtcT@i9bfER^sK*;$uMETJFh!(NmU*Cm@708E@{f-=7)k;6+!zE6!WAGa|@^K$4 z)AJcW@>)rHuy1!#zAaet39ArF3-~9|qgR)YTPGnFo%D63D?d-Noy(Itrw%gP)-K?j z!1(vVo86H(wnzENX_rXwhgFm3s=b3!^Wv!DAulsamtj8k>=Hmjm(zmXqracP>2M}@ zD&2A4@$pCPP5#5YtbVava_%8Vu;O4O14Ef>CSGcKimHBnJGqODf(mcueVz>=u%*Fe zz6UctK6zIS*Uyc{KI6M4=ygVR) zwD|*s=6y~2F9N_VasR*pF(m(O6DQKf`fo8a$IaLO1ZzEW=01u(4w?Pk<-MJI^DOlp zjgVq0q0oO|zc-s-xtqCu!FBC_UgH0_QTz7}|6g@;|95`may-?uts3ypS(1e+kDhO2 ze`;!L)A1@xSjuHu0Cfe|e>rJnj94p>4R{kIIgswIK5O`V4T%JU6mxM5x?uE^JdTas z&T472=4F+h#TiqsqDmTHCAl3!Kpti=;#vWLMV$>BhqFHrbf|fMxMWmDoP=7ly3iFI zfy|`ICr1LmZ^$L`OLw{va^hXSM*opqBLK2}-tdvA4#&Hag7mX8xM+_zpxA)Au}@*c04Bp;4oEdZqNO+PA+I{ zA!#ju?Xa+D#iejSMh?S7*i|WiRHgnjp-nSFoYYvvm_AXIms5!T<~H5o({H}ZWSGoF z!lwX#v{;mUNdi9V`+3z|<1`?B<9*z>0TB$dBCnJ$bX2An zXdmsI7~W-PC$v~BDp=6+=nBFY7jZxGvaWG>Mpg!p%Z*h#kIz6^MXR}T=w}!kxv{d_ za0x4~YRfrGlBl0&=l;j3S{}fDuXGP3vQpEn|NKc~aoMt-78ZK#r>h3m-2w8UMd0NC zGD@)X+&ydg$QT_Uk@Arm=i-88P*(_;Ln0ll`2ScXxGIxh8Un-u*pA zYC`*`M9<6k2gR#+UL=L#@ew=@PP}w$6dyG`EibJq(8lG3Q!ySY$k$Ci$cN%&>H#7D 
z(@i>f^LxqnLr9OQXU+mZMZ31e3NnZ!{Ox(8$N-!Dw1eoQkO9AmkLN+J%Yxd$2jZM| zMvmGJUi&eF9osVy^(`%2wom@G3m>3K3*=$}?OzpZ7XL_Ypf$#Y6QQq>@1Hst{Ck9N zhT{?vwTevS!?-g6{_dVnHS{Am#foo^mwbG@r<|)&bU)DkzTZsAR420^$Z}r_c2x6% z9N}*n4eBWT1l{bLVdO2l39C9enJb2(1O&e%ZFh6n!*u7yOjsyRh|sfuuCt2Dj^$Qb zYqlF&Dr4P$*1k!+O)!#!_3ZSq;G84^aN;wQ1dNX+_tpW(T;8k9bPW6h+qlW+qZeZ3 zI>MppDJdK^Emx&^$D3~z{2vg+$w|0AHMI`5#@hz(<0nYsjfHe5At+r=y_vX@FEdAq za7c(Ld(~EQi6ivEw)a!Qt;bxW`&${5R5%P(;M~kMMeskp1!bNrL{4N8(h?|;cL_#U zjCY^Ro|^ae&|e0$T(uDpJfkRv?N9{`(lS6kG);PP;AOX$(XlY~n+VK&;(jkbWYEH3 zU9gpY7P_}LMc)F%6s?d)4nBilynwp=SKSE6p~M)(*f8>;!HN{fPEmh4GZ$w)US5Lp zh2e1YRQbvNH;BQMqx8P?_zh-}Vj3&epjYUxXZ#m+>?+O?S)_P9-foC>w^%M@{UNj{ z+_OjiH|w;B2a#+E<_x5-#G^x_qu%7LEm^PV89J9ty`3NX8B!iSmq4gv0vUE~8QK^_ z9Bw4Bc$-lG;j~e@ij{unRGb#=tKC>d&3Hbw<2lZoa8GqvXO!+{%VK?$l=(Bnbz=N& z)mn|^vLjw*a+6OZ?hs-SfQ!sNtMPMY(}O!OL9T(nG4YrmH@kLw2kL02mY31V2JCc@ z_PR%^#_Fs5pZel8 z^JUGe$kGuF*jOZ2?A_9c{5U|XyAPMptr0I+EFh*Ru?v+!pk8)zWd0}Au-LQBC zU~v6r!}0b_;>Xn1ioII|$JZ2Ap6%r1&bL-gQ9*F-gV|21p2n(c^t>zirT9C)wpBrY z!X~)9GeH$bTcxf18vhdWUU&;etzEUW`^Xjn!7ljXCR%GcR>l=KQnxwdJYTe{LlCW= zWKYKszneHiszCh0mC*l2YYP3s&O~8qgnsBIQ9Y2J)8hRg6InW`4`FX@~yNqfrQ)EV_;>wxx>bG z>N}F9vOK>b3shoNaZE9+@q}5gxneZCY|nXGOHH0dqhi`lV~v{9%f?7SD=qdbFr5JeUm0hk_^s~&CzxKn&uBD3moKQo}>Xf#@PXf)1hvJeZ=UG62!Gyw)0-d&Y8mEx! 
zVBdx5IE;HvsFT@RM?p{eNzfNLEb-H@OpeWmTIS2N2Aq`m1lZ^|{X=-&Sb*6~ZZ|f& z@TU6Y-l=`Ya1maOj&2tSV{U$Dq)V@0hMQ&g^hS6e%*1_VysF6}z7b|;)OoINC)UqO z=Lh#k<|Odb0MF+JE=Djm^W+?A2#U5%6l)C4t5eB8Pz69UiGs8b06ROcM{Xkw{Io?= zsn`UE-@ph`lOKO^-rt{?y$eJ@oqfd6_I2#5nFUTobA2yJ3?$q1u&po#;dOjpZvl#e zh!{N^Vhu;r(+w}KUR7PAH>qKnz-;p;#F(UrPzGdI>*hP$j!*2TiwA^Ee9S8mz|=+% zk7eP1VF4zIHHB8@X(O0R>TY|`ZS?bm&#Eo?y{}W8H8j=|dGaEQL95+zslU%eh zxn&pn*1RsymBFL4r2;dRM2-p#3xAK;(#BIw#U5>v|BEr;((t^{|ov)3qOyIdEaaWPK z3B}($^&*iv7S;4*eF@?_Z1%yaCtQSn_iE?p@=Gic7dg79s}7Z=!2;-U>1fYi57Moq z=4NJO;x{yw;DD*}Qre5u0JG`3S#zg9sE+0Bv{N!xKCLyfvkO>HXI3PCqn?Q1+$aBy zrb?z&Ri0a6Hg~rQ@BxXq>P%=X$EaRHOlU(4tV@Jepjx_)qj>up5oXGl8nJ!2#?FsC zV`EY3A@4uuKGx%=MKoy?n$pU}1oZSi3g{bvN7~+cRX#7N)fp zRzWQJ;NjL*+wDT*89A3$B%&H?xB+VZxx5xNqxBBqjJGq{rYhn< z#xa8ix9ZPBdT`J2ht*G6c7!76yq|~XJ$us-qLI)l);5ux4_j@ABK}WHfirml7iizhrNtZ|LW8`Hr z(Y)*uz#pZV$C)!;63S@en=Vd@CnZ53^<*id(g*5)Afd40o#;DQX~XTNQfEg z^)S-y>kZKN5905vk?0jNSp#e9u6$yPdh#4!Kx0LbGjuzZq9HNb!>2kl+PW4^-b-8? zuM@%oTsZ!_>DE-rKTI#Ht)6GL0$5 zyAL63`}Q+_@#X;_>o)5`$|WRZuAt&`F=}q0(6*A5x~nrb39t$@wxp)xvRA?K_+iuV z`nBYHw>Llr^EXB(=?+nWRQ9KL}hhkYv62M-=>%4^IVQj@gKNlwp*_}>796e5x z>Du?+8b!?mVew=4BS&4`?n32Y_77b4ZUHOIBJnv|Eq+_&Ev?JjuJpehG({(yk22}I zPy|MG4b*gMKdd|r&4TAweJGpqbL`+2>6lD~P)HzEK_NBWX|z5Iw7NX+`**rMo<;hI zw z30mXCdQF=M_YhUtq3M7Q050vsh(2)QNmzEQKQCbAgc^Ex4XY1KQ@zlLtkO$}^F{%G z8Ej$yAVkY$Jaq706Ipllrckq|nPKGpup?rq!t*=Nl&uw4|Z`)lh{sdndahQ)1e zSX9;T8k4hk_!B8bL-@0aAg}gXrrXYzg75e5v{iAobcO<$bXs^CMFFU=B{9&M4vZ6TyEtOtMg8s3k_v#sXR+Nt2W&}trR1H~jphrO3 zxoeWwf-1*)abKb>e~?rgw+HMHEd5j=Ekqqlsi#s@?V1IQ$2k8(J`0MiS zpwhA=jW`LhOv2c;sp>_Kz(ZPD~(`@06$xcVYQR&%{dewNCMcIN9*iT4IaY8EU z+2sk#RmTg;h4b}3Nj}cGxo;Dh<)v2)t zZD)sz0I2l!siyrU8zw5dm2LJ{USy;&{2_GtB~z2n(Wsj((G`cYG~~^>5la6+ zss>JVEFyoYq5v0?|3lT*{~b8%zsmFeR}kAIf}KueoZ~r?<5bsG%lEzQv>3CAObp^} zb(}6yDU_G;U&xN(aFU$?%!Crk7dhfxxg7;>8mkHI)tt2Yyzd4)G6DLh5Yi-tgJid&)6;x)%{DdJj%59Bb{hKYHHWmV-V^zZ`j)1do3 z8#rbMwOrKYb@|_&;h^LNgx6mUT3{zXnFyVSbkrDmc7@ZDI%N^2-HKSA4}&YxoNJRS zn3?^6Gj%OOiFIJI(>Nr%sxY~s(V=mQ77 
zzZ;p1{P=0McYB++*)%nqn|#y8bcneL7anV%+reo4p*}1_oOX*uyKyt^$Z^thVIVdW z-#|Y-S;58WH`SD6UG^0(ny0MT^)PfcHyAcEpa9`K(8m&@Wb$+xo_*Df&Tt^rKR!An z7wys^?Mr8>Vl}Mq@UG5vhE4Wr7GZyqUT)BD=((6?CrUbP zK6_`W;zJWDDc0HK9S~i8bBl$&-*^Cpg;lU_RC7*Iv5%&@o`$*q<&&zm?zai9u1Y!a z^X%YW>4aj@ecCTjGCP^3ev`_zX2w(3o~j5;z2a%}Xrww4?G)YSq17}VSmi-B2lp>d z?eCidcE%$8dH><0-!ddfjS-2jbB4tb620FoyRp^LZQdtKb1{3gdy4&du{ar-R6by* zuT_ms#I_2KWE1_C+XDL`*%wq8k_46<(|tIrNB{P%GHR}Eb%JGSmFUL8uV3lqt;OKc zT>0ix$FD+_4b$u1^TiQ%%+Z{7+YW&D!8$AoUlBVYfMiUbkjKS=#d_;JJifczb_s>( zzu;ER$mvDRi!EuY#)yP)L;r}7kL9t#02$BY{v3U^spw(isPxn>cQFj-opt5*gOZV4 zL2e;z64FqU)98%{S(b_o^V*~1Tr*IE3k&60i3TThv;`l}aaDdS zHb;6+GEvO#!b=Mhe6OR*fwzsVwu^9huc>I3yt<;NA){?#C=<)NR_&PX^RfA1nK319 zF>uuTZ`{G?3vt0%)`iJx+lYra0nuL-C4&8n^ns;BX8CH9Pi|08bACR)*?@w$DYwtz z^M=yeXje(?8A^N`5evUkObMgkY-6N<)mU!jW@*$4iBxscFp(qq%t^Xb!~OfGQv@Ur zFQHm@c2v3seI)~^I#nnu($7JFu%t^>>nE#jaS&M(JRET3Gm_s@z|4H8a#ZknZB3Ky zGd6LO6N5?!3U*srNFd%1R&xKH17{~g%yPhE82J8kIY5^I=o$~sasd6oP?Z6{Om^?^ zBn!>Qve9{X<(%8yQmOMD-He3xR?VNvl7{ZXKE<}DS}c&0mu!`Vxy)x3-u4+PRi6D1 z0B8LhYUOxMTknCz(17uiW3&S$Smzo?jN!+aU7fbFHRZWf_RmnYUt;`k_Mk6J@FCfe z`VRIYsXcxdJVaRdifN$LEOuwBc~XNPW9=}=mIR}SxH!sP4z9tHmX1X9cEj!~dl-|( z01e`K&1VUVidA+vdiX6(I+FswkTr{%Tt%|c73G>WPq{{IZW zEIEoZlKFx7FDyWtrE)|*ic8o1Sy?XK6f(_3)wl?%dw)k{TBbhtrWF?!F;o) z3-v!gs%kkIbYYki@$aYcXqeErR57iA^BcWK!C=ixRicPpI42n*r*qcp!Q0qwvnHK) z_phLY7_b0tb^$!Vk>2j^d&GUC{gSeE=*XzyNl66TR?sd5C z;lfF#RMcFE)li9vEw7$y3FHJ!!{lq+*8|!taS`@3BS zl8aJ)gC4NL!w%5caf&6m{2DAZ8x~9Fgg2ta;@RoutCFSc-YX!;~11D&Lg#Hq$dWZ5)MmkcaXaxxoSwBt1xi8 zJQxXAnA+bq>hy%xgC7`+8-L0f9mS_O7fYt0CJycDr@KooV8y*SW{PKhoC={J0C1m{ zHV(Qv>ZJ78sRrwmk2Lyu^0`1Rs9awLf@F>v82$uSl+lT^c5@Q(>#483nDn}f&3}1N zKC()O{s197q4Fdo+(6Sbz7$+)A?c~R^iN4Jv3EJ~l6)EhJON znV-!`;5te~D`E!dbdZfJwHeF8;aqOxeZrA=c`E?pFv;gqsPHh^%l*?j1let>+e?{? 
zSB0mc{B4?|B|S%^<7uzvblue;Z8hcXbI6rNp>gTLAMG8Shhq81T#iO+eVIPPW9TL_ z-coF7v-Xl{v>lphnx6>U@gay$PyyH@LU5(VDTA>{&4A5Af)s^iH|e@@gy@yG6SUdU z#K3ScndHSCk8I9sLnQV^S0PJNJJb>(OYXqsW#isS>WKO3%cscO-bH&TnCejd{x((o z@U6;6x0oWKOuknK+)pPp6!#yT{u4IVzr1gv3=d;dPhlKdmJpQwPh?mIUi%H*RZ^Zv_I7Ckff{@alY2#l7-oVI;`kZ654t|6^c z{}jzqge0wmGpS0aH8PMdoOV28xeA77c>Et2Y!(>o3RbjKkaHqZJK}&BcqSAg%x-Ux zPDe-XA1A+eJwqf4W7c?Z>|*z{q}gAbC5MD}7hiJoheK~i?@Xh0{9hoUKUNH^jWR($ z33c+jMt8-z>A4haxZGN^;4HmV%P22Gt%8Y_vFPe@T8QYV&5X|zf5s~*@$~5qm5!@! zVz!)B`c+hPex2J3)H0SPWqWq;`<#TH+p;73J=Pr;pW3y-IW9J}lhU1sgR}zUd7j&; zj-iQ6Dj)s!ufrhg?QKl?5LwuE$5!eO+X?+kqYLQ!1$`rXlr5e^T$VUoK-0qV3LT@P za975qnVEzgvlG@UF2dRz2FUnrky)`=hP$8;%UX?X;V-d&1IwBfiWuTKKLtZemRc-l zYU;%MOx?b67(f#}?|V75G&g^Zekx(|gMJq3|ARV5XCz~4#=k}*1kisa$Fg`_rtgwtZFK}ZCeLNs!1W(o$ zlO6WGy)RFHh%YwAr0r@X9!1E+KQg?!5LoFE6+Y=qY>bLC%9yXw0^atEJ2@rn6i%n* zpCHmr^B{#rys3j=3FtPj`uW-4je?Tak$B#IFyNceepQ+uPxgoQC^9Bx(E}Hat9zAo zCBS!Ut5yf2?g_Z)Zg<{PS^o69QO_avZnnGv*g*BlD^67Zdp?N!V?+0>&)Sh*Or5Tv zXJ=`Pm;pWazfk?%J;}d08Nj2?y^nUn@#KtPJ7>qwy{T7wr^68j?!Dx%0r+XP)3k(& z$uK6z_wv=;mwi9+>u|&pv4^Mbwg^cRbaNMKTerUkVtwI?B#PdagtQPizFQ7WA3vEx zvl^%c2&r;=e__;bE3^2JLcC&5_mQ}m)0g+68_{a)S4KELV~3|6knuL>#Qv7lJvvGXZI zTx~8d8s`&qy)FNI@1}0ET)UqzmdAw>JiRhdR~Ni$9~fTq?fpO#*c@eu*d_-r)-|?u z?AjHLhh1rL4>G9tyYFu7x~g+56pz(KD)8|gG0kz)`q9u>NaVX((v-D~ipt>0u7sg_ z+B_CcJJ00?6;Fqrufd67-5B#EWf?rqC0o%_7VTOr;l`4Yk?f7>M2Uo zknsYVpM>dB!8+Xb30-4}G`&s-|L=7?RGRUgk4Hv$b7+E6QP1u_tQk;{k120A!;Y@<_JX_Pyv zIzGb^5&8g_Zob-2L&?*~1~w~`n&0PqbY>+tDMZ6u+Yei#fCREA=X>R|Ad0_99$RnY zbaHc+u8dsfcKFo*E~!~Ob2h6MQLiO4y-=QGigG~B5AU&y6>EtS|{jS9eDQ# zt@ZDpkM@jf6uYwbe{@8Ytqb0X{T>X5(qMdEBA&LH9Z=9G=kx6Jw4Qv)g6F(N z@|zo`nzy;m37E97N^R|i(~*k_H*ot#!9dq<*p5UqmpURU-RgT!WZR(nkQp#zMkDb! 
z34e!q>N-Q6IQ}$~M)_q@*JKh=w`A{?i!a2XqGF8Y7WqV|z6kk#$Xe0uCB3Q=6IZYJ6VL74{!;;z#L!mr)UY*(HGd zseRK?^qE-Ni{~KiIZWCyjl8$s*nqd$;{u6XEVi@Tk!+=D+Q`dKwbkE8ZRkB24qdJ{ zXOSpX>M+gCbWuRwcKb^`b|VW8-)WrPY;+PW#X5ua4HLPdhqL$@fFLfYL+eF?&JDZL z9)ym4>o$c$GHpDo!ctK7#i@x5t(CL4)5{erNCMZu5vBec`V}wzZ0Ju>ti+QTTfXCB~N>R$B8^pKgYOCP;?- z=%_zWA0g4B?sD3(jB2+G`I=v7e+9b&2}Ztl$PhEUjU+(NuXxQ(T3%}VtZA>TTpzyH z6vuQEvWUDYbDvzCjcxnbjJLI@{Uwk{Hu%{&4eLb|bK(g_j3uuevYJ)gk33#ll70&b zTtzlSfi5S_c_}A!w{=B{GkDvZo z3l1Je9Gb9?`90zkecDXqod4Pb01jSS!Z1YHBodI3}xx zW|e#Pjp=y0LL|l9qAT>AuiDK#ml9$sH>j;0>-75}pq9e>d-K7u>GCmp#CJHaFObe> zJ-zfmydE9T5vduRdeVZ%|N>nUPP4ga!OG_c^b<@!Y z`S+u(m+r&iVM7*d{D;(#;?(B)A<-9q3KvNn57=c!e2HSzqp|)q2YXQ`l1_SJ;g9M; z7YuS|y+i`}6JYUSz2PRvo}2J4L{h13zo$DsDb2`8ja7@GrJ>J^z4a%1GoPjin&;1~ z{Eh*_3e0>7)DYW?QF~}0dHZ+yHPFV`4_+%LXv=Wc5`J#>*f)CuH)9MFXw^NOSc|$& za0HfXQca|`In59_gh(QU%P4-Y0L*pd_tn^H85ZtDnfUovewb)cdLfZNWOUAH?3T~` zTz;Dw4;lnr+a7D|+D>AHPAj`*C-RbCL1}ht=~HXp?ebnW0mw~4(Hnu6ic&;#^`{Yh zzPq*Et8t|o)%Ij{nb+%=rD|N+8JK$QZZ+0jQd73^f0wet)sdBx1!*-=>5Sg#%DTj7 zdt4oRUY?A* zdh7%VU+(1&EuvFOd#uFvhw2@D;izd%@Kb-;E?oMFNQg)iiP8|mWo8%2rd#>73;i2$ zhNt9?9O=$Q(~V#KY5i!0lJdW>081B%Y-3WRvxTERPuXPiD9pr~^ELJ4W!01k1=gjU z;iL{mS1O`K6}bR9`de!2wLy8;(fyZ{qEQzn6pU@vE@RFn7n0-Y6r%DgLzWv-#(Ve$ zc6E#bSCy7mse8_~q;)lAf>fVfq#v<1fmkPg+>fLHptwI=Y8Hs#Ap=S;H%w>E>Y#W1R+`Wh@~o!>|Y zfKoMi+_{8~cmI5@kF+C>Lq#zs+FqpnRsz(YUkS9oF%=zS};+zuMlJJ-yqu@;x+D%Qfd^g{k&d&<5+Dr!lk#C zJsS(4o90v06T!u2$_f5i65Z>y40@!AKK1u{S}JzJ8Zsh!GHzX%n31-TkTbKhL+TMH z=V=6Q*Pk{9ialO?>0$@9n?J>Y`_Vwq=TD*1X~c^;sl}HId@*ll6FV%|@aI~KexKwv zf1H=RWAOb~+zUfM>^7|iXziDkUF3bh)hu;UfS4Wy&5k^6`2jg*EeGwP;MjyF|Gekf zKCm`^?hje1Oa~_`<1ZYl{v*icaBvL|=qqio8gE$KqNZpRm99}jwN=LECk%qVHFm<0 zdv6V`d%`@6zd&{8dKER2OgP1BwR({^9lv8BAlqNQF%k{Z|H{VAIk@Bj)=TJpT&FP- zXc3_Ik{x`iGEsiVXXo}6scvvAnsa#6R59{h@C-I7L!IizS-ej5_1xU8EbKi}jnF2` zs71^Tv~?}8)zt69=jC@3;^68J#wu>0El15=6cE7Uvw%?1JWl(y!c|=w622@3`wy?9 zE%^ZwsU+f0|2;L?QD|nY{1xnRd$Km1fFnPyWe{6`TT`JqFVOUFUh-m*>k=-1E+@k5 zSOLH(w}uY@pnH(9YD-~}%Uncs+j<-6ab{u$Z`-H70(nIf+&9cK!p 
zA^;Plrv4#5H#1PQgFye8%wmE~?B{Pu9yUvYb*TD8gC!=}tziZ-^SsHo-ExXoxr|Vi z9)TPQT9C;40!M4jReix$LI^Ystt>m&hM3n*4kJ4d) zxu=6>@k1WIMnKlG?$r{3pt|q*(imF{eN3nF1uwnhW21eZ{zM{R#GjnkeoAzv?xI+i z@Y=uG?RUjdqwUJs`1>ncg8`$Tz!GDOjHh2_A{#8u;j+Zbj5>sFotNpNib&d>^inpA z>8>BYtH5`WlCn}YXuavuX`TjdaOFDW@#h%GmjPl6k95WFzJsznKSV_rIS7y7i5isl z-JF}Il{$SYIMTW@IS=Ys^7L_Ay%<8aRyj5#&RfvDx*kt)u7e~mhBPM+_Y~^`X!M?v zTW!T?e{17%Cq z+xnF0vN*^$8UBjs;{jcM|L^NPqhI2Iib=m>!wxCo%=XsL?xOLtLschx2@GU|MW#0l zglUflgsKxbW*;<{94X?y3@i1}$4J))7YKp(Xl^&)ajo?okS6n$RVR3k$*+%_Glv&6 z+B~=dO+^(zUQb~=4KJ;RC$!6+{7C+EMB7rkDBQ(JU5$t3-23bt&1zXk>+ei~EOc5x zmaAW_>c89Ec;p845;>eBU7g2`#h-3I9+yD5f<)Z~!4C4vCn>92F$@h8IKj#0AM|Gz zhueNCc~d}N?V+;pu>G4UuJ5XBF}?mLLIs|J@xPb5pX2{w`S&&h@Z|+e_cOtNrN)z+X#CfD1}*F*!5huh5B@Ez!SrrxxJ<=g5)&|H$k0 zI$dMVH24G06k2Qk%S;9`Y;${h@QheIQe}Xf(KcuFEG%1``u<~(0Reo_lix)`j!RtY zN8J{?Hou)=^-U+2R$b9e&b^txZ=EN}?@RDPdbQoViJK95WA&f0#eDH@cbvyV|C2EZ z|9dvRy`}t+q2gEV_g6Jqj3mtL68$Sl@!VXEH(ZzQ=FECuxs8ZYS)HnfmL{QZoA9#e zJvfzSz!01(h(@)C z>NbDI{nh;^i9gUcEvF{${h#l>t?{jy>*XD4qM;sm;RUe_VjEX~ByM(w!1SbxLrjKgmDP?o2*crlRc$;&mHekizp7su6Mol%YpmjMu2!_)4f3wcfS0y(j{u70+Ta{>D~d$i0%x&Y;Pd3_h8 z|3LqA7uVL_&5w+kN5FmOvFIh~@!`#61zQ)Sr!a`61F^Ja(GpW6_p|#x=_L2P^ZGIG$xG03XWJc z=Pby74N^_)ZEpV0vqTXetiH+kT*RVPtJyAnY{5>BxxCCCVPY%|o`F~Td0X+*37I)^ zse-26v1Etjo8<2&4Ac`-aExoSOF3>r6-F89^w__SET?7~UOfc#ex3BG#TUV3PwuE; zT&w}W4HCcIf%KxeewvFl)^TBJw<8>Kcb0?|!bp~~5bVKui(w`&n#DMZ4;1iGPBPaO zbk@%KTy%a0ClM?{Jk})*rCrmM?B`SF&I3Tu+i)UuVgbABix#;1EX}Ov$57gcCcn^y zgp)l>#(yCW|Jv?a8pr`klSuF!8=66}lOAMbXm$+D*n4ME%5*5ZwP1F^TjldI%@@Z~ zN|Eo5-urf6_EC#b*x<0FnZX9SQyTr&Esu%_EtN1rlC%k|;*Yy0XwfqxY{De7Zu93+ z&0pXbq9~1L^reT8ME4q{<>+*VRKB_P8|Fw{a&K(y4&HPiRMR;`tT3dzwZ^6;jRiE3 zdZm(vf_739L&ZV!AM$KSor|$$h5K zROF{P_4#%#u2$4sCE*TPem0po5!yBqEG9|t{qtJ?w+q*Q;A+&@f3E-!zS&CPs?F=r z<}6<1!_vCHGf4jE#5lk09Ir@Da|A17qre>V11crCv6pYq3E%5K*BSdsa(Rl)ML`3- zT~cu#H9Ao+n8n$7d~*3~3!)i~%=vIRb(B>lm~7Z@@6h5@s-2Kk1g*oZ`jXpbZ|H$| z=vr7|bxHl@Ybub6Zg~~-D#zN%iy>H6y>B83Ctj2M^6e9t 
z?0ok!w<1)LFb?gxDGDkLAGLFO&E086=VT(kSqXYNA?i29K=GoNiA#p~s`;YN73PmA z05+@AC-MkU=7VfbXD^?T@(X&9@dbET{&OTSWStY!rJ<~_zjqZsP08U9D|+->3D5MT zr@wScuA&r-+R*cqj&mM*Eyyfn!X~B|&ZSe<*g-h}96j~(n1_K$Jhvb@y|p9c2#%okQa(hwFFY@?Qji85Q1NSFPRWU{q4+ zr|+11d+l7hDW-r z#(*2LL0YO>QWOBul1TR|y_%x=i;8V1cC^h9SD(N3vXlY#V-3|oP~@y>RblIRXG=6@ zxU6+4e1N4=NR zjDq1{G)-b+K1ggp{Nu`My7LkdPNg?fU-jiJ@bk- z4m%k3iukU#dqSF%`p1*;Pq}LuvP~X`Ug=sU)rx(d_vY`Zg7gLn#nHd(!84|zzTW4d z2^DGxY0z!D(^(^dEnC_}q1{g)Q1uqdh4QN2_2!98cn8`$sWrX+Tbsq5??F7I38Crm z9IMk_dXDMAiv;Va{p0~F?k1h8b=-u%H#%&+|IhFw_!c{N_hX#;r@Dz9fBFBWkjypq z|L{+c6Dj|^?R@S^Jrm}CK642@<$I7v>b$$f;(U<{gh|YWs-R=6e;hrcV18Z^D8A{? zNk8wEEnT+K^um?tc1!|!FY9EEOv@td@AcZ6E(x9}Q@2{WvxuFky_wh5H*MM9T20&u z8j>P8DfTfcza5?@g&3ASH{AbsX@`_H*S4(K>gCMFujZ+0OKLxtM?ObO)vnvHjNZD{ zn*1P@;lrgxbSh3aN@IZiJ4>Cg*NPPeVyQw3l<}Az-7Rn`#`0AsJehCi7w2K z2ZsxlIx#(~{Nt|j0-a1vo<&rLJ1WRgIB$*ac!Xha$Jz1X>m{c;zqU3;(wFvZ%rC^} zdOF2FtCEqxN*Hf~Z5oP)X~C)b(&J{T(LW$6(^>k!h`4%NrbVotsRq`JMs|!sf3&dD zH|5c!RxJld;Z+PPZXb&iIk}7cq}q+{umnur$)&|7R3Y&Pe`y3#3seQSzM_IGlY5@{@zWli~No7$r^D?c*FREAO=XNr)=6WjA8~iei__!sstycEO2fUEWTubi| zCp)$g)-(O&SEvmX8cq`RrJ;ydO3#fHzY%v)?c$-5MT>a@vnRjB%YVo|YiVtl_O^3B zc^P)HmHzgd8H!{S?ULKB^X`S3P2{`~ooLuaJe!ii?<0Y=#dn-9SZ@f&KK_W=ZGB9v zV=i-V!|F{kov~DNdjoSe&p=Q++>~&Ye!I7bgIjp)ZEbTkh0aEUm;sAAv`~#1|HbYb z-lC@VEa``9TzbTz^JjLNpfTmVOT%6NrARFD#C%@s;!G>245xx{$4nOz{an|sxUxJc zO717cU!=O2CyFzA3g;!gR+vb=So?vcALgV$^K!>6DdYKsvo>EisLh!((-$Pvf8(9o z>Y3VnGG?6Z*L+XMX8E<0QEx7nb65*Mg;?KHBj<;8U=0clm$_rO$^xV3;q+1}2#ui6 z61#`C$VRGxE$Ta&m3t)_w?>QHMujk)mAk<^?ytWNl%c(mZokCJYUwx%FqLhQkH}0_ zb%O=R=#@hoUB4bH$rYQrj5KoTK7V#2U;HGQg1UMAc{yG7h_AH)z$N*qp4^J+lWvB! 
zdq%aL6)5sb2e82xtdKu_TxQ6&^u)ykTHDr>&{Lo=MzEc)Wjj6r;jJGVvX-%8)XWvj z7O07r#;If2{8zZM$P1$b$U?w8w?K!NTIWgoj7)aoO zN&p=r;oVgs%#C|DRe^nWvc&86rUnO6c3Z-4EuPKhftI?$`~hxJMo*V zquIRZ%pVd@vx=M~0%yZBX^LSKoFDfM$zD?z$*a;fp%cIcXIIiNn%^KZ(V8}M`w2zK zV58*=#GrCKtY(lBK`%{OUnCOFD(ChRQ1f$2p^mG;Rq7~{j3ixC?X%HQzGxp48-uM=` z2<2WS#v96xcnv;z<#n^TaqI-+?Zh9&Z;(}M#cs4k=s?O00NeAZN^{WHSG^0KZ!nU`U^vkiNjeq-iOM?RTvkUQSZ||hJ zD>uKKu7H?9I9rKGukPmEpYIDQXsUBKSVdz<*`4zK0M|WdZL>;j=ZgQOz`#ln_5Nge z#1%`@4%%W`pYwaC_P5E0wOIn zlc+W7F`cRr z6xQB~pRMHbD~JD*{ut@qNu@PSGm<~Abbkt)DWWR-<>NBc+eBr|c5b%7G8v3y8KtC z&oROUmY@(5w_?9xt{zo!Y8X))7MsYy0i4y2Q3#HQjcYm_<_YR22bXv5TVKh|=9P9; z!sf#TIm?h*!{{3U_Jy;{8TTv0+Jelq>!Qt~52{&T5Y9Lig|ee=exK8bNvB1CCyz$t z^eoFV&^@&vdIm0ibfU~y>VN>5e9ir8as)R1+1b`Y>#5)f_v?6<-;Vh?0ZyCmAK~sQ zJMb6ed*dJ)QDLWzPZg2+Hg`6_2zibedP(PP2x@n13+-E87Qm+F~I zfEc8sy`eTi8qDK2y_h2Ig8w-PjvnLXsqgs};KbSQqLpv)K0hQGpeLkDTPOtla5?!Fh6?dzjbJv!7;f zk`FhAn)v=Ea)rqL^_>-K)RALU4eU7_F@6+I`wDu;E|Jn%-1c%%6%Jg0P<;m_j;SFF zl}oLP4A+lVFpZhV-s>xXZ6tVBjLWaWW1VQ>>9iJbd3IZb9|hj04^>SJ_p!~If!*|^ zjQj9ObEP)F?!4{*Nw&PfP_6l@XNgPfTCN%CD3}L8L?9I*O+K4yI$19b%uz@4begvG zjcvwh0e1IWA8H&f>TVbaf8tzS$m~~T8g_bmCg)wG9Q1WW%e%))hx~Z$h{<({VcW+m zWaN!vQ)t@Fl=!v7R{Ps00zW_L{jLN&LGRR*eHF&j7RNIG|}1P5XB@ zvBFN_{AjxMx93ER^*!~vipaDi#&0(VG1KJB3_mizmgVs_?n3J$X_eOFHTx5@66T?V zE_a{4e5*rbgBX;F9B(M6wj?P^?GJFSts1v1U7|bCsvJ~T%8QEAH!lQPNQLm)bCk9A z0@G`f_+uQM-?>pG$D?VD4WAfec5Z-=bD`fJBm|CS6Ck9=J@8>g!C;w!zuXM&>I!@VB)N^P@EHg{GBR_KGe?XH$rnJ>h~c z^QD2tnV`M@#C~!eRlS`X(&AI=EzjM?%dJ0gX77yRU1v5oi;yD=KIPo_9gq5W^gfDd z5{%~pggA^PWtqV<$rMwYOI@|Y4ZkYj>b#uBkOWmCjg~ux8-38)?ap>#PjT(*lJRj8 z!TTkk(gf$^ft-dWN5AQv#d0JR)0=ET^_}JZWSI_mS*%IU3o31%*NBao-t2iu1CjqX zN)Q=*rKs zF?7JMk?J2qhnxwno5)$h9_#K*4ZN>h-!WQ0Bfr`{=7&zUF0p`728`f$UySkamgeMe z6p7X-SN^ScFTI(PHo|Ii{#f#VyMW)%1&P>>J~%rq0$azYsCH&!7TyXJI1zT8DlZ*f zF%})JW&~GIkuI{9&p;1XOU2ds(#olLCkRl~mJJI`%nfY|bnX4T=m|$d%#D-nn$9Gs 
zuDtPp#bwdF$m4oUVLDtaB5=IPs05lc72U70i->oEP?cx;?{i8^&`a}$n~cL?_C*Ncz~*#O@qP2e*3|rk2g0ZiH$dm(H4dPd2RwdNwE-t&nG~34~(%_NG6_okhzyri$me)!PaEV9`8c+^&$;4N;HAbH_#z&7mKRm*@ z!_tcJu$bqHN*@DUulNk8aDPJs_rH9S4CoYqpJ|w;Ev;MlB?{9~=_kICw({q3kFkz) z=|CkdzEPn#6DDVYPMCjZx-_ttP6KYBhc8tmpW-g5%@3UzB=yo*Q;TAd}puGuTtY>JCer0(YZI8y6nN?J(@=slRMkRPO`3b2}Di0;%cSO zHT<3q4aQu(wsENwayYu8^xbn&nwmMR5jid)6mC@7D?KwQ#AeGL6HV>$o1ST1)*dr7(Hk8CT2^Uyc8t+jeTNvlnUV}W0DM_15OeVtL8*s6_aW68C9 z#?M(3Pc@4&o!gTqW8c-}>I7`69%5*Bzu=4*uP4WTFUqumbN4;{0BB>(<*}HO2IkZE zchrcK^jl7iR{QQhyV^XTnv0Yu^Ov6&(SD0{E3dY$OFb6y{02>N5tij~M^0L7s{X2G z)^zYKL4-MhOh>WB-YeKKCO6|y^z+EFoselc_kK>vnqnWyoh%?1gD?TyB@RT1$&Qgs zmodlLwR{^Jyg(iX*%`Hom3tr`)R}PJ54UM{T$sC7ms)wG$H@ZWMFoL4}{x^$UT{F78ykc-J)>YuGye;W@UEj+GwbfaoY z#0AY?q}U=P%rzy6;PcQxr`KbjG=}NWd7_abCy&LqwljCm2aw@pW`!&e^ev4#N7bby zB{X@xHEvk<4?ae0m{h)epdozNp`_B_#rq**bhZ4hvH+Vo1Pub(d0_S?q}wFXshA=N zCY0BuIWyCUrs!D%WY^9jOVva3ucDHznaV3k9bicpqK>RGR$nd(Dr6XrY!FIALNp7e zu#f^X6-`+)m!UkpwsoeT!y)s!Hbqh$`i>B&1Q$GIe<;thICQ2J&f(i>R99S~ z4gCoHWFnBv>&l?EC}(${0QCII9h8fJO|h^nuA|2+MEOX*CYOvO`);SHSlJU?0xRZT z?$rZyHD6McjEYwI5c`lI1i_9?{#=ocWvujHO5$|14O9rIzcx~<+6$8Rsb zy!<-STj#b_=@aFUrKag|Es)>`q=ND($~=)E;Op zBSRYLZ1G0FzmATldDAGuZ?L-lUT1?w1#Y53_$Foy?w`u5Na!u_hMt%qopAY&Mb4Bj zbHB!PcWF*R?Ty*u<9v|ACuP+kHF(+Jw2T+qu3SZ=s8R1dK1PCIoGg+}{TMQIFh(|i ztovU>MugL;$mp?Z&le9*Y|?N_o05bOw!EWbIb{?cdGS_~UTcZAnJnaXy>b&^{8v^e zT8i9evhu0sdQ_FNapG|3_-CpmS?~iQb-Wen3RP6zo)F{P_%U_1m_LXG*pe<6q&tlX zviQe4QW5+nzazj&Ikjk-B`=!OGAW6cXG|<5tvZx!(L&sS_4IsjVpzY7;q>GX45}Xq z*D)hMYZYM~BCRq*wv9REEHyh!tN&a6o+iU|B^3Kp((`dV56f?}`NG=vL<52lFYeFG z65%fOLVjC~UUJBfhezh-q8SZ7tAyt|9NF1?CCWZ8-EbWo3jZ{hPUHO^m(bvE_cDQ; z&5aXxF(M!u_gPl=KSI4p$nPK3fGiy*UhB zaM=>DfRc8kleBSvS9S^)W1}7v3ZqqZvY3D~@`nhNUsXvaxsKK$h`z;lKPf+ZyVO(p>anXYmj@TVcn32Tlm2(N)|4c>t)*8eJ^fW^Ep& zshuWpYW|9N;}N`A?@-f*Mp8wjt)W|qWOkEoaUCH{1LP^|2%(=nw_s0@Z(lNV16L6% z=P1nTaQcm+6LxOEnXQDYgRm>(LhsgZF?FxU&YmqVhH) zsf)HY9-4x1p_0#^M)tqk_-liF_IY+TJB*-Sn0-W5+~Vr;Df)(Kc%M`umEK>i`L 
zUbX*0CqEuNhBlCXV7cSVw;({YBc(VNoxcowTYG7MC-d~oZ@PIGmB>ay{jUz;?Z#<2 zS-KitTfRu~M@7r-@c!l~UNcDIZ&@edd0DA%5PW0yT?qIKr!VHuLcPFyfQS4mT%K&4 z8A!1F)fnOnTH{;Qsf7t3r#tL&XN$Wzk<)+ic{+x_pG9#ePIU zZ6<||L&3$|EKtS-pO7D{!Y`^BH&}9K<3*(!N?=A@u|IHj-bSp`Wr9`Jdz&Vn3#{js`iYWX|IWy$u^itE~g3WXFv2wgwl|i zWxL@5#q>bUD8XUI#Moy9~?xL_4P&CTR+#D^2)4LCa!-~0ubx? ziI&e-2g#^FG6k2D>Hw{pQ#BLf}6xDDc%d=PX_(4`n=xlN_-ozw-)>t z5gs>zo!9QF!O7}0*l}OqWnf!O@moY3XQSV~Yz2w3&#yBi4>qrrpER&L4T_77L>kSu zU*fRY*~#6x?7-|450bvjg27hlLlo_~QCe@v$FZHhel2c~E937be>=cZ+R>3jc;Z*2 zRUE*p!;x7Q@5@QNWxDmCy){!1E;>jOlXk#B>=F_oQ>pmF!%Dwg#|<24dk9@t%{jzm z$98&qS=xsWPXxPh-M^5a+nVVen;r%T_TX?*?VUeQLG?4SFZ(JH?`9$m zXk*l-Qg48k<$64rAc7D&8DGKU65VVY3{JL0o_3)Lac&=6=3&tPg z3@N{wd@4@0TcE6aQKhl<)_MFGb+vHz3?+%()BeytSum3v_-Qh>vvg7S9jora>?&@& zcXj=Eio=}zZ}@AYVMisyQmSSNrdi~oWJRe?weibXyCHSD_gWE5$%BgJX`9lX4W&Fo zZ0Hv8I)o7(I6WFe?_;>-l`}DdWmqL@_AtBE9<}At=r@&an|M;I@_FT;vaOr+x_+}YRy|RiHm`zE#{Vm_%Ie{qM2o)QqA^OqZn;vPqZPH$7GTe%b z4haVDeN`48aKx+WYSZ0ZB$ZY~_=TNvx03?9U08?nd4dU^heo-cfG zQ)g3|6*ORyCO(R`I(#LSd&0GIW-!po3E50cqfhT5zI>eGjw%?2H%GU>2eRpB72;!f2i~`9rrRR?&0y%hWPf64 zNtQ~7a#7F`5cba3#}HgFUWPVPg`2UAoU!k(vp@rAF)z2W&FC>t$&?8TkbI2xNcrTP z(qBj33OQS1!n{4ad3s>@WSxi^)SK7@NqIKVAoYrHAdTKOCkWiIP&j)hD=$* z?{!m9J>G#Q=1oGL6G#Po#12FR4{^ua!%;jVC*ekSW+lHS@vXvp9i?$Djw9-h&x!FPqVKlg=pRTEA2>6GE}wux~MJ$)R`sPlg(9BNObV+Dhl@J3S{~Bv`sf7tB_RZERN8p()Fo`MkO9*u@4E@vq7?y7RwD z7Gm~Z$l;3>NNsiSpaq0PLk>qk?K+j zTsG&x$4NwJkEtLXyqErovf zS^M^T5w-{CxvtY!YB#msXXO?4qtWkht;*0KhCTQ^s{U12+UcjH_`mRQn*HwYGEA~q zf&GEq2;^IgC&cve#pKBg^_7E~6I+`VEmBM84^!aX8fBQoVnbdp%6Hbq$)nAHXJKqc zHm<@VcJTN-`@Mol=SM67%Wuz5zm^nTHSJ`qARBL8am$Tw_)wc~rQIgqtgm^peoOfy z<0T89^1F86hPgQT<+(#=WpkJEJ3wEK1Kcwtg5j%S2*%DMREV#N(_F1xi$e2;f_Zgs zoFwjnmV8~^`0KI5gfqlDI;Q%^{=vYuKk86}O~d8-kjM40q9ZOp8lQEQtMj`>H;I%! 
z!Gd>k(M@f@Q6wo&9t z{}5)L#8o{GC_Qopr~5L?l&Id0_11qVJ;F1**^K>S+A02d<(YqNb&zuEZyFeZhQ+b3 zk4M7;Oy9*b3p(#xH#v{tK49?C4h62l9}tfuA}I1+w2Y~A?Ih>&9h?(M-E$fkH)s>} zx%8K#`hRheF`lO$)!W~%Ikc0MaDR$ql9FVeGbjGLI9rw2Hp7jey~gW1*9BOja(sUA z-M#1e&@!)&zPc=zGcT`7TQ$psqCA{D&miwe_r|wd4XYLTR&)sws_QpEG@G-p8Evv0 zmlHZ(p37Tf!=j)b|D}h#u1|6da@JX4PKgPIjeX!qkL(4FNn45!USKpwvQMu7Q zs}+E*%Zr58sh10RxS;u3GBZczVV6*$rd^Bf4tfClB>&ZfNJfR$M6ww1jLLj^zQ%Ns zJnfgWr&(Z}C9t2|9;lDue}WOiWK>5!Z4@%LF6~ipaZdXbck?@i0dACU(duqY`8&t7 zIxkposI;?G)L>F?+J=!xF{{XVlDihf;o(i$5{v-0ER!(d!Ajnp{TKViry zs)AAwA{AT6keIMss8UlnuHt}YpZnmZuhO*jZSjuq@APKMG<##DnR@JK|G=*&CYcc#kuzWq=e^v zDD^*T_c?iM1ZMs=f+bouv9+OtV)-N3#bqi|fyQ_?sz;qAUAsz2sET*6TJJBNq<;y< zKM7X9ZPb~92u3p^`JR-SHjEr-j`?2I3Fzn?zTvo1y(iZ;@o~vL8ZRBCImT$zZ5YlN z=jCc4@86~506w0tCI7~}B%nGhX(jWPOuvNKL#m%gS*#Swu;}*pwr8ZKTL~BWl6@R| z*0M4&*;vXxvy%Nj#LRRRY@FCF7rLRjx96bwYO>{XbSNsZ@GW;Jdyq!T-Vq-tF7izQ zw1{K|YmxcW=H3!GB4^u?T|6I>tP>DYEFO0GfcEFlqXm&vL1v=E5rq0g&3kl=H@orj zv=o^rP3+m|qMQsGZmz(A>fX((YLn~@HMzsy%4abw9$8DI1o3-}#Q7)x$toF+^>uKf zG8FzH>m6T_ShfsmVt71rt78175FFqX{3_3D>0xq&#+XoRe{?fPTNG&~F(~W#!#aI! 
z%&)N4qru11+U3dC(ZOl*5(~kw%M#_?v2lo)o5#% zADvy!@!DGo0rk=1M%DYPUl`=`3h$|4o~7g(BvnI9+twLTfrW;v+>Jt*PXsJBfL!_y z1M%4Q>Ih(r-p8{>3KlXk#%j}FGbM8mP=p~g?d$bSmF4($N@yQPmEy(aQVhGmeU7Jg zG>C8d+}h@8)ywnF>CapOxrhaa>OmN)hUJVzmw_)u*^Q^$c&+Z5b@kTyu6q=+uftrC zq4bBQV9kO0pp$gqCu7u)e{(M3B;f?ly$lq>Rg;X)hhe%Yc6Ryf!1aq-$HjQ6cW??X zmI8d16Iy~S$s$kD-;>I>$r{Kv7?P>zu-;bQ8z-eGF$`40+c(B%mx#g<|2HJJc zWhmd=Ow#EyBVI5{=PYG>#!b`4GefV>d5sNlN+tOMm0&2L51oh}gKuqmA5S5{GzA5j z1nG}JF@C;+7yLW_$f!h3crl~#a%_8;MBa4)%WTG7+_lzl3Cen7axO1*F{AE(DH;74 z8YM)>PM${Fp|tLHpP*r2S;HwZm&;24Rej`7F(Ig>mbaq8D+EET1_4GTJ<#%}QGs*1 z9M5XNp8hi}w=_RozQuO0RcgJa>D&z!7+(fNv^6$1eCusb0k^a2k9}T>!dtOWZUR8D zZH+U7y_9VeX$B}|4L@8Lb~WW>X?#c5|Dyoo!?3pBjII$fx+`=HE}Lsy{5;$ui4je+ z`+cET_;W2p3l}FncrvRkSJIz?uoJ}UvPUQ@%`;P-945b_=Ejda_5HsWE8I3$aG*ul zn0Z>Z+G*tixPfaeDT3?dmpQ+-QA}mw>iFxbq=i3=y4j+Kdpm5Ix(4(-G}Dg){~hwo zn4H^)b=iC1>Islez-1)>mm+~}>)J+AwpuRHGu0sH*#ph*z4e#8jpfq=vAN-#yL}oJ zrYxAX1_6DZ<{IosfDuyt4((HePpn}qP@%pzXMdCk z1f*5yP5+aYy|w%Af?ld;h5}>8j3S(c5MUpky9y=-)vFf2U*WWC(>Kqa6%aqs19{A) zXn7S%R1^mY|L8S2eM3w2m_9#BA47M!Ojf_eD@L>!Er zsnVJJqRWA}^-r8S;QKc(UyN`2~W|3lZg%7p7+xCB9#5lW5jMk#oMe@We)$*wEsEeiB8POP*Tq% zc42A$I7YTj6Vp^7`oVqV@d9oHKYpM_FWv$tkPI(Mr)Q?#co5^cA9Q zHz_V+SW65}Q1Gl%cd*$uJDb9@Fd)1kqhd^=QHIxh@~ib{IZOwGEv!m?54084bXrY< z5=KfLY~p-Q^P<>+XK|WYQJuwtw17R)MxFddDdXphQ(X`Fi!2lZ@Rp-NSE#-JvLoks z5CPOQ@OQ0ZwYjkJyKGFu(Z7!6&o`VgRAgBRBj0enDk~+V*90*(b%3VrCuhIL&8^@{09~ThHv|ebnE|P|gj>d$N8Dt{LoOx2%h>A6^t^E3}QWdcQvBqa-c$s+gPSM8^mw z+u1xIL7s8aVgDB=Dhmb@%ejt6{a4hRyn5$n5hy{1?9u|iTwHLu`^DSjr*itODabr_ zVcBbz=p%aKuKRjzF)M~*i{U=7QtHS?WTl^vPn;nqVM+X*e=IgTcM*0OU>Ff@hYv!h)(yQ*tM`1d-n?i|sCXk9_R#$sQyC^d#o&P8 zMTa>(E%VYM@bcMMqZCRA%C&GB?&@*)hQLA$D?Va2ctPDcInx1J%FN!`Y5yeSNj_Hs zgb>J@`wI{>Q1n#}_3t{@^NK-c3?rsChdN31X83YcDkSEK3iY83`w`bR%p%15otbi- z{_R4W&MvL3nS|}SB5dOFll5AmxO>gd zWzI6gHnqa-3KI&DF(4jmcn>xgOZ6hvNRS;CfRN3${$o*vv)I72&Fe5O^c7kC znFYW|)`L1iVNTXaMMQ^gKXhN$SZ&rOrY(J@U7=Rg?nQlI0I&FO)R z#ZuC;;S%LSbCc@3UC9`BU~*e;z}woNeQ(}=vF!Pq{hD4|H8@M@d7bJ&zkRrIY(s@E 
zLqUy4%{?x`jT9#b>crcFkbaV#KL$T#Dk5`U?+oeT=9QD`fdO!auc|l=L-1^l=*k(hmm*U=9+l%9-B!S2`rv`4+zzSWAEr)f0-fZ+(g-?rNx8}6=fY=9Un^pX{D`tPv1z+$TM;mF;VIo`E4q`ZOtUElaN|JpCUBmt2I}GH*5vwI zbhBkABbiW}=kC|G-rw#KJlC_uiPAXS{E`438$J@-Z4--?Is%)@4(@KRU;^mR{U5k< zpxQjnNe(eqXr}RXpUd7eR|zh|vF}%&Cm}s?Rr@?b^i*4q_INUp!(CXZYR! zMNUh{12V$=zud`_Ojp?sZUsxOB@fU(Y652eNkZtxC$ui@{m8^J<2_jYt6jt;KuZ;Ai)J(RA}Yl?i``KemxGxj?DR3mVv>a=AsxC<0K_6Vqm_AgMj znGJCC01niUpY`*Zn=>prvn^f9JM^2US=;10*x*TO>QfPQzeCnMg=&wCB%@=|{A1B` z*mS0C;8#~7nqBA^AnwLVvaFEpbuaz7XG0GtjaO~EUN8Dlpc--Y+nPL*xq%vFxe=NC zXZ(kC=l*g?E!)HQ>+3*?+9wmje~XYVe!kMVRMM9+S;h`(E>ot_eT*Qwf9$Gw!kjgkl& zZ@A3m5w0r0<1@o}GaWsqx2`uo)}kLl7?1@?##o@w41Tib{7;>4RG|}sd)5-Gw-+b+ zE*UcLME&u9QczH-MN>d~Zm{aOzXCv3ihacb9xBXQXK5M}D{=-Y(~;h!V@ovlamSYQ zFd(zIEbMS5Y{i%)&>WSFTSGFsvmWopv90t~1P4c47iFX;F9QS6>6{j2=e6;zo-xit z(K{ErU{lL}K5PTgNhj~)DbU-ONpk?=c}HaO)2>pO5baiO&zTO51@xQ` zRN#OyNS9_k4tMAjG6mA}UP~p4t(mQ%h0TL%zqN?g%3Ysh}(z7F5Zy(qv9 z?02(szD@%0@N)RdZ|e{iR}SckUDQz7hO}m7HFkj5%vS5mpR~mvu;>2n6Dp}8 zFXO1=-upM&i@lz(dpYURGUf)F)=P4_l)-x*yQcUQ1lU2zD0J3muFSobh&| zZ0K5$%SvnO0ExX`1pI-__>d=MIz7(nJ(~mI$@NO&kX3I{aN7FEGdVG^(u=}icWEo2 z^0a1^b^T4O)3>KXQ4R0;g>z1oY3b^(rWkrJ35=W}VurD8l_r7^C}W8#>-pG)uGZJo zo+)h?*MWOWylIa5GBR%pUB&@=GMXcUb*ElrEHX(nmDewfH4X}?Z=PP)Jx&(ZaC1%Y z-lpCYzya``!3hjNONkVEsQ`mmRE~`f-dd7SIZ<}Dx0&5Vej9~OzXo4l@9^U}zGz$+ zDF}OVuw4OJpY}9C;zAbU-OPYguzPT0rA0(!!$gGjwq?2LqSgQ+lZb@ zFUDpw@#m}H*|Kno2@Ra_GLvYzdhX-E0elhI=JhrdF&f9`MGMGkUgOfBKPPI6^*-1N z-28~6Y_l~@5{P}dm<$bIz9(al`jI-srhf?XXkY##7B8P!qwt!qo?~6hQT{;z?LxFV z>v{(Webcs!(gw{rylQds!yeJeCCec`4O@X`OZu*dyXOYTog zf~9ZJg_cL_+6{+CVLoqpP;$M;BgsYOX_^oqeDUl^6;x-lX5 zdFz|`wVz)>ahu^!?8i3Y!PpDB#;-A?Yqi-`* zts)ZTpWA9#%2cT4ll>%8%4g`G3<79K*= zRD0WhJ+~dCTab4!8n*p0MX)o91(sW|Ce{jNl`?iKF`Ba=eWJm+)1Al+Gk;%vK9$3$S&+>EOnz{CjxzS&0=LIfV7QMo`vs#iXYc;+p{k+;T zZQ)>1#k#)`sJAB3LBh3-k$=iWdSx@v&@?$+OtJ=?Ag^b2lnpj`JNDS+z?wH0x#+US z)XM$knUlaZ`2$ciV7hl9MZM0jCxvKX;bl^6{7Bpb+;5l5Y&a0}liiXV@xZEGO|n{*b**I=%Or6%@Ayy-&f97aoO- 
zM}p@JE3H~^EM-YEhd@Uuh9PVtxyPJk8e7OOB34!}wMw|Gm_poj`>jC=`qrb(=|6)8 zZChW+aeP;LRZqj#_EFSK{*d2bFWUs&tY&jM=u# z>3_Qb_=Y{T#iI|G54Puw%uo`4CgF;+VOF0j9H`IIkUAbcI-`4UZkTEZcbqM z%%M5-&y!c_yoww#G$*3`P+23W*la{1TsT3$az6!`Yq6^gPbV)f)OXq7EApDYBj4Ikk{7`~gRYIx~hC$F!-_!N_Yon(q#IvRZd3 z@!kvv&=w?2WmUP$AxFvx2;OP_<8w#8!3vdtUK$(Bg%9s8S9wj&lrR@~FcE$+4FmUg zaYv20gYlhf=*(ZjrNS12`1L+`d6z80ChX;r1ffyF9AU+WP5J53xZ|tk%iA`E6qQWV z*l^^m0j6+syozD`0=Jd7@|Zcnif`Q?fL`J(aarUZ(Mc{h&rr78wdboW|DV;$1SoD6 zS?$y^tSs)Y2n&M=sap+IN98%od?`dth~_4U+@zn#Vs>%j5~z-{oC^rJmZ zRMC5C@VL%(Qv+_NF9?d8lfJA`QXG9{$TT}HAn{Ola=Yf^e#%ze4dvi3MUT!!xh$#% zYHQLH(ZB@DIn5u?6?Z22Ks&YmKF=rf$X^ zbitQa_>Gg{F!y$(a~5*~ixI{D(5{7r@(n^wN+gpw`=^L?mZxv9&LbU&9n2)*pegHo zAY;3aF!*%oLE0=h6Pw>?piNK%?rqw32IDXKijV^BJdZl}w3e84-+dhaFLrE>#?HZPwv?`yuX)mN!Nf>S@c z*B^-1-W=_kg3w@t0jS|p&1yIT$BkXR+S)Nw@(zMeRvP^xpDIF{(lg$C#}2h%w>=A1 z5SvCxlh==Ta6B1racSY5oR=HGW!@u&{HX7Ai?c+(oR7;i5u}ysOet-pG~YY-LeKS{ zD~S>mk`C=aQ_So^ZqUc-h*u>#m@dsH*El4&chrfh)ds83Nk zMn^kr1zp7amSXs-PUhsv?Vs;A{e!i!TU1Obe^`s*0r1oMz2VAwkC2&X{Re3BnXa{M zHY*0M-)i=7yioWmVa#qAZcjb&p4j;hkQ*)3-LYGUPU7BoeRFzCHl?C*!q4Vi1Be$u zA;ktHZChgX*9Z>7zA+-t#sIaB3KCv6?GRoiHaejjbFdgcKUn2`%JK?GOzGRd(dFr@ zjC9lNBS+^*Zs?^_M;m>8)+;B*8rM?YPAT4>0rRrHA*<4x+yM z9_uqp>bsT2Ck8?4UUYt)Vnu&5tJ|>pjQKN@K<6p$%XbrU_06pJadhh5uZkON=E=m5 zNCw**yUG}t1gR0ca`}5BsD5WM34W_5GgL9&!8ZI5{SzvJ!pxKVIl5Hn1TZILVok#D zjuP)r%3rFyGj0;R;*a|o$>d4`$ogJk(#mj*HMuuhvMF8eH|?!ElvbyteD<^GYBxCM zMU{EovhFV!s62~->{#9AV)A8%T3bkG_SUz+&wGIP4Hq+gL!^w!nG06WE#7C*au0jW z(@yF;(V_+Jn-u@%WB@J7YnNX)<#kWF$^lg>o(Z{Y-e=DY%`Wtt=H!PS@P2(7nH76` zi1Nfw4!_ewX^tNR1$u9#Xav9KwV&ozIRIR8Ce!5hVY_+O#0d(!q}k1r2q@2AGASFg z@#u#IqUu7u^fxXi?={OmDCZmwpc@e5T6!nYrrlU7YV=9sr!4KaoR6ZF141!BmaB9^ z-34wAN}V4?$2{d}$m32YP{q}O&&MCV57jB((0z34njfmLFbHv_uemMm;_LYAO)e{@ za$D@?H)*q+-t!_q+)STXGQmvs}jB!C}l9IXF5Jdvhq2{)h~ z!kD=yrQZPim7WdSJEbm}*Lm0q`b@{AqR*2%@HNf&dQ(;!Z`B06wqy-rs{or*i^E-> zSb@*%O4p9vXmvzmV(}WWw#6kec!;{5*xLTRS0j2`h`U(&LHTX2j{x|h4RMwHBL9I> 
zYSjU`jp5DHQ_KsykS6k6jJ47ufoZ7E+yNV>@rQvAOnZ2cW9+oSAZvWY0!>vnU|Ok1 z)h^`3o(}i!$SM-KM*t;GAgi#u^FaIGiKUIS?bhh2( za)#OveKY;33EA>X?Q=nyn_a2@-9%p%y_SDZE>XXHI==dll~txDr`CqhUjk_9i}Hq6 zg9_vAx_v-{!wL2v=uzR`IO?_=DhJxUmrE_SH3;j8Og$7F2v~QKA;p^=kBcHoxU-pSLC> zpKO$l0t#%hvqLcZTQ%2@x98sM+IIW)9p!kL9}KkokmdJ0iA|8IZK#;OViL`cm(iF0 zG6Z4k*AIBs+UeBky}bKJ+A^p9Bl41QxFO(GN`j~F!F}T#$@dddtl>(|5s>eNp4@H1 zUJu(orXZ)NF86%;_ddVF2?j%K)Dx?llDbQcWWDEemu!UhbQ`Dj=}DSae(HSghJ*T+ zh+KIb`*YYbYSp(1czAlFP=-tIzWf8yIFc(^_lUeJOZIgGf42)W@$fEz-!mDWg6U{) zg?neKk3FldSo`v)Eg(sx-Rq%iGhGaKB?Hn%n^M?8>BYZmYmx7qKTO&b{|^8eLFT@E zbOlBxL9=qq>JIoDISB@Df!d`&YZo(`IvcM_t%1un6c(m1gTi(X$0ccsx&1VGMCOM3 zlqBresx>xaLg`yrE~Q!dEfsNfg3F_& z@$%EE=HCKoqJ0lCZ{Gv8DDko;eZ(bfiAwha7gUKD4Q>w9G%ZJM39qzPe+jfYG zL~VhUq_h5wCwTokxX65RB3n8*0S&vPEu=SvE3TQO4>*;VlO*L_`MtVw3l2-#gbVMr&mZs!sM z324xTfzfatq#8aJ+L);kLOQ%6GpfkY7*uF(AaVme#rhNHTL9B6_9V!egOd0U&?3jU zCi)8OE?xdx_9Mk*gxyu5(hIB}q2bY@*=tqU4L^dw*JfF>ps5g4trFd{0Z62=LvoXy`>2dPVmt#9N3p3`)RXD*cQv^l?0qtnQrgjTdMA!AVb#|3E+OWC*1Dc5)Rw&d~Jn z7j%KfEWM1cwYwK%{<~^G&Sbai#EY}T@2Y7Hw~{h6ENO=t86aWdilZn^|QSMec(6t5{i;NVINe11w6(UkbdW$|Xh=Cib z8@)QB#dVE^Bn|xOzBx5@+ZvQJ$((K+);hOqjg}J(HpsC-nEP#wk~7vi^%qkl9s=jm zpGhxs4*-d^X-bp$^d-(`lGVFiXlueh!G?yxIeT5w-QsNGU> z{zLukr!?Q6pE7^ale51#yOiBW8^g31hh9$F@uO8w>9WU zFk!X^&B-TmGTJICu9wSDuKL@k{WPV0C`C;nc6MJF63W@?qcwhUPi-V4C*DiU7av`L zXcab^l;@xur*Urnn78gL{7HF5UB|u-000mG zNklm|zB1gbk~kU-E>?6Tc#O_U@i zeb)Z)yJ}Kx*z_slm;=kDw3=SryFn0W3>(j&`uuE7GGY+5-L=Yp3HxbvHel-@BTllW zW%twcGU7=@=4He^TbNdC@#3Lmx&1UgBMx$6(m_?)_%+CgTRB01GCadjU0K7al@P9Z z`)Nef7Gf*Tz8B^W|0M#Cgc$X%8cT}iPuRQ8dsmIsOUV4gPne#nY|Zj}Ys&K7tdvr^ zrL-KC8P+MIJvF8xT=~OV;G>gJZ^tF?jH`jQ428^AQ5GW*jhLpG}$c2X8#o+(i&Epuu1t-Br>qu`9=x&d$JMIaYpWk*dh>*(oVrT`NeiwSz4VxOiG{W94^W zSq}Jb05abi>zeM^fI~3ghAs^BHCmUQOSN@GR%dZjX;fzJ1(7N34G134Q_!CT<_yf^ zFNyT1FoU(T1UpY6bS*T-kSv5@m$@*r8JQ$#cjbSqjXA=K&71e-%4h+Xee+cvp=KS`9SJ!KGGCh6{uxtzn|S{JUx-3M9m3XiS7EuCWwxtnrE? 
zR0~tA__+Nv$r3?--MMAQwdcHk`=(&|_~jEX-|^&kj;Pxz(*@sMF+68+q zc>Q^AzI@-6`*)tbcl+MWo3~EBTs?l|#bYl#b@++9p1JL&hrc*!B4I42os~U<(|+!| zYJT9rTdzI)^%w5Bc;~j=r)@ud(^RlY(?^dVefp&*?s@jkTOPmp(CR_#-JPJ_{F+UK z+C0oqhNhBB*(E+h6WFWrzn}jr-gQ>JL`cc-6O`sAL%?iO2iBj&`)TI+*a#sjC!G=N zvCz93IjK?S5=5krf2W@kyXM(l0zADx(UN}iZd;bwPXpZ@#WxumFDFvN`;&O=&V!4d zLB==ncq)Ot>Xf0E@m*$Klg`+8&O$d_c+o{?|Etr6Y8(ead@ruaSr0ulRChbr2swS% zifVOmOWd_2c2#2}9B46KunVhO7!%XAdKux4`-j z+9S_D@HgN8)J>0kF&GZA-A}W;#RH*-PNeaBEc#R(DP1sOnpH9AThe$>;#(y-Q$Mcd zK0e&y!g9{{E&YDlM{_YbQxp!(j#0SI^C!K#2A0Uj+I{4~>W0(7`F%V*zVsUpKK$T= zvdFR2wB}!*HYiFO9-fyn&j^SejoiM6*)gZ4)OVwFYgA1-xvj6{q!z;@^_?|`?P(b& z)yJep^N|d5rhuIW79Uzdj>Ye)8B@z>Im5p%Hmb2SY6uLQ9mg^F;{BI@;2&S{)4R8K zLy<$P&;G}6{??sO-_~!@f3<<_4F}%*!K;7ag414;MpJ1$uiyR5?Vr5!a*?vLH^)VCMpKkTtsc6+sqxoD`LruoD3 zNZ+-*oYrAMW7E@&W$OPK5PQt=+Z;Q5`2?%(Q_$ zi{on+iYr z{VQ8`NdH(pe)N{dzxMP?Pc7u0e)0N`eCV2Anlv$fFq<5cS;*PD&Yi3oO)9zf;N7Fz z;)Yi?@A#$H|LSB;vv24Av>+QMqV8Qe7QQ#aiYgzo#6S8}=!M2tA* zm^iK}lR(|jp_cY z^dwXY?b+R#KR-jm%Z8~nGW@QZRGJ;4QBU}k#QZO}vNaj?6=*y+;pe~U67Mxms<$)W zv{t{uy*0*{hlCza*_%FcwA+W(D z7SE!4<54t00<7k1>}FF@HoF8D!74HkzJQh7DJw}$%^ zcvn^@+1b?4-kM0x5K8FX^Y+%5I;C_h!)Wxe17m^l0hRlb#=M<8(G)Nh>D3B&N0o_g zcKAo3^N5i%j1)so9(r8hw50n`Ii_Vf}z?& zGgOVgZU{op>SY$}Ml0EqGvrw8vKMb^q?{?5oXtKj6J41`K79SJO!66L>^Ni0@L#hv zPs+IH>Yu;yU%mPKA6j}&GdnhK+I-<@7j4_L{jO(j4Qy?P>Wy-er}^*R^xvQ4oMv`R zgg>zRf^D0(-SzbCN`xhBJO`ZcEc{`?ip z)-Z8jjcg4{7-nl0v-@HaqCbymn&gA8%yw(=-=7vlfTmJ~!2dOphLAoI zhQyk`u;QlCB09tg&+aj*t|}%kKsS@xA;&;_V!@o2gz4R-6nmlbNlVLLS^cgu08eft>_9VhaM$@zuL7dG8y}jhDS9r=L0U)E6K8+nXNxyMwFG;!I&> z%g*;)`V;TDKF}}Z976?b0eLCr53J@6zl8C5iTXcwcL(GWEd zJqYT9pJ`NuDgA;Onk*dR)#>%3@2bHJP4XJ{UACBJY9!IMmwS=USFP)&FhVb(u(Y}xr^m;U(fZF~HU$JUR1>A}DK#GN0Vz5E8~lx}|H zOOu})4!r4Sul?n7cAe|*J#Y7ccV6;ie|Y=vu^0*8RkN~nzg=N*uhfTbYzQ{>?p{-Bm?Wgg*m6M(pp}~II$q)#=tHyVd6yRAd z?c(3yV0 z#Ee%185+mT5cGM+EAk{%G)8rgu4xnJ=G;)-9S#TD>UXWM)>J07X{fD^K;vYKI%BzCpJoha<_dPHn?A+?Qh1_`g2hQDn zp1*Zc`Ge2h|2wz*UtfOc^OMBMS-b2D9Y*f0nbiN*^WU*~lYKeOk>fA@*YE!EWKJ_# 
zd+K%HCtiH)&Zoa~#hKTfzOvWcK1smr+P3G8r@nLe*o$7>*@w5B|HE&;@SR&XZ}m-o z@~%Jl)4M;uHfcJh-o~e+YpdUS{A*Y4zjoh_{icS=EO3&+y7`eWr?jBOBRPdeitJAq z_q{*^i}SzZPr81!)9gvg*^fE6?rw`i?qNXAA>T4L9Ga(M|3dJq;jk^^j(|!F$l0AL z4Gf}#bQme;fJY5#9o5$Kq9yj$EVJ)#BCYw6Hd&ty3T0?$zoqG^>9wXz)463TmtfiI zqpz#7wSgK$%)}DG3X?l#VoW815~iAoDwv6$+~wt`eAzC+vI!)~{Gb?mOGU&=zzQpx ztOLWJnvpt?HwhBn$Bikd#spRaIN^~nKVZg$1VLkiu!7pO(o&$7^*%|-l0a*$6-CXe zg%t%xB{9deVy$ISj_E%M+125g1RM*FwG(SMKm6tY?MuIL_p^7T6kpRUZAgIT-MMAQ zn-2VtUr0K>er&Sp^r`RtY2{S{UJ*GSd-2gf{qDzKd^vDWXYSa4!+|#=cNe2R000mG zNklyOBF|QJsS0p;2)r#R&E#ZdbEuI7@A|li^U03(rQJDWKkojp{%@7SWQxrOO zH#LEZVkBxEnCx!4M`U1BAMa1xu>)MJl4^bu2lN3F08?$%h>kjfR;_gGZfXh}26heE zz5O!?8G%eKMk2O&0{$mFSZKp8v^d(mb!+Se4G6VyQsyswEHkZ{%)|UjYgumXwKiAhzG+Bg8`$HF$e-;vL0)OFU$|h_Ra8O~c z(ZkPQ6XbdsZ9H;_Dzp_EheoL-fXNvZPg|AX3>qLy0A)`)tf*qya0PK3I61&m<~3!j zH>RvLIigq{g4_F`#lQlo*SXztc(9MotAwI(ivnDK|2wz;06#McqMEZ z{hiuE-YfrYbCP9%|Cz;%+)O6dMS@0+dFdnP;cmh%Qc6aP0f7s_*{K_16G`u&wt_C6 z%};n4^WQemN^sRegH_OtGK0sIF&1xj&mlZeix@Dha7HF|e5^WyE1q3h93pg)np8V8 zRKMtPQhRH{ch#h+sHL2@w}z+6(+lg|RKGUYCA;pM2DiS7os~mdt(-Q@qK6`s(@93X zs|E$&*Iv*})#zvx*2`IjhC>yBGSm^|fCaIUtKk(UPyK<`9vK^+XeEacl{=j)} zIc>XLiaK!Gg_4+EmDcC&K5*Jfu;KTqm!7=s$-f8wC)bk_;JetTJ>{qExUh5!#0=p}cX^F(~N9yjXp|=Tp zm?U5x636mSLfKvV zvz^(k+p_FbHeR)cSL{J52pLQ!HBA>&7 z+Ul7WPJ%cL;{ydaKP!RSBL`DEfF~+b04V z{#bf;Wp4$rwQ{V#9WrNt*>X(!95W49j?HdT-RY?}kdsvDat!cCI#gkftsncl2mbo^ zzw_G&Ws><16;wGbyRO;)y4UQz#9f^4eCqaFpSUIR%(GU3T~+s6pS}ljo_u-jWpm4;FFsUbQz3$&KY?;m-3zPFt)EyoyQ(*( zP1`qbud3&;M1*=SoB3}~6k8XQCaRJ7j3l;C*tNUR_`Z{8<4?#ivHNMf%2c)neAWmL zGZSu|q2pt@w<4drJ{IM~eyVoQ)~o2muiec3RBtByiT`k6x1Waog#9`8Qz*r0UHLEI z_d?i=$7}wC{ds9h?e2kNe}WcAIle!1rqA6Q3cCs;8NRBwuYXBS|+;6694TdHnmlL%)4qp ztnguYi!QOtaZFfuNpkAnEIhq;lEe7qT_6AI!(ZUsjAK`Hgd~_KfA#*??b&AYB}b0E z^qnWaDL%JK6m}%e=|9}^n>0wHPe`o-O~O2Q=)V8`tN%7hkddvlGw=x68fs0$idW^6 zeIh0ZvF**)ZHNb@*dU@$9OjXCwE}ngY3NVLFiCpJVw82@NvNTWcuHdaXMJo6$AUn! 
z4*xBtX4rAr8mnZdpN9SfGU8TFBkMBRn(C;ssV@nPr&dnMo_<+*WgZ&?{0HoA#tPLb z555De+8DG>Y;kc%ICdL#!^2SZ}SHGf8-J$T1(ax2OdU0!cX* zu`3%nkFOv9&Xc$N?zjH^!K2S~-P7O^nRlbb3s1kuKR5o&k*B}?#5X8T9hmkJo?4bG zvhi}#i=+vE!G3DkRobmxt+?hrD|`KuTx%zeKl#!Vhyn5{7hXXQgOuxJCP4ST?*dF9 z-)Y`blS6+Z)}NRxpvWkH(&v8;%hFWgKTr3gIg9rvbu4iFH+CQ5KgwzCKJ%-!sVV;b z&>HZxb`P>vc$oY|j^9;-4TuE%&tTbk@2atOFKyX3zzLGYcMC%eD;$)(btXwmoDIFJ zCa8;m?-0VjtETebQGeXA+_c8-r>Vlup;4VIrt)c4mB{eBYEo&?M-Mi7V-dA{rK1PD zEAg#~(n;Y}$yZU^nzaogY77yIKoWZ(hB7q4Qe86sTk9T`p{Z?Ygz7Ui440^}y~`z+ zF^^+ZX~GPRm$Nmikwhm$Gdr}gluQ*3&l)KjTAy@=CW%CJ*!bdOxBc$MOYOwOmTLFd zPE`}^y6s&xldpDeSvhz2`TmZFUwCkJZ8dQ>6J(caJup=*v#n|;WLLH+7_p>C5rLBS z@#>H2$pd>X@N8XOJ923CpxsYH^kOtcSj9vZch`!|C{%6D6cBVT-j!+*0^p=Fz8?R? z8DF2FaWI#gwMD}^<5gTgjWPtVpN5kTjOqLgjTJQ6n+g9#GQMDi!3@nnKkZEYVLY|; zG5iNlQv8XtJHdn0i2OMSm*TmKfOcwk);;Q{75$VQyWAAeXOaf+pL;B~w`S^zf@pO% zzmrx9mQ_k)T8cb${>v_)x8e)4?B0Q0tJXiOVA`Ua$}-(FTH^IToKvH#0@DHxgb7nF z%4!JjR)Lv2WhV4%vJR9U2jIi1ApGNSDVUCzyC7b%X~P!5#B>mT%t!qSIqu$36>im)z9#v~ z{nzf^YF}-2Z2j0{FFu^J1-;>L)mhi=TRC&`)l0`;`py&Iq`pS>$sjSXi=wyIHQCm2 z7nDxcB){c?A3m`ALa+MMN1nR-ncEl!PcNfrZHo*WXN74W6;#Q*iWTz%40g?k|HhLP zNln(uIw=8*JBMXah~P->?d?7@&2o}IX?D-f?!*3se5|uLS)q)o8O8$iCsYy0+3_cc zN$dzHgW@wZ-tL43G~DBVX!my8%&*pF69H#3f?)S(v~u_(8lW|1t;l$p*rjcuKPNoJ z_>(1$Q?R$jM3yG1_8dz^`1=1^Wu*f|)j%cvB2fZl?!t-+oLKtWVv)rMPF93=mvo}) z*#&_N&g0qWDwuJ*P@meD`O#G@htwF0LFvdzXT-%=Cc5fpZp=-BF&170Vm4v3INfPl zVAz!nj`b4A*+fsQxZkYH4ycU#9!zkZN0cAAj+YuRi>R zzkA@XlV&bE0u+&JKNIsrjOre^A89%WM(`_JcE0i;?5-pb{B7V?-FJb zm(@M-cDIQYcQF6e{88r8S)3J!C*nYkc}^=nmfc&k(JaofwDeJRnNjNOHTxOvY^q=G zv>uzyiIIehs%?~$r88DzJSQeRo|X1URW@7&oZ zr(f*dvF{@{{Q9N)uJp1^a!#MS|F0AQ=T%zP3ixkEqT*Y~)>;f*G1}QHIP! 
zIPHuv%D98a&Zd*UH#L|NJO)avtm~A7Z3H&RQOhWz>dgA;8B;c}=45Eh>8g9f-^=MT zG?qp%NPAu}c<5W_B zL%A9w)LAp9Z9B~)cXaLO_YU4Qk@fu3F8GD(f92}^*A4$o?_{_`et0FE>!);2p zpLQ~TDaz1TPKOa{2L3a8{fuw*Ct9i)GMJ%BbDnq#yo&8!662o+Mgfu7pZE-o1kHj< z>SIlY0Gbgo9Yo=aGBjWcCqTB13lB35a&v?DMLOu+(8q~a5qQQ~bTc%ey;79%9iO2A zcDb>;*H0yj@h1`fMo!@#6-hJiSZ-Rw^GY*Q^Yb#E@fr2H`)TkxTK|KxRG*D}y`dUs zyDo%@GANxHB~C%3o~t<25?bt;7EWsP#vpPep(j1$76{%?gYr^9LRHMP^U-Q134Cslr(nLj2CjuHl_}VIhKbwV;y?u000mG zNkl-tArF?<+)EnL4d|jf8^cUgTHNg^w{CaRac$$x_|om ze{o>X1+vI7k?P{Tul<)l_;3Ep9l!tOhd!t58q5M#Ikm(n@ap~7?_D_qS#aK-^DjB$ zve%q;$*%2tLeF^Q_)C9&&mT<$U;uz)B(oV>n#EH~5KzD7{WMKZ<}|PibZ7p9>fL@y zp2Q5*GrwxxPt&WrXAvbdP1Sol8S%n@@T-_6YD`u>OV7tjXDm>G{9Zlp30O`XvjBh4 zojM-?#{HE235G+>pOc(fZHNAxvNbBF9h%f7C!h>uYtWy2dx7r36Ly^p4IXBe(zM2W zRpxHRfE5bkxYEHggsLMeVq z1x0jCDQ;5ZiNtdZF9Hg))VpeEUSV|yOz7(-no3<|uh|+GU1do@B144JARd&agYyYB zb_X5H=~4S>QqwSsuH0S82y1nBQ$VY5WIKo_y3v(@oCvee)*v(@^EgI^gyNnh1LJQY zOKwaN$sPVf^#sFW$CB9^Q{v2v*5u+DCE3|W{MIm9ss4_UbXgN+W=3->&K492+eB7e zfJCD7-=lzNr1}d-5?Xqi1`1=1BG95=@x|zB_C?ZpbT2~_*b@WCLBtA zwhf4}cowT&XFpBP?rsMa51bYr!TcP95tA~5_baVfe?rbW$8`0Yh`x0G^fBAv>6N=(0emEKj8j7r8y&5a&d0JeUdhDuh|nHX(dnVp5k z5Jm)+l^ReW;tY)iRm?n#Gc>a^1uV6iJ5p6cFQQn=%h}2_=3O=3iz;}nIu}Vqa%!_} zR5C6*n|v8p?<_&K=p}O4nLl~u&wTIT-K%Ry zAVVV&B?(Wfdy=`w3ojq~%zc0Hg$F)6nc1|ikq5WZclxPBPkgA|U7`(@n3JK2{E4zb zxM!(}j7K1G`~kf?&3T&A)G3WA%(^7P85-A>ps(s3A7k*bnY|HJ4OX1gq>D!}W1 zIH&dw%i=(o^iA7_&)(i0tjJI39psD#JMNabp(uxL)(@O5(6crUAph4S)TkO)zA=-* za@J@SV;9EXv&(SI9KH2#u!7(&CEl|Yb1ZeHgAAczSB`u{YOb06+x?dAr z` zz=5~;R~1i=OZHy&{!2gb$-6&}I8Y#pw&pGf^2^-gw2j@$zjWiJANcMwcT85kzWeMQ zK0|}m1q0FS92u4zjoKPBC>fPNURlN3TUDAn%y=)8M%_}JB^~Ie(8q_fzv2!W6WzP7 zM16~7+cA0>?%sYHY8L$-3JdMx&6V;gbB2}u2~`AgS`j?(-C%q0T{Ys$HsZ{x6c`dh z{)cvF{MmLHY3y)}UkD!?Uix&!_JT(vZzu1|zIj)fIgUPZfVz!Dj6K$zpj zyAoAnWgeq|V;X5iavFiuGNx>eISPo?j-x9XK9Z&{Fw)w*dCS~epFI5d$8P(7Z+-Gx zHi9pmRSq3}_CMV6e}3S_pS|(&53IOJ(qyjlrVHMF)1zN};_&11Blu1(MCJ+E?%uQg z^dCI`hu(PLEtAR3pWglP?;pCSlVC^WT?=H`YHKVl|0Q&d-ccqft?;owY2ru_!N>lD 
zD(z;(VQ-BAQ3Qi_t9$K067*B;tw~2-8F*KXX;wsRH|u7^q0^_hpTY>j2&5e6fU-Ic zQ?=r6+;-GYNeb%SE+Za{Z7vfm-tGo2X)XahNp3zek9racKdAY0xN&c2sLXC6K05>C zfXY_3#fjeGWBy~A(UDErW?>sY8*{Yj|3&{vFg3Q}cXm}ngE+gaB{VbrvXoIxHXb3N zp%kv5iC@|(I-({yxJ|s6^&eCbDjZ<~sO6|s?Q{E8Bn(3tREEYFvV3Jv7^CV>yC_Ds zDPC^~wFz;-=!)5uK@eLVDj!;if1`8SmT9^EKFlu9F{+YNuk6ph`$vCs-=~hRAM=%*xnutiocGqi;zU9ACj?xcC#F3% zr_I-#b^Wiu`G0)Z#qTrLwI7JAnb>7)%?gP0OjpaGn|h+?Psg7iLUgx+)S(`0Kz3(h z6_@V1R9GtejFngjbQOV7S*xC6C}z3AXk(7BB4>n#1KGV@pa%4Ky2dQD-r^BJ#;E~i zB;r2+>%Y!c7U2>#!+}JQT@r)KEn}q)ovfM}5|b-);_cYFB8wl(@2xR&PWVttja=$2 z0{dw?`yOU<$uhazP}62;;^@K8@@(|bWa(sRlHOM5k{CVM@GuG}1fmBDk#H^#_X0q* z4IkoRhK58i*O5d)hQ`Y&aWbOVGn@EiI73r`^Wovx3{AARMw7`-PGdE&b+Tn7ClELL zJzD`NrbG?0q>kOER|J%m%By1cC__`V3g{qQC52-QhL{SvhsL(X?rGqrm)BN5eea)5 z@-*6H#UV$L-=Ez5vDfaqd@`Rf^=#d|?dr3x`_%V7nPq;4#zd$yNdAwT{;h%ejFo*? z?Z57d{nuQv@0zoAofE!RXOg-3;ME_V-1vnDJ`?*6`U+i$&(K&oJN-1+TT^)z`L_8P zU+Ymi85*$vyo|5wYQjD?IfUXpi~d$rV{C@T=nlJT0xJ~>wM101>Ent3*v<*SdqA!3 zR)S>Qb`H~4LB==r@n>^(Gc?KBtC$A83X*b6hK3AZ>g)SeB6aqQ{yszFs^=4vFL$j3 zFPNBnEWfvgrd4*1)z-i5P=WhtqIcC~{Q@S2KBDnQvialLQlr4SW5z{GvrgY=M3dp} zz6S}MknmjAEp7*5`D3-E8L zfjuR4s8%x)5S{o|tPjNAs{Eho=yXrI;!gS(B;j}IamU7YrIc6HW z^Ql{p96NHx#$R^|j{6SX^V$3U0{eLH>A!Y->A}xkboy)kA2B$4*LfG6ap?mG@69PY z*{0fAc!c~3_VKCi2Uib%_2Dnv^vD+`-(Gjl8$Ni|hhMYz5H$A7T3EA3ES5zcOJ4eD;j0>vLo;{t+k$jF&ni6_X%hFh}?{^(CK z_*nGkU3(S3YVc|GY~t@~cd0>rb*PSx%f`0{%JqzuYqBSZ)z&Iy`8ZrUPf1 z{;!ny|Jfy2PVY?i>sEDXV8-rttzd=89Zd|lF$sMjiBy54W&0^#)_>G8RBL`v485h| zVkKaO6-^cj;Zkh~Y%Q$>5=K{04VF%Wb~;4D{73~%xNH^r8=Qs3Qb0}Q*dO|8sHeh#_ST}SPD2M#4%{JiDAu-RlwbfMf6b zjjIQXLy13Md-O~ECppWpd)uB1_gqv7lt5vx8xe>hvHYFsbo*1^{-3}2KYafF&xA{0 zdsp_o^Wyh9yA!X_1i&jQMVhtsvxvJpQmMgZegF+aN#7kCnUm>)<_CBsOf)r^Ar?T3 zT}re`Rg_3%sjjys|F8Ao#6)!bZ&Dgg2oAH-m`PCoD4wo4X2mfu^0 zt9ZI5C;AUo6nRbl*;v6Lf=~na6&@pBu-POJal z`GjeD;z=*tch#E@yq)=RF8oJ+LEyU=#NAl%-P*HAP!pO71`Ya6ll}u zKmFK~*+|hB4kRh8~Ivsaon zZ(P!1648t4i)7b*lkU0b?d8WRpsiL;8y1!`5}}+>J}Y)n#Tk*?Kb*I$c-;@0V@?&1A3*})|wrq>3SQDRjUJgBa*US%@@000mG zNklk1-{+<#`ewNy96> 
zetOT5)_F6kDoZcZSyufy?N8-(^mFU)Qi0}*iVoE ze*=jLOLLVVeeBlXK7aQG7wx^&2seocuRiGsos{WPJgKqZ3xH2B<;c%W?vrk;C+(*RcGY+}#bQ5tKTW^4mab*4 z3b|VSNdrK2ulyHVS;q^E+E3$pYo&(w=cId@x1T0aM*{B}$f+t0zpF;!65me^o@(*! zTVAbc&C#`^V&-sAT94G$-IK1&^>7XTItfKI*@XIe&;S9U7*-z8zo9XrZ(Sz%G-KjAVe3N!?^e46p zwo44V$bBbzXGu|Ub?xXakALmJo(s3~q-xUog?lc}IR}ngnu?A8;f9Dz@2VjsJiDf7 z(7T$zcX^99V+2D*`DPZQ^!jP_B-!x4$eEqp86BZ#pZQ%ic2^nOj{J$1%zq1a`2zhO z_|?v?8Z!zwf8sk5$=UNKvE38yY682r{DVXmuXeZo#Diy?MdVM=yH*JceA;17#k*?k z*iCzD3bZpH3+zH$6aRw-#7qasP-*+t?Xvi>%(P}@%Z`bINN(#Z&SfN-S(uHd~110@0lH(-en*fR(buxWo}w@)d>s`NZm~iiH)(k(|03QiaNspPtXz-F2sN1w?jvtT|2Sn-=sImxInF`)q+n# z`G!d{@uwtpI;16tqT~8_Z6Wdr^1yTV9a($nbe`5sj!CZQiZid7{#o+uC3NP6wm|UV zchv}enzoW`Ni7Eb)cO-cN+V}TJV^_`y$@qhuA_l%O@2crJ2(lrVpT};!3KNla z#w9cboR_&V$F?oo^6=!Vb1bvBW_6aWsk1O%GSh!%YMe#<3>H7y!{=-=F8twgrKu^~ z`=wk&VuV7F0yFJOshZdyXr&6v3yFG#QP`tVYlMITHE6##?XguciH8VOhKA6{^(%9N zR4G5^qR7BmMXtArL}&y^2^7oop9bLNtY*e(A}{w8r0Y#*TelJyk=MvBh_6-Qma1$yKvmFV4D_ ziWN9Vccg=KkMAG4=g9FR{`-j=-jp`C9@Xa+`;#FG7toRaIqayBe< z$7S(jnQToQJyS4G(=Z!D`1&LWNYlJ`)kM))6urSrA1249zJcf=V=xp;KjvLE01b=d zLp-0M2_ruOf-*EN^vPl(Kb=kcxxBP6X@2Av?yV8P-=Q^IZK*fg7*2A$M624e?k)=$ zhFsz@G%iB5jrAixAE8=u$2^faf1Nvm#N=(P?jq9J^5OOAc;cq5l9nv~$cqmhKK9~@ zTj4%^`x(gv^Hx*a`ib=yR-a38Y5!O^?6vBu;lHfk?DSXV4w4n+Sh-SU!qS^1#>HK2mwb~*WwaJnA z65)3?Xvt{kNe6j@gC~OAwaWS{$*I*9{A<}-$OgPf`9yk4r+q{t%p=0gD@h1;I zUJao*Tz=*?zx;-Oxp(CZbNzFxhyK&I|K0bVy)*NuBGzRCyQacjwD;0&n}gN*)wLss zjvkbcg1Y-rfKW37OpbQdi0bb0JB&a2SWp7(MOdIe8TGE3 z4ECD&SnJgt04@K5W=~2V(4F?vIIybKfMXtiM~*)K3$M=Id)_7ITzTSz-G?{kSSnjn zm(Q5zBAslF)c*z175v(GY6t1gP@(m@4;8m6OQ!UQgxMO1u7;n4vS^Hpt`aM(Lb>Rw z`H}^rs~LV;8lat=bVls#-bYuF`f;`f@s~)SHU|&YuVO(-=XS@*|FflC?pj)b+rj`>s6mn$5huX4lr;uRrGp zzwziziL*7i6V}aq>ka3<`Mlld`_qSy9e(_!$9%Tt#h0Jkls5UlBz5}se9o?Or(0o! z{0!TRCD^d_`Fk$hwKaGX#zgiLd0mnXAi=QE0QBkbCFG8__({tJ-Yi@yN>!-V8*Jz6 z^wT=b$zmDtif0n1d%`{*s#vuFG|{W{JPQ0qBFBSAkXW%5*f3ilwGxCt){$e?0scN=xSz)IquuKL3F)3p?LfFctHP1J}I! 
zb?;wWUwii8)3W%n%-))^3fLB7MgQ?^Q1tiqNobmxZPj{vxUd|o$@#b*YKbR}8Gg!_ z87`=8ktR8~O{|z1Z_c4;twoOsDKEavycjCt7wQKNtp?G^^(%YA7`2o%^$wA3if6r{ z6C%XuwCD;D%~lYo4wYv-B}N3!wYyt0q9{vX>!=La72JyOSaxOcI8@uPo!R0bR9D5; zVh4?OM`pHD+iv8D0$K}KtPOrG`tT(Pi?=77yflYEhwW3VyGPVxL%HN{; zOk&`bMD}4@`+qqP@Jc$=Qw_wLiiy=ci=Of|wcn|&$P+lpnm+-yHH!}^hGII4_f2jp z;+hOu)$X7?nh`YbG-IpG-aT6f5jr)rIN`uQ`ZYFfes#iS@+#zIJMJy0i%lAx&b}+N zEA!&ja(2O_lRo$Sb9a63j;Ee|N)|tsnbxF@iCdbL`2US5Sfa?O%pIrGHvHk)uYRGS zXVOLzf0kOBn%ajdOG+lXNm>?B-M@royU6`mVOeBqOL&rvZLv_MzVdafeQLi7m(Eh9 zRMZH)=Nf9C2BUwhW;lUPBHuOGYXncEW)uL76S6E8jLo1R>C{n>B4;lP`jU6L3B!;yDh z{N8KM4g{Fg|L}_sKt+jpg{64zi@S(82A_cc^rN;WUL}c2QBOi;VgeFNlx``dkp1*o z)SqCfUAVGVQlGiXd;VvBnE$F0cLE}%(Meg{?2g?L>R`j@kE%6bcaf+my~E%NK&lkY zJ`Tc&{q{nJS`dxE*!5Bir8$6C1gjwdMV)0(n_aZFad$0Rpp@Y5uEhz(T?zzumjcBd z3dP-v77Y?86o=w2#kDvsuIK4{zBAvyOlI;Udop?UTI;&6l{9BCY#Yi+0r1Y`2_-#E zzv>+a@BPk8dWI}29I(MBW1yUBvD!+DXF9NMbf|f=672)t(hq1T0R{1yhxrzE;W*Sn% z(6bSJ{#>4Ebncuj107#ng zxM7k22HgL*e-rHk#Oosowt#_fRPpjHBE z7?ny#Kq*If4vJCIizKB*$_jSDo&}ptiMJaE(H(%*dnWVsK8X&%DcX-qlTm9M4>QvW zk~)%5@OfNM-=Z{?FxRL3K7&EegiQD`ac^r*>vz_&mR|S`s(JL*8a1rV>3YEx2Xm!R zv(*;<;`s2!tKY=KU*s5CS_bTSUzmu!Uq=~znT49Y!&E}q3SKJcx6BVy8ZPYSljkD& zKu4kwGLs*CtrgMa(OqeVGiA1z9t7JrC&yo_Dl7Z`?WaR5~hUI|-hr4YyARwQWlT zy=;gTb~5Zvbe-nE49Q6xwhRpoKsKA5MekFA$Fh^Ka_GK&;>Wv44!@hZ)3ZQu0uUp) zsd9d7o(OoEcscaXCn^a}T2Aqj@VVmXc0eT z>Yi_{DieOwTP516JCVfQl%8=k-2;?f(m!5`K=7#l$jJ}#$L(?l;OgYuKqddTa*ws- zax=_M7nZRiY??Rd#oedCm5LN0|C5r_#0TmI|H1s5b@+&T&auM&FT!q6H!}-QoZ;+b z=dRZMlN5)s3%=sTTZ;-%VHplV9>UimYng&S;p@Mt3WPi z6_NlNmC;6E^55la|0}Fn@Kx(bMSJO~jumlK2<>FFR@ZcWtI5(h}+G z`)mNhw7h|eJzo#U9hQH4K2J1p+dcuW`~B@T?Es*W^J7ozt_wEO4vU6Ad~MaB?!Q%$ z-(6m;F)@Ry^?TfcbqyeHk#pW-tm50R*42;l0}~$6dmI@Pt}!?=waI}Jm>S?OG4Hjj zt0#i4LG_6z%d6)S3-!7C5ZaeUw1>qla2Z>OX^k6zx<>As=pIS2bhdoGIbLI6URvzz zWjQUkDbM(l69m7VvCp`q-|I8-EZZU7&0=xk$7FHv6)VtlADSz#6MCZ@FPiqzS@knZ zv&J$Km|nr;mif=0snE<_;xjImA1~RqABY>UlZ)3qhU2A#6(7SgA(x8gZe--TWv*wH z@MucOA=TYi1j;9LIe`s^?h5q^`m27SGxO4KyT#E)TyD#XM9TglRnuH#Jkn877be 
z%)R-Z8hn-K@HzIj;I_Dg3{Q$Xi&K#K6SDrb-g76P!#h65n=|NF&fdB(>^Mzjzb{D14ygx=@$C; z?7^IeVaitmRsJu|5_5~WyCpdAC&`9Dj=4RrQr+t+q2kMrd|9QO(uYUH{5(thHE(^< z&J5$Ok3UEBx1WVqH!V1x`^EBGuZ9grI&+u|&fPp_prub4ccz?Uf=*VC zMZ>e_-(u5yfFHM~wCJsLrhWAP%zJ^MnDcg=x7TwPtQ|$f@b_)hGRu|Uv#jzSF(t2A zah>^uR;`jCd!_Roy6E^x zn+MF9X|fH8^-5QEe_vi6Ps)9#wDC9*{!MnV8u?@}S%#ZAwib6E@*I^KNplZIhL}Ew zSFT!{8YYzD)pAW~XYDt<_r@oK<(JuN%QOF`bZ_Ltjsp&_UJ++cp51Q(Iz9^7x!C$S z@p6&-`st5Mes3*a$=})6Bl6A4! zm9F`_JHJdPg1`Xtq2dN)`D)>g4tw9K4-tx2(;-3j3tTMDe+k+!27J3^{73^OaR(DV z12R{~vN)^AwCL^S-5CGrRHVDqc+uoGKI1_ofLY5KU3zd{iS#gVw%(LYWH4*1!}!%< zdb&pQx8T0`94h$A8pCnO!+h%1m1nDOy#*8M`#l@ZVikV>Q-;gWwDyiQefnOf?}Q>= z3FfoCx}lTkKwt24EgR0n5-niMFwT17-Q-1(GjlDdq0!1$gj^fXF3; zODgw=s-?xoACcd4s(}Vr0UrHs2w!p)=;TS23o>amW>^xJq2R7fI~89YU?WcLuKqMz98|RDL{YLw!C^ zc=5R5Y0>QLb*@&ky}F%Rf4Y8oI>5wzx!sj$wto2CaLiS4lbN1(KYR1M^Aq>!M9W+x zz2x{CnY&D^^^F{nB5Y;DeIlQTD8^jc~mmjaRkr4EgwL~_`V`IJ}z$K?*&nX;?WH4agcw5<69mG94{hrFYZH@E!g z5np-FRMV%bMdvWAHnCK1RMVFOlM^mO!J-u^_q16g=jvh|`6%CMXk2tv>8&j)o@i>h ziq&x>S-S6H;etT@4$Y-&$DW6(@N!AoVfK01$XMLGV~S%Flk{#);?m7<{i2^w1GAWQ z&iQVzzgFuPEUrou9;Dc^N%5l3iTrsuK6OdOm`(CqsRI zFK%X2-)(`;?nCFAiyJ3XrS(akyAi`T$TRSS}oemZvWA=~!x~-1OJYN-ip8_1! zhoiZdLY=K+WHNH@H99i#GjKOdEire+i_%nt0pyw!(#fYiFZ_4<&iSsWk`Gojn!AFP z1^JRoq6xYmR9K$4v2HZ}!s8A3az!`&QytG5HXDP4;*>Ot$!Aw#ptdp+^^Q>L*pnpc zQ?1ob32uzU=xL|)(nVIIsUQBPbCS;IZ4g3^O^Kb>nf+O)cpdMX>ss?^YKQ& z`QDUWY&*ug>w3U`;`?*ciN8qi-~gAtsIbrF(y+(Qx3h=Y!a3{uO&tP;{`tm~?)r-F zz)<&DqUCY^JJrLQ=SBQCi8IyxQ*q7?F8wmY(0swi#f8@oBl~ABmmcq|B1q2o!vt;q zGI}N=U(k{Qu&?$nm8uD%;zkoQ3yPKU7+)R{B zY43&l*pIlOqGr?gj8tQ`gXHBK8|i}j**|5hUYTE~Ac0Dv!d6w-VqFMx)n!wTH21+? 
zhRjTW0mnq^B!90{-ZGGHQ}+o2W%_v2G2(Aq7bPc+wW%zHd6?EK_g6L|c1whTKGZY8 zo=dR`3(@uOm^v5-h5ex<+Zh$^)G!E$5DOtU8;mhgU~;4V%4x-RNjl#5*)(9lY9Y__ zso$SN#D0IN;`@%f1-I)?Ju1%X1CZPFa=BU(tURjN-1tqEk~4O-|I6hrQ}=A8K4?9m zv92LhK3~Y^^7pXaLJKv_m(k-Siu15YZydg8cGI^u`rVL_&-+%lnZcmA_Tn8ZL4J?k zBut~Pq>_g9r`+-|Uu+~dNPf&Kd+*MG^Z8HY`^_Rv)~Y<^149>%U7QDJO{3XV&u@fVWsr-6#H~ox#}~y?{fm(Nhw6_7Ava<>lk2Y%E`Y6}?A=l{yYr+Ox{Q<~ z=*K;u%qPX#$YJ^{JzYy;GEH_7k!lutyVa?vIBWhdKl~#j#a_?VcVfYmem8V^EoNJq z{=51yNO1e8e_3)JTQbbiRsPW0NZfT}i4mgU=o^$gc%LHv;;sj&3*6Nno0R1P-s+Sv zFF@{8I<~NY`9Du62tkD9mO{1^WBTV#=|TG-H=tZ{C`hGVHn7Uc9AHKWqT|P0+$K(_ z!IDu?8euM+i^wZ0x!~Q zS%~lb+i;|uY-3O6HIF6H?AawXjNf_czx>)8&)|8vZukYV%ole*vTFV2v=Y_)EDjA(XvxR13}{?rV;?8^ z8sH|ThH`H!a3*qX%r~E(wEFPGdkLb?IVm4MpBpIP!!8{5N)g5-*l#@{np|S+F#5I8M`Bz4OGYzd~SGoOWSDwZMvv+6zNXSF})gM-SfPv>)?G@E*2E>*y19RPDAiO*uF$y`oMHg zxbS)Lbk9r_Xi+id^j*&gd3ir0or9ib@=%Tw0#kP{+wiRIcZi`;w=htl(R|D)_B$3? z>HtiEnaV+5cXwSoZ%S66X3{eisfT`lv`}|T21_eu`$mrjDIvF|GQuZ|Fp=h|Hy|N% zMHd}zWs9X7(*HPnKgmrd)HciomeyA25G0di5mVd~L*L!3f=5xr06Y~_#Jg-p9>m?@ z0``V@#=E6!+Jpo>|Jo=|h0-OY$OL)G#^Vf0gr`(-wUVIErT^Cp7-0f%_S@wH_7BA@ zNh$qsT~q>JTDJY`{I(g$y_X!!Evc8YZD#B7Zf0`)YKaKKcZC7TS*Z zQo0WQ?V$uEvZtE?*GO>vDeJz_le1(rB;4c}7;NabzU9cqpFnhWHsv==agb2uoKfbD z`Cum((t9CM=n(=%B^GIF@Uk|bSX4j@P*%O9H!IH{kE6Q(>&p>3`TR@#(_Lgfr|K$G z!O?cDpHr7W-pFaH*wJpH5ql8pU(ybNByukA{={089OE=@aQVQACQHB0uhWIh%3R(E zY0F!Ir!Vo0$*Z_cdf}x(pKLG)nydMwB|l4r>PUA)7lL~KiP0M}6|8TxQDhBzcV(dr zyWr15)$s+JSB;6a-N(&E^WwUd86JZ82tVemdj;ocBPO-X&g0~4)2&QXXS08tS{^fS zqXFgRw_1#z4UKJ;UpjvO*#dX|$_Vgst$5#ZG93(s znA`U2rYbM@P{QyI&>>uVSUzuamFdJvZV7jT?i`S?r^bv*xhxe#?#oFb{1#^32eCqz zAR%ti#<5FU+M6%#>-mxnEKPMNLE*G(|C4(PqlL^IFhQ4*>mkqrQ)lsfA$6;@F>xn1I&T<)5yL6Oum8B)< z{9ipfL-N-J2o_NjH$@z}{8y4P1y{wt*E6q#93$QpVM)b)MG#`z;c5ppz&h;(Wm2L> zMqVg6+5|X~`hVwbh1vQPqRAfWPi~ovE0nvEk*CC|dnsZyDepnQb0%6?dqTBhc6Ai-!D%r z`RaCjXh-vW9FiDnDpR>k*2+K3*J14YX5Xo&l$|3byPe3}arUEjIJM!_f2amD?U6+C zHPnKEbL( zWMJAmvrctqOVz^KvBfzC5kqR(xqmPO%ff6AV8>yZy@^(=mzxfjsJUtG0Ob3{&nVUJIVGt*(vp*v<8^Xccd)iL5w4}Vxp 
zZ6r+CMe_0tecwlDLoJMp(l2%eS<~KDtY*F|?9XK6j8NW7F&$eILw-iOJ0o{>Sw#Cv zn=kX)&~TPo?+N`*4_&0OrBx8--2rUVI@I;|!hQ6G^A#i=>V9E1XA_TO7O&-_qpJzbU3~0x=>7ocSq#1YDe_gtxbaz{(#jDizy2>|tRR`;=6eiI7E1(nVMG@^7M?4fw z_Xf7ORqdZN3*@!O=hf64o1H!x(S(4A-yU?w1ZhUq==4Q$=JXnZ%C_^VehhED+vNS0 zKd$#7;*df9+S@{aF05b>eUJ5rmk*)+{AQP2+MNUd zO{y87L`!K?bic1qblH46?1{-%)fY(psqpM_2#pl_{?JB{^O>Z{;hH`k2Y590IDQ@6jO$zfie4tzUl1V#yN{5@z1-G`Yc2znP{YV1Zzf5Ajt#GYuCEi9?pE8bt^ZOM8?Olf07LOd4LxF^4Tq-F}5Y z-w02!cG`yKb2t_AmD}7TqOKhAMi5q!kOKZCjbmassW2*Z97WEtJ52@0+=T4s0WSRT zuS(+nu}t#P$cQ8H%#(4df|XYvQxV+{6ZPyPA16>^x{b!*gbWG8D`Ple^r)qG=o}W$ zD<}rhkKaO51qIn8Gf$}Xj2eogJ=()R(GuYBS~MOpp$IU*lh5bsq!bAZ!wDPyT319F zau|x2aJ$28Z9##+PJV7MHv+zswKv?b*s^hpV$^y%)>mS6aB4dn%beX+j{0OY0u}&d zZH|Z`n^RbyNzY|&7SfQDmtB~d%U-DJ6=nH!lbnxq-s{&F5u-!fHccm&oVTr zckag&X+pw@Z4Ezn%74e%Nn;!Jda3WOiQuE7>ha*%&KBpOqK>GePuBuf)X& zB^j%J5M40H&_yI+O8uGGtF=5U)7|;JrA>wXK!jr$BOvXrw;_iSOtWvX(JjWohh*ilm4wG z-$>;mqIg3QByF%9Sa}U3paUGFP%h+GmNL&-ZfnU;%k6H)Vc!lQr{!elNRNv4X6XP) zhu}@kTE@r|dRQ|*a5|n3_uTi=!-8{$k80t42@00D6MG7APgbVdLVvZ`A-)+_p{a2T zG51>PF(-JRA!QEpm)f*bm1Zb>;&pw;@5&unghJ zt+ekrK&%^z%g@T!^7VX0u9dRbl-={f{-Ao5F*toV%S!hHM8l`$T)>r?UVS>q@iAI( zl?ohJjebu5yVdcue13N+0Trdmt3`fy4w#-XkqQQ}0~=jv(Q-EZqIfvqw&i zZQA(swO+xcxOin)(kr)n6_l#Cz36p=c!xDx7frmjq%?dv{cp3&h8MLJl!QFS68Hu- zD#Ni|;s2k6zJk@zfJc&GdKi<)NFfOhzn#-1c03f#F{ndorkH>|8jHY%8V!`o3oCjwi^ zSMV*?pVY##tNKqtAGhj`%8I|0Iy0sb>Rd1_NOX4lYFc|Ap@!vlI&0{;IKircZ^{xAe6y}S(~0=gay&=nk`pMU)PaY{%t73mR-w%-xU8ML z8)>sGcM1^?6Jm3W+6f&mgI!nFjIzh9F&yt|Dcn0(RFH*?7bWVYNOlm&YJFddlPq!d zhsJGI{dha_;>p8{-2||U<|c)X0b!M1i8~JTXxB6J4KRyBY&31;r>>RSNg;wW5M!x1 zT#qcT7Q9?vf9y6luSSI}$?tcTHUVLFpCEJsTzkjm=)OIdU*vecUy3b&Fs*DL%muoe z<9Ka&%B@M7fMTABTw}oAVL$*GazU20cxn;{fYI9Hfa8PAbf@bxp}EC>Di8l(;1sOm za;Ah3v-5~=^Qt&H=;yl44T6KY1CW62>qs8n_;Bl7uW^yuKrna45cfaR@#&O$;$ zfbQ6Ut`lTMBw&I0CoE8vy~XL*MjqKODBq1S-waCAj<4fvrthsK5DGCeeiA-XTya1} zTf|L)x+2EnI3i6{*tX8uZdv`2_|TW9(=8^0ZG?jAMs~&Jzm+$+-s= zh#>5hUAv8p)&>FXn8nihZvaY+L&OLFir#wgqpl{s?t3$DO^Ie?$z;Rh$FXlwj8L{h 
zW4;)W5j(uXZi%@r4JEiyE1R(`6<%?Ih1VhZ7ZgkCY|neHDJxS~NNW^~YP`Vv2m((> zOyrsNXOpgi38h!mJg#SWU$0z%x?iaEg-_KHVhvv_8?OT-bpwQI!5P8xz@BiQ0%8#ZtI%G?ppe@^3xOrO z>y>A*eE@<#ngUU$!80U^09Z|R?YH?2=m1)egOk;iXPH;)*$-+;1)FekW8ythl|9i? z4R*4a%1*ep#%E*40xNsPa3xkgMh=F7L+^e%c{WWXrI{bT5k;i(=lt!51ssnwq&p6N zXRL})-u9FlWsZ6Ep_ST;7yv{gNythjlECLde2W>$Uder>da-x>9bHR`d}v8)aOShR2v*C41E@Jw%qy+2ZuhWC@ZDKLV>3m zg7!WfD!n}!Ooc&>9PU!4!*J2)bE~)GRz6h*qSDc-cQ^QthZ^EWQqd&-?ANTf)%dg0*o!?%<_IDEgADF)BQ!6>We#j6$kXg- z=~}c;3n+c(oCUjf-Wx#)Y%_)C;q+rH6=n~|XJh6ufN;iKOw4P>i- z-GH=M6rHh{jA7oa>@_&y2<%$Xe!yNNAG=sZ_3Rq{pYInMJTS>^-s++sW3;~yWuwR) znwRyspZazYn8ZjlK4dw_hW-P4F*RKKa}=J6X@m&}_l>}F@sOQ#YXEnRzMPjha|EdY(u?#! z2~|g0HKT|DJldYuO_q^06JV8u2OsZZ;{xBg*~X{(w! zSvsyBZ||0y$wu8-m^E+uln}WgMmLF3ee*W~6KQK=lOs2fy&rsJh(k?tWF z5nmzmAz@c4VTu~FJ0>L4tQXC7^kDo#1A+FFv6HagE>cZF(Js)bef$u!qCFF&JJy6$ z^8~Z&`>loq{~qmS$ciG*rXxKU;Zf&cPZR!&^-6PVol|i@MTka2R*G` z!aQj0q+I6+nve2m@+}`B0+5?C*gD$X`=bi>q?~PySyswkXkl=DR!*lt2Nuk|`_@96w9c!= z+sj`PNPJdaL7fEv=((?VzwS;3W*LYGq*^hybGwnZYRPovqX;@k5oQt^TU~(J6&zqB z$cnFweTB01+<~*{8zOUw!$P0Zc9X%Ll2Bd1 zP`0vfxRz*Vj9xZI2qL)Fj9$}t6D;9{iHnY@>v;(2t5>lMY-nxmjbDXeWFU)CYdOf) zS|745$8S8}H(kiH=Lh}1iweek4Q#a=ZXSJ{$|J$oQ?a))#sOA<3zfqfbAy{<{GLq` z>Yr*~mMRSpJ_YvBxG+QTMb!CFZIIZOIm*(%$f-Lf$$(>Ra1A8iurMTt63|?hw>3ZOCZGP$-G3#Fp8pA2=6yPViedd%&MJq>TR9?srUcF<`H1(Q|3b&A~XbdgD zs{O`d_Y<$k%>IRMQflW{XV;YGVv<@Gm7(tZ!h$q9sTd%0vJfvz@zYi1@DsdpZZA=E zv1RoJv=QsA&408*BtWlKu5!^n^-)I^VyV_?4Nf8U@G{G2ZWCw7gR%?FTV>-NL-Dhw zBBv32)NYo27|Z!%%SJj`X@ceXG-UZCWVRLa=1yq`56|GK&MA-R&2Dyf9hxKYnf00K zBIo~=z;BmSk`LpLm2!b#TXL$+5Q@v^q8Tw;SS8H=bl70%noOH(1bfuOIm*n@u2Sg1 z79m32`3j$T>umQ+)XSH`=m6Gjj))frSAb}FB3j|wYa%26T(FduBKeK(3F}j-buBS_ z-o@MJ2Q1YJ9izx-ZeA&0r8VuDvvQ`9+kx0d_=p;EbRJ&BCl4U_cEz#vP(jbp-vP=) z0%~u^bGwyGqH$>Snp_w~}k8Vee zQ!J*6Or?a*BqGFhS?MyRWCK<=8;MWkt@WR~AYSzzCz$)K=854Wzn84oPP<9ut-aO@ zoSmO2?2Di_x~fN64o9t+80XA4Vag{QEZQGGMkNHdx7!(7>bdbQ4{qkZwQ<->)vi{2 z^`~;;#igQ&?8hKsaM$13xj}MHG@=ajedh8DW}*F^o#Q*g9f=Y2v7+@i&x2mz_7cWh 
z(q?Wy;&s5DgqAseE@4!c4OGphgyslJm$&N|pdDR(wG+-JYfQ)Zjzh{YMPRxmi9pL+dp#G{Ql$nd3z@>OzF`|2oN`XlwkCJ`%<{cKu4sn z)Ke-}iP24tuf5&LZ&yE+n(1m-$f#TJ&*4BOR5GB4^M&(wa%)4j zSh36HUcx>}Q-AvWNX-^r1%*U`rl==#l0BfW7Y$&c9vS2e26y89A4S!tZZ!RrRUN!# zM9C2%uUR!Dnmawm*T`M{+=J?#e%}8j!kYWe@K42XYUFkR&RNAaB2L!Ye)|Rf_UtJ- z!|o83Pr~~M?gN*M72dR=8xWf0cNehUR?nbbfjXH=)F-4AqzFArz`?=2yhQpvcK|O? d{$1%1;U>9UaS4&oynuJ$6y?-pYo*OX{s&BzWFi0n literal 0 HcmV?d00001 diff --git a/observability/local/docker-compose.yml b/observability/local/docker-compose.yml index df097fa..8ea026a 100644 --- a/observability/local/docker-compose.yml +++ b/observability/local/docker-compose.yml @@ -60,3 +60,33 @@ services: interval: 10s timeout: 5s retries: 5 + + log-sentinel: + build: ./log-sentinel + depends_on: + loki: + condition: service_healthy + ports: + - "8081:8081" + environment: + - LOKI_URL=http://loki:3100 + - GRAFANA_URL=http://grafana:3000 + - GRAFANA_USER=${GRAFANA_ADMIN_USER:-admin} + - GRAFANA_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin} + - OLLAMA_URL=http://host.docker.internal:11434 + - OLLAMA_MODEL_FAST=deepseek-r1:8b + - OLLAMA_MODEL_DEEP=llama3.3:70b-instruct-q4_K_M + - SENTINEL_POLL_INTERVAL_SEC=60 + - SENTINEL_LOOKBACK_SEC=300 + - SENTINEL_T2_ENABLED=true + - SENTINEL_T2_PROACTIVE_INTERVAL_SEC=300 + - SENTINEL_DEDUP_WINDOW_SEC=300 + - SENTINEL_SENTRY_DSN=${SENTINEL_SENTRY_DSN:-} + - SIMSTEWARD_LOG_ENV=${SIMSTEWARD_LOG_ENV:-local} + volumes: + - ${GRAFANA_STORAGE_PATH:-S:/sim-steward-grafana-storage}/log-sentinel:/data + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8081/health').read()"] + interval: 10s + timeout: 5s + retries: 5 diff --git a/observability/local/grafana/provisioning/dashboards/claude-cache-context.json b/observability/local/grafana/provisioning/dashboards/claude-cache-context.json new file mode 100644 index 0000000..71ce6d8 --- /dev/null +++ b/observability/local/grafana/provisioning/dashboards/claude-cache-context.json @@ -0,0 
+1,844 @@ +{ + "id": null, + "uid": "claude-cache-context", + "title": "Claude Code — Cache & Context Health", + "description": "Cache hit rates, context pressure signals, per-turn token burn, and token budget analysis for Claude Code sessions.", + "tags": [ + "claude-code", + "cache", + "context", + "observability" + ], + "timezone": "browser", + "editable": true, + "graphTooltip": 1, + "time": { + "from": "now-7d", + "to": "now" + }, + "refresh": "30s", + "schemaVersion": 39, + "fiscalYearStartMonth": 0, + "liveNow": false, + "style": "dark", + "templating": { + "list": [ + { + "name": "model", + "label": "Model", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json model", + "regex": "model\":\"([^\"]+)", + "refresh": 2, + "includeAll": true, + "multi": true, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 1 + }, + { + "name": "project", + "label": "Project", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json project", + "regex": "project\":\"([^\"]+)", + "refresh": 2, + "includeAll": true, + "multi": true, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 1 + }, + { + "name": "effort", + "label": "Effort", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json effort", + "regex": "effort\":\"([^\"]+)", + "refresh": 2, + "includeAll": true, + "multi": true, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 1 + } + ] + }, + "panels": [ + { + "type": "row", + "title": "Cache Health Summary", + "collapsed": false, + "gridPos": { "x": 0, "y": 0, "w": 24, "h": 1 } + }, + { + "id": 1, + "title": "Cache Hit Rate", + "type": "gauge", + "gridPos": { "x": 0, "y": 1, "w": 6, "h": 5 }, + "datasource": { "type": "loki", "uid": 
"loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Cache Read", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "legendFormat": "Input", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / ($A + $B) * 100", + "hide": false + } + ], + "options": { + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true, + "reduceOptions": { + "values": false, + "calcs": ["lastNotNull"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "unit": "percent", + "min": 0, + "max": 100, + "noValue": "0", + "color": { "mode": "thresholds" }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#F2495C" }, + { "value": 30, "color": "#FF9830" }, + { "value": 50, "color": "#73BF69" } + ] + } + }, + "overrides": [] + } + }, + { + "id": 2, + "title": "Cache Read Tokens", + "type": "stat", + "gridPos": { "x": 6, "y": 1, "w": 6, "h": 5 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": 
"Cache Read", + "queryType": "range" + } + ], + "options": { + "colorMode": "background-gradient", + "graphMode": "none", + "textMode": "value", + "justifyMode": "center", + "orientation": "auto", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { + "values": false, + "calcs": ["sum"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": "fixed", "fixedColor": "#B877D9" }, + "thresholds": { + "mode": "absolute", + "steps": [{ "value": null, "color": "#B877D9" }] + } + }, + "overrides": [] + } + }, + { + "id": 3, + "title": "Cache Creation Tokens", + "type": "stat", + "gridPos": { "x": 12, "y": 1, "w": 6, "h": 5 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "legendFormat": "Cache Creation", + "queryType": "range" + } + ], + "options": { + "colorMode": "background-gradient", + "graphMode": "none", + "textMode": "value", + "justifyMode": "center", + "orientation": "auto", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { + "values": false, + "calcs": ["sum"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": "fixed", "fixedColor": "#FF9830" }, + "thresholds": { + "mode": "absolute", + "steps": [{ "value": null, "color": "#FF9830" }] + } + }, + "overrides": [] + } + }, + { + "id": 4, + "title": "Write:Read Ratio", + "type": "stat", + "gridPos": { "x": 18, "y": 1, "w": 6, "h": 5 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, 
project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "legendFormat": "Creation", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Read", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / $B", + "hide": false + } + ], + "options": { + "colorMode": "background-gradient", + "graphMode": "none", + "textMode": "value", + "justifyMode": "center", + "orientation": "auto", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { + "values": false, + "calcs": ["lastNotNull"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "decimals": 2, + "color": { "mode": "thresholds" }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#73BF69" }, + { "value": 1, "color": "#FF9830" }, + { "value": 2, "color": "#F2495C" } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Cache Efficiency Over Time", + "collapsed": false, + "gridPos": { "x": 0, "y": 6, "w": 24, "h": 1 } + }, + { + "id": 5, + "title": "Cache Hit Rate Trend", + "type": "timeseries", + "gridPos": { "x": 0, "y": 7, "w": 24, "h": 8 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum by (session_id) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap 
total_cache_read_tokens [$__interval]))", + "legendFormat": "Cache Read — {{session_id}}", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum by (session_id) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", + "legendFormat": "Input — {{session_id}}", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / ($A + $B) * 100", + "hide": false + } + ], + "options": { + "legend": { "displayMode": "list", "placement": "bottom" }, + "tooltip": { "mode": "multi", "sort": "desc" } + }, + "fieldConfig": { + "defaults": { + "unit": "percent", + "min": 0, + "max": 100, + "noValue": "0", + "color": { "mode": "palette-classic" }, + "custom": { + "drawStyle": "points", + "pointSize": 6, + "lineWidth": 0, + "fillOpacity": 0, + "showPoints": "always", + "axisLabel": "Hit Rate %", + "thresholdsStyle": { + "mode": "line" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#F2495C" }, + { "value": 50, "color": "#73BF69" } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Cache by Model & Effort", + "collapsed": false, + "gridPos": { "x": 0, "y": 15, "w": 24, "h": 1 } + }, + { + "id": 6, + "title": "Cache Hit Rate by Model", + "type": "barchart", + "gridPos": { "x": 0, "y": 16, "w": 12, "h": 8 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "avg by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens 
[$__range]))", + "legendFormat": "Cache Read — {{model}}", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "avg by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "legendFormat": "Input — {{model}}", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / ($A + $B) * 100", + "hide": false + } + ], + "options": { + "orientation": "horizontal", + "showValue": "always", + "barWidth": 0.8, + "groupWidth": 0.7, + "legend": { "displayMode": "list", "placement": "bottom" }, + "tooltip": { "mode": "single" } + }, + "fieldConfig": { + "defaults": { + "unit": "percent", + "min": 0, + "max": 100, + "noValue": "0", + "color": { "mode": "palette-classic" }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#F2495C" }, + { "value": 30, "color": "#FF9830" }, + { "value": 50, "color": "#73BF69" } + ] + } + }, + "overrides": [] + } + }, + { + "id": 7, + "title": "Cache Hit Rate by Effort", + "type": "barchart", + "gridPos": { "x": 12, "y": 16, "w": 12, "h": 8 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "avg by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Cache Read — {{effort}}", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "avg by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, 
project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "legendFormat": "Input — {{effort}}", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / ($A + $B) * 100", + "hide": false + } + ], + "options": { + "orientation": "horizontal", + "showValue": "always", + "barWidth": 0.8, + "groupWidth": 0.7, + "legend": { "displayMode": "list", "placement": "bottom" }, + "tooltip": { "mode": "single" } + }, + "fieldConfig": { + "defaults": { + "unit": "percent", + "min": 0, + "max": 100, + "noValue": "0", + "color": { "mode": "palette-classic" }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#F2495C" }, + { "value": 30, "color": "#FF9830" }, + { "value": 50, "color": "#73BF69" } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Context Pressure", + "collapsed": false, + "gridPos": { "x": 0, "y": 24, "w": 24, "h": 1 } + }, + { + "id": 8, + "title": "Compactions", + "type": "stat", + "gridPos": { "x": 0, "y": 25, "w": 8, "h": 7 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, model, project, effort | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | hook_type=\"pre-compact\" [$__range]))", + "legendFormat": "Compactions", + "queryType": "range" + } + ], + "options": { + "colorMode": "background-gradient", + "graphMode": "none", + "textMode": "value", + "justifyMode": "center", + "orientation": "auto", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { + "values": false, + "calcs": ["sum"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": 
"thresholds" }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#73BF69" }, + { "value": 5, "color": "#F2495C" } + ] + } + }, + "overrides": [] + } + }, + { + "id": 9, + "title": "Compaction Rate", + "type": "timeseries", + "gridPos": { "x": 8, "y": 25, "w": 8, "h": 7 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, model, project, effort | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | hook_type=\"pre-compact\" [$__interval]))", + "legendFormat": "Compactions", + "queryType": "range" + } + ], + "options": { + "legend": { "displayMode": "list", "placement": "bottom" }, + "tooltip": { "mode": "single" } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": "fixed", "fixedColor": "#FF9830" }, + "custom": { + "drawStyle": "bars", + "barAlignment": 0, + "fillOpacity": 60, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "axisLabel": "Compactions" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#73BF69" }, + { "value": 5, "color": "#F2495C" } + ] + } + }, + "overrides": [] + } + }, + { + "id": 10, + "title": "Avg Turns Before Compaction", + "type": "stat", + "gridPos": { "x": 16, "y": 25, "w": 8, "h": 7 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "avg(sum_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, model, project, effort, compaction_count, assistant_turns | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | hook_type=\"pre-compact\" | compaction_count > 0 | unwrap assistant_turns [$__range]))", + "legendFormat": "Avg Turns", + "queryType": "range" + } + ], + "options": { 
+ "colorMode": "background-gradient", + "graphMode": "none", + "textMode": "value", + "justifyMode": "center", + "orientation": "auto", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { + "values": false, + "calcs": ["mean"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "decimals": 1, + "color": { "mode": "fixed", "fixedColor": "#8AB8FF" }, + "thresholds": { + "mode": "absolute", + "steps": [{ "value": null, "color": "#8AB8FF" }] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Per-Turn Token Burn", + "collapsed": false, + "gridPos": { "x": 0, "y": 32, "w": 24, "h": 1 } + }, + { + "id": 11, + "title": "Per-Turn Token Flow", + "type": "timeseries", + "gridPos": { "x": 0, "y": 33, "w": 12, "h": 8 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, running_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap running_input_tokens [$__interval]))", + "legendFormat": "Input Tokens", + "queryType": "range" + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, running_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap running_output_tokens [$__interval]))", + "legendFormat": "Output Tokens", + "queryType": "range" + }, + { + "refId": "C", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, running_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap running_cache_read_tokens [$__interval]))", + "legendFormat": "Cache Read Tokens", + 
"queryType": "range" + } + ], + "options": { + "legend": { "displayMode": "list", "placement": "bottom" }, + "tooltip": { "mode": "multi", "sort": "desc" } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": "palette-classic" }, + "custom": { + "drawStyle": "line", + "lineWidth": 1, + "fillOpacity": 30, + "pointSize": 5, + "showPoints": "never", + "stacking": { "mode": "normal", "group": "A" }, + "axisLabel": "Tokens" + } + }, + "overrides": [] + } + }, + { + "id": 12, + "title": "Turn-by-Turn Output Burst", + "type": "timeseries", + "gridPos": { "x": 12, "y": 33, "w": 12, "h": 8 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, turn_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap turn_output_tokens [$__interval]))", + "legendFormat": "Output per Turn", + "queryType": "range" + } + ], + "options": { + "legend": { "displayMode": "list", "placement": "bottom" }, + "tooltip": { "mode": "single" } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": "fixed", "fixedColor": "#FF6D00" }, + "custom": { + "drawStyle": "bars", + "barAlignment": 0, + "fillOpacity": 50, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "axisLabel": "Output Tokens" + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Token Budget", + "collapsed": false, + "gridPos": { "x": 0, "y": 41, "w": 24, "h": 1 } + }, + { + "id": 13, + "title": "Token Type Distribution", + "type": "piechart", + "gridPos": { "x": 0, "y": 42, "w": 12, "h": 7 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, 
effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "legendFormat": "Input", + "queryType": "range" + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "legendFormat": "Output", + "queryType": "range" + }, + { + "refId": "C", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Cache Read", + "queryType": "range" + }, + { + "refId": "D", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "legendFormat": "Cache Creation", + "queryType": "range" + } + ], + "options": { + "pieType": "donut", + "displayLabels": ["name", "percent"], + "legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] }, + "tooltip": { "mode": "single" }, + "reduceOptions": { + "values": false, + "calcs": ["sum"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { "mode": "palette-classic" } + }, + "overrides": [] + } + }, + { + "id": 14, + "title": "Token Efficiency", + "type": "gauge", + "gridPos": { "x": 12, "y": 42, "w": 12, "h": 7 }, + "datasource": { "type": "loki", "uid": "loki_local" }, + "targets": [ + { + "refId": "A", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": 
"sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "legendFormat": "Output", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "legendFormat": "Input", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "legendFormat": "Cache Creation", + "queryType": "range", + "hide": true + }, + { + "refId": "D", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / ($B + $C)", + "hide": false + } + ], + "options": { + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true, + "reduceOptions": { + "values": false, + "calcs": ["lastNotNull"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "unit": "percentunit", + "min": 0, + "max": 1, + "noValue": "0", + "decimals": 2, + "color": { "mode": "thresholds" }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#F2495C" }, + { "value": 0.2, "color": "#FF9830" }, + { "value": 0.5, "color": "#73BF69" } + ] + } + }, + "overrides": [] + } + } + ] +} diff --git a/observability/local/grafana/provisioning/dashboards/claude-token-cost.json b/observability/local/grafana/provisioning/dashboards/claude-token-cost.json new file mode 100644 index 0000000..b32ca30 --- /dev/null +++ 
b/observability/local/grafana/provisioning/dashboards/claude-token-cost.json @@ -0,0 +1,1574 @@ +{ + "id": null, + "uid": "claude-token-cost", + "title": "Claude Code — Token & Cost Intelligence", + "description": "Token usage, cost tracking, cache economics, and model efficiency for Claude Code sessions.", + "tags": [ + "claude-code", + "tokens", + "cost", + "observability" + ], + "timezone": "browser", + "editable": true, + "graphTooltip": 1, + "time": { + "from": "now-7d", + "to": "now" + }, + "refresh": "30s", + "schemaVersion": 39, + "fiscalYearStartMonth": 0, + "liveNow": false, + "style": "dark", + "templating": { + "list": [ + { + "name": "model", + "label": "Model", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json", + "regex": "\"model\":\"([^\"]+)\"", + "refresh": 2, + "includeAll": true, + "multi": true, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 1 + }, + { + "name": "project", + "label": "Project", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json", + "regex": "\"project\":\"([^\"]+)\"", + "refresh": 2, + "includeAll": true, + "multi": true, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 1 + }, + { + "name": "effort", + "label": "Effort", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json", + "regex": "\"effort\":\"([^\"]+)\"", + "refresh": 2, + "includeAll": true, + "multi": true, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 1 + } + ] + }, + "panels": [ + { + "type": "row", + "title": "Spend Summary", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 0, + "w": 24, + "h": 1 + } + }, + { + "id": 1, + "title": "Total Spend", + "type": "stat", + "gridPos": { + "x": 0, + "y": 0, + "w": 6, 
+ "h": 5 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "legendFormat": "Total Spend", + "queryType": "range" + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "$0.00", + "unit": "currencyUSD", + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + }, + { + "value": 5, + "color": "yellow" + }, + { + "value": 20, + "color": "red" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 2, + "title": "Sessions", + "type": "stat", + "gridPos": { + "x": 6, + "y": 0, + "w": 6, + "h": 5 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range])", + "legendFormat": "Sessions", + "queryType": "range" + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { + "mode": "fixed", + "fixedColor": "blue" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "blue" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 3, + "title": "Avg Cost / 
Session", + "type": "stat", + "gridPos": { + "x": 12, + "y": 0, + "w": 6, + "h": 5 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "legendFormat": "Total Spend", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range])", + "legendFormat": "Sessions", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "type": "math", + "expression": "$A / $B", + "hide": false + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "$0.00", + "unit": "currencyUSD", + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + }, + { + "value": 5, + "color": "yellow" + }, + { + "value": 20, + "color": "red" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 4, + "title": "Projected Monthly", + "type": "stat", + "gridPos": { + "x": 18, + "y": 0, + "w": 6, + "h": 5 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + 
"legendFormat": "Total Spend", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "type": "math", + "expression": "$A * 2592000 / ${__range_s}", + "hide": false + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "$0.00", + "unit": "currencyUSD", + "color": { + "mode": "fixed", + "fixedColor": "orange" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "orange" + } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Spend Trend", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 5, + "w": 24, + "h": 1 + } + }, + { + "id": 5, + "title": "Cost Over Time", + "type": "timeseries", + "gridPos": { + "x": 0, + "y": 5, + "w": 24, + "h": 8 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__interval]))", + "legendFormat": "Cost", + "queryType": "range" + } + ], + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "fieldConfig": { + "defaults": { + "unit": "currencyUSD", + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "bars", + "barAlignment": 0, + "fillOpacity": 80, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "mode": "none", + "group": "A" + }, + "axisCenteredZero": false, + "axisColorMode": "text", + "scaleDistribution": { + "type": "linear" + } + }, + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Where Money Goes", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 13, + "w": 24, + "h": 1 + } + }, + { + "id": 6, + "title": "Cost by Model", + "type": "piechart", + "gridPos": { + "x": 0, + "y": 13, + "w": 8, + "h": 8 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "legendFormat": "{{model}}", + "queryType": "range" + } + ], + "options": { + "pieType": "donut", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "legend": { + "displayMode": "list", + "placement": "right", + "values": ["value", "percent"] + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "fieldConfig": { + "defaults": { + "unit": "currencyUSD", + "color": { + "mode": "palette-classic" + } + }, + "overrides": [] + } + }, + { + "id": 7, + "title": "Cost by Project", + "type": "piechart", + "gridPos": { + "x": 8, + "y": 13, + "w": 8, + "h": 8 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum by (project) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "legendFormat": "{{project}}", + "queryType": "range" + } + ], + "options": { + "pieType": "donut", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "legend": { + "displayMode": "list", + "placement": "right", + "values": ["value", "percent"] + }, + "tooltip": { 
+ "mode": "single", + "sort": "none" + } + }, + "fieldConfig": { + "defaults": { + "unit": "currencyUSD", + "color": { + "mode": "palette-classic" + } + }, + "overrides": [] + } + }, + { + "id": 8, + "title": "Turns by Effort", + "type": "piechart", + "gridPos": { + "x": 16, + "y": 13, + "w": 8, + "h": 8 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum by (effort) (count_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" [$__range]))", + "legendFormat": "{{effort}}", + "queryType": "range" + } + ], + "options": { + "pieType": "donut", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "legend": { + "displayMode": "list", + "placement": "right", + "values": ["value", "percent"] + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Cache Economics", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 21, + "w": 24, + "h": 1 + } + }, + { + "id": 9, + "title": "Cache Hit Rate", + "type": "gauge", + "gridPos": { + "x": 0, + "y": 21, + "w": 8, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Cache Read", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | 
unwrap total_input_tokens [$__range]))", + "legendFormat": "Fresh Input", + "queryType": "range", + "hide": true + }, + { + "refId": "D", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "legendFormat": "Cache Creation", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "type": "math", + "expression": "$A / ($A + $B + $D) * 100", + "hide": false + } + ], + "options": { + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true, + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + } + }, + "fieldConfig": { + "defaults": { + "unit": "percent", + "min": 0, + "max": 100, + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "red" + }, + { + "value": 30, + "color": "yellow" + }, + { + "value": 50, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 10, + "title": "Cache Read Tokens", + "type": "stat", + "gridPos": { + "x": 8, + "y": 21, + "w": 8, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Cache Read Tokens", + "queryType": "range" + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": 
"0", + "unit": "short", + "color": { + "mode": "fixed", + "fixedColor": "green" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 11, + "title": "Cache Trend", + "type": "timeseries", + "gridPos": { + "x": 16, + "y": 21, + "w": 8, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", + "legendFormat": "Cache Read", + "queryType": "range" + }, + { + "refId": "B", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", + "legendFormat": "Input Tokens", + "queryType": "range" + } + ], + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "lineInterpolation": "smooth", + "fillOpacity": 20, + "lineWidth": 2, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "mode": "none", + "group": "A" + }, + "axisCenteredZero": false, + "axisColorMode": "text", + "scaleDistribution": { + "type": "linear" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Token Flow", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 28, + "w": 24, + "h": 1 + } + }, + { + "id": 12, + "title": "Token Distribution", + "type": "piechart", + "gridPos": { 
+ "x": 0, + "y": 28, + "w": 8, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "legendFormat": "Input", + "queryType": "range" + }, + { + "refId": "B", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "legendFormat": "Output", + "queryType": "range" + }, + { + "refId": "C", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "legendFormat": "Cache Read", + "queryType": "range" + }, + { + "refId": "D", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "legendFormat": "Cache Creation", + "queryType": "range" + } + ], + "options": { + "pieType": "pie", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "legend": { + "displayMode": "list", + "placement": "right", + "values": ["value", "percent"] + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + } + }, + "overrides": [] + } + }, + { + "id": 13, + "title": "Total Output Tokens", + "type": "stat", + "gridPos": { + "x": 8, + "y": 28, + "w": 5, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + 
}, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "legendFormat": "Output Tokens", + "queryType": "range" + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { + "mode": "fixed", + "fixedColor": "purple" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "purple" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 14, + "title": "Output / Dollar", + "type": "stat", + "gridPos": { + "x": 13, + "y": 28, + "w": 5, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "legendFormat": "Output Tokens", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "legendFormat": "Cost", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "type": "math", + "expression": "$A / $B", + "hide": false + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": 
["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { + "mode": "fixed", + "fixedColor": "green" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 15, + "title": "Avg Turns / Session", + "type": "stat", + "gridPos": { + "x": 18, + "y": 28, + "w": 6, + "h": 7 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap assistant_turns [$__range]))", + "legendFormat": "Total Turns", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range])", + "legendFormat": "Sessions", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "type": "math", + "expression": "$A / $B", + "hide": false + } + ], + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "fieldConfig": { + "defaults": { + "noValue": "0", + "color": { + "mode": "fixed", + "fixedColor": "blue" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "blue" + } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Model Economics", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 35, + "w": 24, + "h": 1 + } + }, + { + "id": 16, + "title": "Avg Cost per 
Model", + "type": "barchart", + "gridPos": { + "x": 0, + "y": 35, + "w": 12, + "h": 8 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "avg by (model) (avg_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "legendFormat": "{{model}}", + "queryType": "instant" + } + ], + "options": { + "orientation": "horizontal", + "barWidth": 0.8, + "groupWidth": 0.7, + "showValue": "auto", + "stacking": "none", + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0 + }, + "fieldConfig": { + "defaults": { + "unit": "currencyUSD", + "color": { + "mode": "palette-classic" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "id": 17, + "title": "Output Tokens per Model", + "type": "barchart", + "gridPos": { + "x": 12, + "y": 35, + "w": 12, + "h": 8 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "avg by (model) (avg_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "legendFormat": "{{model}}", + "queryType": "instant" + } + ], + "options": { + "orientation": "horizontal", + "barWidth": 0.8, + "groupWidth": 0.7, + "showValue": "auto", + "stacking": "none", + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0 + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "thresholds": { + "mode": "absolute", + "steps": [ 
+ { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [] + } + }, + { + "type": "row", + "title": "Session Economics", + "collapsed": false, + "gridPos": { + "x": 0, + "y": 43, + "w": 24, + "h": 1 + } + }, + { + "id": 18, + "title": "Session Breakdown", + "type": "table", + "gridPos": { + "x": 0, + "y": 43, + "w": 24, + "h": 10 + }, + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "{app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\"", + "legendFormat": "", + "queryType": "range" + } + ], + "transformations": [ + { + "id": "extractFields", + "options": { + "source": "Line", + "format": "json", + "replace": false, + "keepTime": true + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "session_id": { + "aggregations": [], + "operation": "groupby" + }, + "model": { + "aggregations": ["lastNotNull"], + "operation": "aggregate" + }, + "effort": { + "aggregations": ["lastNotNull"], + "operation": "aggregate" + }, + "cost_usd": { + "aggregations": ["sum"], + "operation": "aggregate" + }, + "total_input_tokens": { + "aggregations": ["sum"], + "operation": "aggregate" + }, + "total_output_tokens": { + "aggregations": ["sum"], + "operation": "aggregate" + }, + "total_cache_read_tokens": { + "aggregations": ["sum"], + "operation": "aggregate" + }, + "assistant_turns": { + "aggregations": ["sum"], + "operation": "aggregate" + } + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Line": true, + "id": true, + "tsNs": true, + "labels": true, + "Time": true + }, + "indexByName": { + "session_id": 0, + "model (lastNotNull)": 1, + "effort (lastNotNull)": 2, + "cost_usd (sum)": 3, + "total_input_tokens (sum)": 4, + "total_output_tokens (sum)": 5, + "total_cache_read_tokens (sum)": 6, + "assistant_turns (sum)": 7 + }, + "renameByName": { + "session_id": 
"Session", + "model (lastNotNull)": "Model", + "effort (lastNotNull)": "Effort", + "cost_usd (sum)": "Cost (USD)", + "total_input_tokens (sum)": "Input Tokens", + "total_output_tokens (sum)": "Output Tokens", + "total_cache_read_tokens (sum)": "Cache Read", + "assistant_turns (sum)": "Turns" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Cost (USD)", + "desc": true + } + ] + } + } + ], + "options": { + "showHeader": true, + "cellHeight": "sm", + "footer": { + "show": true, + "reducer": ["sum"], + "countRows": false, + "fields": ["Cost (USD)", "Input Tokens", "Output Tokens", "Cache Read"] + }, + "sortBy": [ + { + "displayName": "Cost (USD)", + "desc": true + } + ] + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Cost (USD)" + }, + "properties": [ + { + "id": "unit", + "value": "currencyUSD" + }, + { + "id": "decimals", + "value": 4 + }, + { + "id": "custom.cellOptions", + "value": { + "type": "color-background", + "mode": "gradient" + } + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "value": null, + "color": "green" + }, + { + "value": 1, + "color": "yellow" + }, + { + "value": 5, + "color": "red" + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Input Tokens" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Output Tokens" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Cache Read" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + } + ] + } + ] + } + } + ] +} diff --git a/observability/local/grafana/provisioning/dashboards/claude-token-usage.json 
b/observability/local/grafana/provisioning/dashboards/claude-token-usage.json deleted file mode 100644 index fe8f1ad..0000000 --- a/observability/local/grafana/provisioning/dashboards/claude-token-usage.json +++ /dev/null @@ -1,893 +0,0 @@ -{ - "id": null, - "uid": "claude-token-usage", - "title": "Claude Code — Token Usage", - "description": "Token consumption, estimated cost, cache efficiency, and session trends across Claude Code sessions.", - "tags": ["claude-code", "tokens", "cost", "observability"], - "timezone": "browser", - "editable": true, - "graphTooltip": 1, - "time": { "from": "now-7d", "to": "now" }, - "refresh": "1m", - "schemaVersion": 39, - "fiscalYearStartMonth": 0, - "liveNow": false, - "style": "dark", - "templating": { - "list": [ - { - "name": "model", - "label": "Model", - "type": "query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "query": "{app=\"claude-token-metrics\"} | json", - "regex": "\"model\":\"([^\"]+)\"", - "refresh": 2, - "includeAll": true, - "multi": true, - "allValue": ".*", - "current": { "text": "All", "value": "$__all" }, - "sort": 1 - }, - { - "name": "project", - "label": "Project", - "type": "query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "query": "{app=\"claude-token-metrics\"} | json", - "regex": "\"project\":\"([^\"]+)\"", - "refresh": 2, - "includeAll": true, - "multi": true, - "allValue": ".*", - "current": { "text": "All", "value": "$__all" }, - "sort": 1 - }, - { - "name": "effort", - "label": "Effort", - "type": "query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "query": "{app=\"claude-token-metrics\"} | json", - "regex": "\"effort\":\"([^\"]+)\"", - "refresh": 2, - "includeAll": true, - "multi": true, - "allValue": ".*", - "current": { "text": "All", "value": "$__all" }, - "sort": 1 - }, - { - "name": "session_id", - "label": "Session", - "type": "query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "query": "{app=\"claude-token-metrics\"} | 
json", - "regex": "\"session_id\":\"([^\"]+)\"", - "refresh": 2, - "includeAll": true, - "multi": false, - "allValue": ".*", - "current": { "text": "All", "value": "$__all" }, - "sort": 0 - } - ] - }, - "panels": [ - { - "type": "row", - "title": "Cost Summary", - "collapsed": false, - "gridPos": { "x": 0, "y": 0, "w": 24, "h": 1 } - }, - { - "id": 1, - "title": "Output Tokens", - "description": "Total output (generated) tokens in the selected time range.", - "type": "stat", - "transparent": true, - "gridPos": { "x": 0, "y": 1, "w": 5, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_output_tokens [$__range]))", - "queryType": "range" - } - ], - "options": { - "colorMode": "background-gradient", - "graphMode": "area", - "textMode": "auto", - "wideLayout": true, - "justifyMode": "auto", - "orientation": "auto", - "text": { "titleSize": 12, "valueSize": 32 }, - "reduceOptions": { "values": false, "calcs": ["sum"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "fixed", "fixedColor": "#5794F2" }, - "unit": "short", - "decimals": 0, - "thresholds": { "mode": "absolute", "steps": [{ "value": null, "color": "#5794F2" }] } - }, - "overrides": [] - } - }, - { - "id": 2, - "title": "Est. Cost (USD)", - "description": "Estimated total spend based on Anthropic public pricing. 
Cache reads are priced at 10% of input rate.", - "type": "stat", - "transparent": true, - "gridPos": { "x": 5, "y": 1, "w": 5, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap cost_usd [$__range]))", - "queryType": "range" - } - ], - "options": { - "colorMode": "background-gradient", - "graphMode": "area", - "textMode": "auto", - "wideLayout": true, - "justifyMode": "auto", - "orientation": "auto", - "text": { "titleSize": 12, "valueSize": 32 }, - "reduceOptions": { "values": false, "calcs": ["sum"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "thresholds" }, - "unit": "currencyUSD", - "decimals": 2, - "thresholds": { - "mode": "absolute", - "steps": [ - { "value": null, "color": "#73BF69" }, - { "value": 5, "color": "#FADE2A" }, - { "value": 20, "color": "#FF9830" }, - { "value": 50, "color": "#F2495C" } - ] - } - }, - "overrides": [] - } - }, - { - "id": 3, - "title": "Cache Hit Rate", - "description": "Fraction of read tokens served from cache (cache_read / (input + cache_creation + cache_read)). 
Higher is better — reduces cost and latency.", - "type": "stat", - "transparent": true, - "gridPos": { "x": 10, "y": 1, "w": 5, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__range]))", - "queryType": "range", - "hide": true - }, - { - "refId": "B", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_input_tokens [$__range]))", - "queryType": "range", - "hide": true - }, - { - "refId": "C", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_cache_creation_tokens [$__range]))", - "queryType": "range", - "hide": true - }, - { - "refId": "D", - "datasource": { "type": "__expr__", "uid": "__expr__" }, - "type": "math", - "expression": "($A / ($A + $B + $C)) * 100", - "hide": false - } - ], - "options": { - "colorMode": "background-gradient", - "graphMode": "none", - "textMode": "auto", - "wideLayout": true, - "justifyMode": "auto", - "orientation": "auto", - "text": { "titleSize": 12, "valueSize": 32 }, - "reduceOptions": { "values": false, "calcs": ["lastNotNull"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "thresholds" }, - "unit": "percent", - "decimals": 1, - "min": 0, - "max": 100, - "thresholds": { - "mode": "absolute", - "steps": [ - { "value": null, "color": "#F2495C" }, - { "value": 40, "color": "#FF9830" }, - { "value": 70, "color": "#73BF69" } - ] - } - }, - "overrides": [] - } 
- }, - { - "id": 4, - "title": "Sessions", - "description": "Number of completed Claude Code sessions in the selected time range.", - "type": "stat", - "transparent": true, - "gridPos": { "x": 15, "y": 1, "w": 4, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(count_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" [$__range]))", - "queryType": "range" - } - ], - "options": { - "colorMode": "none", - "graphMode": "area", - "textMode": "auto", - "wideLayout": true, - "justifyMode": "auto", - "orientation": "auto", - "text": { "titleSize": 12, "valueSize": 32 }, - "reduceOptions": { "values": false, "calcs": ["sum"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "fixed", "fixedColor": "#A0A0A0" }, - "unit": "short", - "decimals": 0, - "thresholds": { "mode": "absolute", "steps": [{ "value": null, "color": "#A0A0A0" }] } - }, - "overrides": [] - } - }, - { - "id": 5, - "title": "Avg Cost / Session", - "description": "Average estimated cost per completed session.", - "type": "stat", - "transparent": true, - "gridPos": { "x": 19, "y": 1, "w": 5, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap cost_usd [$__range])", - "queryType": "range" - } - ], - "options": { - "colorMode": "background-gradient", - "graphMode": "none", - "textMode": "auto", - "wideLayout": true, - "justifyMode": "auto", - "orientation": "auto", - "text": { "titleSize": 12, "valueSize": 32 }, - "reduceOptions": { "values": false, "calcs": ["mean"], "fields": "" } - }, - "fieldConfig": { - 
"defaults": { - "color": { "mode": "thresholds" }, - "unit": "currencyUSD", - "decimals": 3, - "thresholds": { - "mode": "absolute", - "steps": [ - { "value": null, "color": "#73BF69" }, - { "value": 1, "color": "#FADE2A" }, - { "value": 5, "color": "#FF9830" }, - { "value": 15, "color": "#F2495C" } - ] - } - }, - "overrides": [] - } - }, - - { - "type": "row", - "title": "Token Burn — All Types", - "collapsed": false, - "gridPos": { "x": 0, "y": 6, "w": 24, "h": 1 } - }, - { - "id": 6, - "title": "Token Consumption Over Time", - "description": "Stacked view of all four token categories per session window. Cache reads typically dominate — that's a good sign.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 0, "y": 7, "w": 24, "h": 10 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "Cache Read", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__interval]))", - "legendFormat": "Cache Read", - "queryType": "range" - }, - { - "refId": "Output", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_output_tokens [$__interval]))", - "legendFormat": "Output", - "queryType": "range" - }, - { - "refId": "Cache Create", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_cache_creation_tokens [$__interval]))", - "legendFormat": "Cache Create", - "queryType": "range" - }, - { - "refId": "Input", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": 
"sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_input_tokens [$__interval]))", - "legendFormat": "Input", - "queryType": "range" - } - ], - "options": { - "legend": { - "displayMode": "table", - "placement": "right", - "calcs": ["sum", "mean", "max"] - }, - "tooltip": { "mode": "multi", "sort": "desc" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "unit": "short", - "custom": { - "drawStyle": "line", - "lineInterpolation": "smooth", - "lineWidth": 2, - "fillOpacity": 20, - "gradientMode": "opacity", - "showPoints": "never", - "spanNulls": false, - "axisBorderShow": false, - "stacking": { "mode": "normal", "group": "A" } - } - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "Cache Read" }, - "properties": [ - { "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }, - { "id": "custom.fillOpacity", "value": 25 } - ] - }, - { - "matcher": { "id": "byName", "options": "Output" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byName", "options": "Cache Create" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#FF9830", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byName", "options": "Input" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#6C7280", "mode": "fixed" } }] - } - ] - } - }, - - { - "type": "row", - "title": "Daily Usage by Model", - "collapsed": false, - "gridPos": { "x": 0, "y": 17, "w": 24, "h": 1 } - }, - { - "id": 7, - "title": "Output Tokens per Day — by Model", - "description": "Stacked daily bars showing output token volume per model. 
Reveals model switching and high-burn days at a glance.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 0, "y": 18, "w": 16, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (model) (sum_over_time({app=\"claude-token-metrics\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_output_tokens [$__interval]))", - "legendFormat": "{{model}}", - "queryType": "range" - } - ], - "options": { - "legend": { - "displayMode": "table", - "placement": "right", - "calcs": ["sum", "max"] - }, - "tooltip": { "mode": "multi", "sort": "desc" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "unit": "short", - "custom": { - "drawStyle": "bars", - "lineWidth": 1, - "fillOpacity": 80, - "gradientMode": "none", - "showPoints": "never", - "spanNulls": false, - "axisBorderShow": false, - "stacking": { "mode": "normal", "group": "A" }, - "barAlignment": 0 - } - }, - "overrides": [ - { - "matcher": { "id": "byRegexp", "options": ".*opus.*" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#B877D9", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byRegexp", "options": ".*sonnet.*" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byRegexp", "options": ".*haiku.*" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }] - } - ] - } - }, - { - "id": 8, - "title": "Spend by Model", - "description": "Cumulative estimated cost share per model over the selected period.", - "type": "piechart", - "transparent": true, - "gridPos": { "x": 16, "y": 18, "w": 8, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (model) 
(sum_over_time({app=\"claude-token-metrics\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap cost_usd [$__range]))", - "legendFormat": "{{model}}", - "queryType": "range" - } - ], - "options": { - "pieType": "donut", - "displayLabels": ["name", "percent"], - "legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] }, - "tooltip": { "mode": "multi" }, - "reduceOptions": { "values": false, "calcs": ["sum"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "unit": "currencyUSD", - "decimals": 3 - }, - "overrides": [ - { - "matcher": { "id": "byRegexp", "options": ".*opus.*" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#B877D9", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byRegexp", "options": ".*sonnet.*" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byRegexp", "options": ".*haiku.*" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }] - } - ] - } - }, - - { - "type": "row", - "title": "Effort & Cache Efficiency", - "collapsed": false, - "gridPos": { "x": 0, "y": 27, "w": 24, "h": 1 } - }, - { - "id": 9, - "title": "Cost by Effort Level", - "description": "Standard = default mode. Extended thinking = thinking blocks enabled. 
Fast = /fast mode.", - "type": "barchart", - "transparent": true, - "gridPos": { "x": 0, "y": 28, "w": 8, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (effort) (sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\"} | json | session_id=~\"$session_id\" | unwrap cost_usd [$__range]))", - "legendFormat": "{{effort}}", - "queryType": "range" - } - ], - "transformations": [ - { "id": "reduce", "options": { "reducers": ["sum"] } }, - { "id": "sortBy", "options": { "fields": [{ "desc": true, "displayName": "Sum" }] } } - ], - "options": { - "orientation": "horizontal", - "barWidth": 0.7, - "groupWidth": 0.7, - "showValue": "always", - "stacking": "none", - "xTickLabelMaxLength": 24, - "legend": { "displayMode": "hidden" }, - "tooltip": { "mode": "multi" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "unit": "currencyUSD", - "decimals": 3, - "custom": { "fillOpacity": 80, "gradientMode": "none" } - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "standard" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byName", "options": "extended_thinking" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#B877D9", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byName", "options": "fast" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }] - } - ] - } - }, - { - "id": 10, - "title": "Output Tokens by Effort Level", - "description": "Session count and output token volume per effort mode.", - "type": "barchart", - "transparent": true, - "gridPos": { "x": 8, "y": 28, "w": 8, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": 
"sum by (effort) (sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\"} | json | session_id=~\"$session_id\" | unwrap total_output_tokens [$__range]))", - "legendFormat": "{{effort}}", - "queryType": "range" - } - ], - "transformations": [ - { "id": "reduce", "options": { "reducers": ["sum"] } }, - { "id": "sortBy", "options": { "fields": [{ "desc": true, "displayName": "Sum" }] } } - ], - "options": { - "orientation": "horizontal", - "barWidth": 0.7, - "groupWidth": 0.7, - "showValue": "always", - "stacking": "none", - "xTickLabelMaxLength": 24, - "legend": { "displayMode": "hidden" }, - "tooltip": { "mode": "multi" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "unit": "short", - "decimals": 0, - "custom": { "fillOpacity": 80, "gradientMode": "none" } - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "standard" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byName", "options": "extended_thinking" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#B877D9", "mode": "fixed" } }] - }, - { - "matcher": { "id": "byName", "options": "fast" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }] - } - ] - } - }, - { - "id": 11, - "title": "Cache Efficiency Over Time", - "description": "Cache hit rate (%) per session window. 
Sustained high rates mean context is being efficiently reused across turns.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 16, "y": 28, "w": 8, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__interval]))", - "queryType": "range", - "hide": true - }, - { - "refId": "B", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_input_tokens [$__interval]))", - "queryType": "range", - "hide": true - }, - { - "refId": "C", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap total_cache_creation_tokens [$__interval]))", - "queryType": "range", - "hide": true - }, - { - "refId": "CacheRate", - "datasource": { "type": "__expr__", "uid": "__expr__" }, - "type": "math", - "expression": "($A / ($A + $B + $C)) * 100", - "legendFormat": "Cache Hit %" - } - ], - "options": { - "legend": { "displayMode": "hidden" }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "thresholds" }, - "unit": "percent", - "min": 0, - "max": 100, - "custom": { - "drawStyle": "line", - "lineInterpolation": "smooth", - "lineWidth": 2, - "fillOpacity": 20, - "gradientMode": "scheme", - "showPoints": "always", - "pointSize": 5, - "spanNulls": false, - "axisBorderShow": false, - "thresholdsStyle": { "mode": "area" } - }, - "thresholds": { - "mode": "absolute", - "steps": [ - { "value": 
null, "color": "#F2495C" }, - { "value": 40, "color": "#FF9830" }, - { "value": 70, "color": "#73BF69" } - ] - } - }, - "overrides": [] - } - }, - - { - "type": "row", - "title": "Session Leaderboard", - "collapsed": false, - "gridPos": { "x": 0, "y": 37, "w": 24, "h": 1 } - }, - { - "id": 12, - "title": "Top Sessions by Cost", - "description": "Most expensive sessions in the selected period. Bar length = estimated USD spend. Identify long/costly outlier sessions here.", - "type": "barchart", - "transparent": true, - "gridPos": { "x": 0, "y": 38, "w": 14, "h": 12 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "topk(15, sum by (session_id) (sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json session_id=\"session_id\" | session_id=~\"$session_id\" | unwrap cost_usd [$__range])))", - "legendFormat": "{{session_id}}", - "queryType": "range" - } - ], - "transformations": [ - { "id": "reduce", "options": { "reducers": ["sum"] } }, - { "id": "sortBy", "options": { "fields": [{ "desc": true, "displayName": "Sum" }] } } - ], - "options": { - "orientation": "horizontal", - "barWidth": 0.7, - "groupWidth": 0.7, - "showValue": "always", - "stacking": "none", - "xTickLabelMaxLength": 28, - "legend": { "displayMode": "hidden" }, - "tooltip": { "mode": "single" } - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "unit": "currencyUSD", - "decimals": 3, - "custom": { "fillOpacity": 85, "gradientMode": "none" } - }, - "overrides": [] - } - }, - { - "id": 13, - "title": "Recent Session Log", - "description": "Raw session records. Each line = one completed Claude Code session. 
Includes model, effort, cost, token counts, and turns.", - "type": "logs", - "transparent": true, - "gridPos": { "x": 14, "y": 38, "w": 10, "h": 12 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "{app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | line_format \"{{.model}} | {{.effort}} | ${{.cost_usd}} | out={{.total_output_tokens}} | turns={{.assistant_turns}} | cache={{.total_cache_read_tokens}} | {{.session_id}}\"", - "queryType": "range" - } - ], - "options": { - "dedupStrategy": "none", - "enableLogDetails": true, - "prettifyLogMessage": false, - "showCommonLabels": false, - "showLabels": false, - "showTime": true, - "sortOrder": "Descending", - "wrapLogMessage": false - } - }, - - { - "type": "row", - "title": "Cost Trend", - "collapsed": false, - "gridPos": { "x": 0, "y": 50, "w": 24, "h": 1 } - }, - { - "id": 14, - "title": "Daily Spend Trend", - "description": "Estimated USD cost per day. 
Spot cost spikes and track efficiency gains over time.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 0, "y": 51, "w": 16, "h": 8 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap cost_usd [1d]))", - "legendFormat": "Daily Cost", - "queryType": "range" - } - ], - "options": { - "legend": { "displayMode": "hidden" }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "fixed", "fixedColor": "#FADE2A" }, - "unit": "currencyUSD", - "decimals": 2, - "custom": { - "drawStyle": "bars", - "lineWidth": 1, - "fillOpacity": 70, - "gradientMode": "opacity", - "showPoints": "never", - "spanNulls": false, - "axisBorderShow": false, - "barAlignment": 0 - }, - "thresholds": { - "mode": "absolute", - "steps": [ - { "value": null, "color": "#73BF69" }, - { "value": 5, "color": "#FF9830" }, - { "value": 15, "color": "#F2495C" } - ] - } - }, - "overrides": [] - } - }, - { - "id": 15, - "title": "Assistant Turns per Session", - "description": "Distribution of session depth (number of back-and-forth turns). 
Long sessions = more complex work or exploration.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 16, "y": 51, "w": 8, "h": 8 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap assistant_turns [$__interval])", - "legendFormat": "Avg Turns", - "queryType": "range" - }, - { - "refId": "B", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "max_over_time({app=\"claude-token-metrics\",model=~\"$model\",project=~\"$project\",effort=~\"$effort\"} | json | session_id=~\"$session_id\" | unwrap assistant_turns [$__interval])", - "legendFormat": "Max Turns", - "queryType": "range" - } - ], - "options": { - "legend": { "displayMode": "list", "placement": "bottom" }, - "tooltip": { "mode": "multi", "sort": "desc" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "unit": "short", - "decimals": 0, - "custom": { - "drawStyle": "line", - "lineInterpolation": "smooth", - "lineWidth": 2, - "fillOpacity": 10, - "gradientMode": "none", - "showPoints": "always", - "pointSize": 5, - "spanNulls": false, - "axisBorderShow": false - } - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "Max Turns" }, - "properties": [ - { "id": "color", "value": { "fixedColor": "#FF9830", "mode": "fixed" } }, - { "id": "custom.lineWidth", "value": 1 }, - { "id": "custom.lineStyle", "value": { "dash": [4, 4], "fill": "dash" } } - ] - }, - { - "matcher": { "id": "byName", "options": "Avg Turns" }, - "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] - } - ] - } - } - ] -} diff --git a/observability/local/grafana/provisioning/dashboards/contextstream-deep-dive.json 
b/observability/local/grafana/provisioning/dashboards/contextstream-deep-dive.json deleted file mode 100644 index 1a918b1..0000000 --- a/observability/local/grafana/provisioning/dashboards/contextstream-deep-dive.json +++ /dev/null @@ -1,432 +0,0 @@ -{ - "id": null, - "uid": "contextstream-deep-dive", - "title": "ContextStream — Deep Dive", - "description": "MCP call patterns, action distribution, object lifecycle, and performance for ContextStream integration.", - "tags": ["claude-code", "contextstream", "mcp"], - "timezone": "browser", - "editable": true, - "graphTooltip": 1, - "time": { "from": "now-6h", "to": "now" }, - "refresh": "30s", - "schemaVersion": 39, - "fiscalYearStartMonth": 0, - "liveNow": false, - "style": "dark", - "templating": { - "list": [ - { - "name": "session_id", - "label": "Session", - "type": "query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "query": "{app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-start\"", - "regex": "session_id\":\"([^\"]+)", - "refresh": 2, - "includeAll": true, - "allValue": ".*", - "current": { "text": "All", "value": "$__all" }, - "sort": 2 - }, - { - "name": "cs_action", - "label": "Action", - "type": "query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "query": "{app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | hook_type=\"post-tool-use\"", - "regex": "hook_payload_tool_input_action\":\"([^\"]+)", - "refresh": 2, - "includeAll": true, - "allValue": ".*", - "current": { "text": "All", "value": "$__all" }, - "sort": 1 - } - ] - }, - "panels": [ - { - "type": "row", - "title": "MCP Call Summary", - "collapsed": false, - "gridPos": { "x": 0, "y": 0, "w": 24, "h": 1 } - }, - { - "id": 1, - "title": "Total CS Calls", - "type": "stat", - "transparent": true, - "gridPos": { "x": 0, "y": 1, "w": 6, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", 
"uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__range])", - "queryType": "range" - } - ], - "options": { - "colorMode": "background-gradient", - "graphMode": "area", - "textMode": "auto", - "wideLayout": true, - "justifyMode": "auto", - "orientation": "auto", - "text": { "titleSize": 12, "valueSize": 32 }, - "reduceOptions": { "values": false, "calcs": ["sum"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "fixed", "fixedColor": "#B877D9" }, - "thresholds": { "mode": "absolute", "steps": [{ "value": null, "color": "#B877D9" }] } - }, - "overrides": [] - } - }, - { - "id": 2, - "title": "Failure Rate", - "type": "gauge", - "transparent": true, - "gridPos": { "x": 6, "y": 1, "w": 6, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "errors", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\", level=\"ERROR\"} | json | session_id=~\"$session_id\" [$__range])", - "queryType": "range", - "hide": true - }, - { - "refId": "total", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__range])", - "queryType": "range", - "hide": true - }, - { - "refId": "rate", - "datasource": { "type": "__expr__", "uid": "__expr__" }, - "type": "math", - "expression": "$errors / $total * 100" - } - ], - "options": { - "showThresholdLabels": false, - "showThresholdMarkers": true, - "reduceOptions": { "values": false, "calcs": ["lastNotNull"], "fields": "" } - }, - "fieldConfig": { - "defaults": { - "unit": "percent", - "min": 0, - "max": 100, - "color": { "mode": "thresholds" }, - "thresholds": { - "mode": "absolute", - "steps": [ 
- { "value": null, "color": "#73BF69" }, - { "value": 5, "color": "#FF9830" }, - { "value": 20, "color": "#F2495C" } - ] - } - }, - "overrides": [] - } - }, - { - "id": 3, - "title": "Calls by Tool", - "type": "piechart", - "transparent": true, - "gridPos": { "x": 12, "y": 1, "w": 12, "h": 5 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (tool_name) (count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__range]))", - "queryType": "range" - } - ], - "options": { - "pieType": "donut", - "displayLabels": ["percent"], - "legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] }, - "tooltip": { "mode": "multi" }, - "reduceOptions": { "values": false, "calcs": ["sum"], "fields": "" } - }, - "fieldConfig": { - "defaults": { "color": { "mode": "palette-classic" } }, - "overrides": [ - { "matcher": { "id": "byName", "options": "mcp__contextstream__memory" }, "properties": [{ "id": "color", "value": { "fixedColor": "#B877D9", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__session" }, "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__search" }, "properties": [{ "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__context" }, "properties": [{ "id": "color", "value": { "fixedColor": "#FF9830", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__init" }, "properties": [{ "id": "color", "value": { "fixedColor": "#FADE2A", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__workspace" }, "properties": [{ "id": "color", "value": { 
"fixedColor": "#8AB8FF", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__project" }, "properties": [{ "id": "color", "value": { "fixedColor": "#CA95E5", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "mcp__contextstream__help" }, "properties": [{ "id": "color", "value": { "fixedColor": "#96D98D", "mode": "fixed" } }] } - ] - } - }, - { - "type": "row", - "title": "Action Patterns", - "collapsed": false, - "gridPos": { "x": 0, "y": 7, "w": 24, "h": 1 } - }, - { - "id": 4, - "title": "Action Breakdown", - "description": "Which MCP actions are called most frequently.", - "type": "barchart", - "transparent": true, - "gridPos": { "x": 0, "y": 8, "w": 12, "h": 10 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (hook_payload_tool_input_action) (count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__range]))", - "queryType": "range" - } - ], - "options": { - "orientation": "horizontal", - "barWidth": 0.7, - "groupWidth": 0.7, - "showValue": "auto", - "stacking": "none", - "legend": { "displayMode": "hidden" }, - "tooltip": { "mode": "multi" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "fixed", "fixedColor": "#B877D9" }, - "custom": { "fillOpacity": 80, "gradientMode": "hue" } - }, - "overrides": [] - } - }, - { - "id": 5, - "title": "Action Mix Over Time", - "description": "How action usage patterns shift during a session.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 12, "y": 8, "w": 12, "h": 10 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (hook_payload_tool_input_action) (count_over_time({app=\"claude-dev-logging\", 
component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__interval]))", - "legendFormat": "{{hook_payload_tool_input_action}}", - "queryType": "range" - } - ], - "options": { - "legend": { "displayMode": "table", "placement": "right", "calcs": ["sum"] }, - "tooltip": { "mode": "multi", "sort": "desc" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "drawStyle": "line", - "lineInterpolation": "smooth", - "lineWidth": 2, - "fillOpacity": 10, - "gradientMode": "opacity", - "showPoints": "never", - "spanNulls": false, - "axisBorderShow": false, - "stacking": { "mode": "none", "group": "A" } - } - }, - "overrides": [] - } - }, - { - "type": "row", - "title": "Object Lifecycle", - "collapsed": false, - "gridPos": { "x": 0, "y": 19, "w": 24, "h": 1 } - }, - { - "id": 6, - "title": "CRUD Operations", - "description": "Create, read, update, and query operation distribution.", - "type": "barchart", - "transparent": true, - "gridPos": { "x": 0, "y": 20, "w": 12, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "Create", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | hook_payload_tool_input_action=~\"create.*|capture.*|import.*|remember\" [$__range])", - "legendFormat": "Create", - "queryType": "range" - }, - { - "refId": "Read", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | hook_payload_tool_input_action=~\"get.*|list.*\" [$__range])", - "legendFormat": "Read", - "queryType": "range" - }, - { - "refId": "Update", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": 
"count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | hook_payload_tool_input_action=~\"update.*|supersede.*|complete.*|reorder.*\" [$__range])", - "legendFormat": "Update", - "queryType": "range" - }, - { - "refId": "Query", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | hook_payload_tool_input_action=~\"search|recall|decisions|summary|timeline|smart_search|decision_trace\" [$__range])", - "legendFormat": "Query", - "queryType": "range" - } - ], - "options": { - "orientation": "vertical", - "barWidth": 0.6, - "groupWidth": 0.7, - "showValue": "always", - "stacking": "none", - "legend": { "displayMode": "list", "placement": "bottom" }, - "tooltip": { "mode": "multi" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { "fillOpacity": 80, "gradientMode": "hue" } - }, - "overrides": [ - { "matcher": { "id": "byName", "options": "Create" }, "properties": [{ "id": "color", "value": { "fixedColor": "#73BF69", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "Read" }, "properties": [{ "id": "color", "value": { "fixedColor": "#5794F2", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "Update" }, "properties": [{ "id": "color", "value": { "fixedColor": "#FF9830", "mode": "fixed" } }] }, - { "matcher": { "id": "byName", "options": "Query" }, "properties": [{ "id": "color", "value": { "fixedColor": "#B877D9", "mode": "fixed" } }] } - ] - } - }, - { - "id": 7, - "title": "Context & Search Patterns", - "description": "Context refresh frequency and search mode usage over time.", - "type": "timeseries", - "transparent": true, - "gridPos": { "x": 12, "y": 20, "w": 12, "h": 9 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - 
"targets": [ - { - "refId": "context", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (tool_name) (count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | tool_name=~\".*init|.*context\" [$__interval]))", - "legendFormat": "{{tool_name}}", - "queryType": "range" - }, - { - "refId": "search", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (hook_payload_tool_input_mode) (count_over_time({app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | tool_name=~\".*search\" [$__interval]))", - "legendFormat": "search:{{hook_payload_tool_input_mode}}", - "queryType": "range" - } - ], - "options": { - "legend": { "displayMode": "table", "placement": "right", "calcs": ["sum"] }, - "tooltip": { "mode": "multi", "sort": "desc" } - }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "drawStyle": "line", - "lineInterpolation": "smooth", - "lineWidth": 2, - "fillOpacity": 15, - "gradientMode": "opacity", - "showPoints": "never", - "spanNulls": false, - "axisBorderShow": false, - "stacking": { "mode": "none", "group": "A" } - } - }, - "overrides": [] - } - }, - { - "type": "row", - "title": "Errors & Failures", - "collapsed": false, - "gridPos": { "x": 0, "y": 30, "w": 24, "h": 1 } - }, - { - "id": 8, - "title": "Error Log", - "description": "All ContextStream MCP errors.", - "type": "logs", - "transparent": true, - "gridPos": { "x": 0, "y": 31, "w": 24, "h": 8 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "{app=\"claude-dev-logging\", component=\"mcp-contextstream\", level=\"ERROR\"} | json | session_id=~\"$session_id\"", - "queryType": "range" - } - ], - "options": { - "showTime": true, - "showLabels": 
false, - "showCommonLabels": false, - "wrapLogMessage": true, - "prettifyLogMessage": false, - "enableLogDetails": true, - "sortOrder": "Descending", - "dedupStrategy": "none" - } - }, - { - "type": "row", - "title": "Full Log Stream", - "collapsed": true, - "gridPos": { "x": 0, "y": 40, "w": 24, "h": 1 }, - "panels": [ - { - "id": 9, - "title": "ContextStream Logs", - "type": "logs", - "transparent": true, - "gridPos": { "x": 0, "y": 41, "w": 24, "h": 14 }, - "datasource": { "type": "loki", "uid": "loki_local" }, - "targets": [ - { - "refId": "A", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "{app=\"claude-dev-logging\", component=\"mcp-contextstream\"} | json | session_id=~\"$session_id\" | line_format \"{{.tool_name}} | action={{.hook_payload_tool_input_action}} | {{.hook_type}}\"", - "queryType": "range" - } - ], - "options": { - "showTime": true, - "showLabels": false, - "showCommonLabels": false, - "wrapLogMessage": true, - "prettifyLogMessage": false, - "enableLogDetails": true, - "sortOrder": "Descending", - "dedupStrategy": "none" - } - } - ] - } - ] -} diff --git a/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json b/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json new file mode 100644 index 0000000..63b945f --- /dev/null +++ b/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json @@ -0,0 +1,541 @@ +{ + "annotations": { "list": [] }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, + "id": 100, + "title": "Sentinel Health", + "type": "row" + }, + { + "gridPos": { "h": 3, "w": 24, "x": 0, "y": 1 }, + "id": 1, + "options": { + "code": { "language": "markdown", "showLineNumbers": false, "showMiniMap": false }, + "content": "**Log Sentinel v2** — Autonomous log-analysis pipeline.\n\n- **16 
detectors** across two categories: **app** (plugin crashes, bridge failures, action errors) and **ops** (deploy issues, disk pressure, connectivity).\n- **Three-tier LLM** — T0 detectors emit findings; T1 deduplicates and prioritises; T2 investigates high-severity findings and writes root-cause reports.\n- All structured events carry `component=log-sentinel`. Cycle runs every 5 minutes by default.", + "mode": "markdown" + }, + "title": "About", + "type": "text" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] } + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 6, "x": 0, "y": 4 }, + "id": 2, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_cycle\"} [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "Cycles completed", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "drawStyle": "line", + "fillOpacity": 20, + "lineWidth": 2, + "showPoints": "auto" + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 10, "x": 6, "y": 4 }, + "id": 3, + "options": { + "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": 
"{component=\"log-sentinel\", event=\"sentinel_cycle\"} | json | unwrap duration_ms", + "queryType": "range", + "refId": "A" + } + ], + "title": "Cycle duration", + "type": "timeseries" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }, { "color": "red", "value": 1 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 8, "x": 16, "y": 4 }, + "id": 4, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_detector_run\"} | json | error!=\"\" | error!=\"null\" | error!=\"None\" [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "Detector errors", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 10 }, + "id": 101, + "title": "Per-Detector Timing", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 11 }, + "id": 5, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_detector_run\"} | json", + "queryType": "range", + "refId": "A" + } + ], + "title": "Detector runs", + "type": "logs" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 19 }, + "id": 102, + "title": "App 
Findings", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 8, "w": 16, "x": 0, "y": 20 }, + "id": 6, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"app\"", + "queryType": "range", + "refId": "A" + } + ], + "title": "App findings", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }, { "color": "yellow", "value": 5 }, { "color": "red", "value": 15 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 20 }, + "id": 7, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"app\" [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "App finding count", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 28 }, + "id": 103, + "title": "Ops Findings", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 8, "w": 16, "x": 0, "y": 29 }, + "id": 8, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + 
"showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"ops\"", + "queryType": "range", + "refId": "A" + } + ], + "title": "Ops findings", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }, { "color": "yellow", "value": 5 }, { "color": "orange", "value": 15 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 29 }, + "id": 9, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"ops\" [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "Ops finding count", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 37 }, + "id": 104, + "title": "T2 LLM Activity", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 38 }, + "id": 10, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_t2_run\"} | json", + 
"queryType": "range", + "refId": "A" + } + ], + "title": "T2 run metrics", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] } + }, + "overrides": [] + }, + "gridPos": { "h": 4, "w": 6, "x": 12, "y": 38 }, + "id": 11, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_t2_run\"} [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "T2 investigations", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "blue", "value": null }] } + }, + "overrides": [] + }, + "gridPos": { "h": 4, "w": 6, "x": 18, "y": 38 }, + "id": 12, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_t2_run\"} | json | trigger=\"proactive\" [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "Proactive polls", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 46 }, + "id": 105, + "title": "T2 Investigation Reports", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": 
{ "h": 10, "w": 24, "x": 0, "y": 47 }, + "id": 13, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_investigation\"} | json", + "queryType": "range", + "refId": "A" + } + ], + "title": "Investigation reports", + "type": "logs" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 57 }, + "id": 106, + "title": "Sentry Issues", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 6, "w": 16, "x": 0, "y": 58 }, + "id": 14, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_sentry_issue\"} | json", + "queryType": "range", + "refId": "A" + } + ], + "title": "Sentry issues created", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "blue", "value": null }] } + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 8, "x": 16, "y": 58 }, + "id": 15, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": 
"count_over_time({component=\"log-sentinel\", event=\"sentinel_sentry_issue\"} [$__range])", + "queryType": "range", + "refId": "A" + } + ], + "title": "Sentry issues", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 64 }, + "id": 107, + "title": "Process Logs", + "type": "row" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 65 }, + "id": 16, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_log\"} | json", + "queryType": "range", + "refId": "A" + } + ], + "title": "Sentinel process logs", + "type": "logs" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": ["simsteward", "log-sentinel", "observability"], + "templating": { + "list": [ + { + "current": { "selected": true, "text": "local", "value": "local" }, + "hide": 0, + "includeAll": false, + "label": "env", + "name": "env", + "options": [ + { "selected": true, "text": "local", "value": "local" }, + { "selected": false, "text": "production", "value": "production" } + ], + "query": "local,production", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { "from": "now-6h", "to": "now" }, + "timepicker": {}, + "timezone": "browser", + "title": "SimSteward — Log Sentinel", + "uid": "simsteward-log-sentinel", + "version": 1, + "weekStart": "" +} diff --git a/observability/local/log-sentinel/Dockerfile b/observability/local/log-sentinel/Dockerfile new file mode 100644 index 0000000..f72d0cb --- /dev/null +++ b/observability/local/log-sentinel/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.12-slim +WORKDIR /app +COPY requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt +COPY . . +EXPOSE 8081 +CMD ["python", "app.py"] diff --git a/observability/local/log-sentinel/__pycache__/circuit_breaker.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/circuit_breaker.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1e105efb7235233b90a3d5ae93ca52cb4f98fcd GIT binary patch literal 3049 zcmb7GO-vg{6dv1a>7lt zN}MXvAEl{^0w=2W(Bzg=l}eR*s+67@0+F39he$bbqajhReKX#*!7+!9;LXq5c{B6g z_rB*HpU;h;Y2W=J{o+9A1(}$QF$OzVV6cQZgb`=qtd}gO_OdJs(?T@1MQn`ZgQ}R-q^u~%k|it2lI@bLGbTIZFjzuy1Xo7b%2}|Dvtov` zVLQiQ2WQ81T#$3bom^eq#W~}ylMc?+f^IN3EZEJt3DK@EORWX^icMJ?)=FSf4c2yHJQE%MkB;ItAUM~x(W=?rcniivs&ZwfC zt}>^Ri?Pu3PY3$qS5*}T;c*v(!4OYtmtf=|`_yX+8K*09dRSs&^E=Z(_a!_jl;t1+Bwt#JCWw$x%hH!(1 z5{tQ3>id3!Tb-aVW=>SKp{N^^W3dl}&Ro87c0^|g`Y?%X3BRuM9}W-k zROJA~WJIhpQWl7d;QYP?lgp|khAbxc(|wTU${cT;pdYG&&K_+u**gUS@Z$;G?z`2u z+_)NE39ozFiwy@WOQDs}y64DdpyBrXm-AnJwmep7?OB7rK+glMaO&LJsdMXrbH&!k z>h+cD-!k93H-m58_1y71J~Z@*`OEdEYa=*XJQyxEG(QhC;3hb)Ht6nw=6`~24#LX< ze9F)oHHjE(nT>ErE!b3o2l^V2-VG1RJY^oD%#&~373UI-vRznADzhRMQ&CDxQAZ`f zI1CrUhlrpY6(FNl5WB(N2*-HkJD;RsUw{Rg$k@eu@i#*C8nU|jxq^J zX$myG4$4p#c9p?MGwR@eU^CRmz=Zla2mqVYTX066I3quNyyol#9BoEAeh&Q@ z`YB8bO&c&r!PCCxX}`x5y873;`qw@EMPQHot^D%X>a~??>w!+f5&k>=V&i!zU3V|s zxv;Di4xe5-eEMN~q3_~a-^EvUTm6A;Cu(V11^!ta{O01e4|zYJh=sJG529|MkEeu5 z(~7uZBx>CvAl^CyA{r#6l>D55QtIaUX(g4D$=J*DH*K^sHCbR9J z+updWy+Y8vY-a48ue~n&(b56LHuxw8onA+(2EhS=i|)#Z+Au96B|4(F=mAaTX1eoC z$wmDbQU?)~s!joN`OUq1Vo=y^cXSM9`7^{AS*J-}{-F zop;>R`#VQPY6Z8XK&Z&({z(3-dU z9f$s?EbR#S6691tmV7%7{Bx5tjbxI^y#yA6#?R~W{l3Cre#N=R|Z85c?K0{0$Cy+?B&YGQz z?QDyw;~YqCwyO=vbq=I%wx_{8ZfQgi+{4|4HiRAJkSWhJm`*u~e7@nFEtLWEsA^yGNHsPOz=08WR$%A8V(#pA1o zUQy%<+)QvLJQvCY1W8e0DXRwV&w^#Jf@n6*tSvGaZxy41MP@CY+K40o8;>IE%@7)D z$rH)s^6f};*$~-qnrN^kTe}Y8OyqVV9z%8v&!dTU(P}CgiLNbf#81MHv~*(e*!n8i zTx4R&O#^;_U0h+;;+v6F3?!0wF!%;?h;o(_+nf^f)Ljr`8ASO9kyTbuNAvYUwmh#Q 
z7uhmUf+Ff_?v63(ZcP*(qCRt0U;de`!Kv68^1t~3OuDpLzObbP|__{v6LG;H?3$prnt|^#&oT_bAw`$wf zZEbc{OKs?%ZR+FSHKg84kFDD1{vf6^{WiHTMZ zHc9y{%u`H|Eh{RG69cEP8gh}|4fb3!HwxlG6eAy)_q5aK6(klMUIxl6?lBK+yLO4RuT!{4Z!^p zCpIO$0PF}*QAK<#B^dx1!p5j8E8<&Q;MH9@2XB&7f)aCO=T`tR6?q^43Q=`~R_897D%8%pDW1VMuKJHq-$C{yJkw@<2C$ qPJ$!Xtws*>@Efc`uBAfSjJGz6#qt+5^_uGZ*EwhzdPiYmK>r7V!*s6z literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/__pycache__/grafana_client.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/grafana_client.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bb3368a53c75679a117b480285a8c303a0cbdfc GIT binary patch literal 3326 zcmd5;O>7&-6`uX!&r+mB%d%|Ivb?tJ*0f9#k>Vzn_0x!nZK*b8b1m0_XfKxJl3thG zrDvB?MAU$hLy;CO(iAX)o*J|TDx;SkRrF9JMu8l6>%*$s9s;9wGr@Bv1nV9STz} zr!nnv1~UaJANH4R@#9Ef&Qk3$4je)-W)nyVoTWWkuu3O`ty34VRFPDPzjXcjRsOB3 zvwW$h$g06tG|Wq?su_}@XsVumPL}0baUGP3D%aB0_QF!zT${o!+_pIm>>(;3@D>qI zF(X6-y1)tyd~AVzJtzbc=q`Jg!T}+eKsYFbfQE!H4p+HkmlN~+%DUb>?*e;HxG3qe z*u*u5ZR#?1!VO8+mo;22`OzXjXS{Fo+t7K47O1=z1sHakK>4v*K(&tk=J4w@jJ*gU zR7FWT=TMm>;{>s8V5NbBV8UUgrg109I;<|&Dy|5VonvwgI`M^v=7`zx!UFiI7nT*( zxKk+0O9fr2r*uPJmT;N$?=*|4My**@)Ph!osY_B(sVT-vp{A9j8qjLWk9Wa~x7a?P zbYsN{i=v_`hA2AyJEE2L~at_9xsX z=uxS&XBd!WB#N8AgUCEYLue5O?Uvs@&yyWT3n;(+1Uz{+Wb7EkGf!f+t1WWAV9~7s zGRk*)z79dwp7jO-oeCi2pPInHxe`BjHIR1i62TCp@mHl8$i%(p=l8!l+xG)@6@XQJ zug(BX5H$UJX~rPG(~J-nxN|HId36je9`fgfki;XT(u)aSl0V}2wzv>kp#9qA(NRDI zm41t^db~na&?IvdRj6cF?qxh9HI&S@>V5tXIM0{S?fKMVCyG;xE&3MU3QuH;Qxioz z^@wp;L#fL^rK%28e$RLgQAdv$OoHvOcXds5LS&BCG+KjR+HC_0UdC9%vo0^m7_#39mn1{3YIwy7G#d@5^njrl zHQ5R2&3at|G55}UCAmR>$qB-ys$@!8E;g%42FHliP(@M8iduCjStqFs^@vGgIAJd; z^6r}Pp+Gf(0H=E zd`%#U=P*eROk#l#0C8f9x&-B{RNXg0s}~StcDrPpZyolr1ax^#Gx(AOcl9`t5+#5P zQOG5&UYFIfYsXXexk|Y{mDSXWQkK<{4E7@Y_xUSYS*}5!gZ}lE1`!7)GB6Vpti>dT z_%M*xX&;Yv__;Sa^ltlyF#@{3+PZ5#?;z1S85WxtLoj}#^0}|Zv+k)<#*m>3OJ&Wv0 zABNkTfY?56j{09vOkAtmQ-U=m*i!{-s$ky!o_%}Cy1lf4&y%Ayt 
zuFx+y4xqA+K;_|X0+mA^RFVLASozN5dvp-ON!kfW<+`HcH_7>NBibr~LD5mAKmZq`UYDvI}-Qq9x!h+;*7 z$72m1&8h}!1};L}Lk|56A>RVx91umFJhV%qWI%=$n})25;v|UnrBz6Bd4Vo=4obW_@n^QOo3i5E)(?H}95f?ox{0cr3BBGk3_AEEKy$N&HU literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/__pycache__/loki_client.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/loki_client.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2b45cf76a4f14d4250e3b98ad337c3696699920 GIT binary patch literal 10175 zcmc&)eQ+DcbzgiOzCn-#2!J3(9;r{rA}NW~7cJSQC|e&EDV0Y^vPcaA0!IoKKIlDA zGU;)wq-iR9(wcIjnrhq;({aaCrjw}COoxA@GqUW=IDcev7p4Y#)=WHgXIlLSMfRj) z|B=48hXX(evg3c+ox$7P_ulU9?Y{l(+jn^2b~^~9x8L|hva5xVPq3mVbFuJX8Vc8l zKqwImg7H}cHL%=BjVw1&6Xd36&Bx5te2k+UujyH+W!yMYr*~J*JBeW4Wzg!>79>#T zLPW6aG8TFEDP!0=GYqxk*$YYj$mr-ve&FO$KAuX7nH>Kgmw&*&FeB1g{xN=fMmoQZ zzm&|K=cOE-iRWghnBXN)OJ>9rFJ8n3=f%{tNTu$3*#B_YsM->-oR~|dMb(CME}IeO z+P(8-}ggCSsBpouMh!iYt(v$;C~@X~GQ4_gtpXfl(`MWd>>JS*Kg6ZeZ00CJhE2KgJF>z*ak zN^sj1^Y`4VfwsSLJpd7bw-Le8GjXiw3C)4c**BqZjf|CMWUMqFBl@1!+d$q)#w>bu z1ZSrz>2o~dtP(&~w`{@C1y9#CH{h)Q7ix>Y`qjWqi)w;dq&PIyl*wMAI0)4QE3Vpc z?WJ5SJ$;kV24GWseboY5(Tt=vRLqaY@_|^|0^~CJPfz0y_P)9Qjs0)i-f{iNwc_o{ zd-o{bJuBXQ%Y%yd$raC2vh69>t7^|qPD)}fnwH|FS%FSUGxi!({y%0cLb?Ed1!L9h znTGHEH%!+QjEY?TPWJ$9f9#SW=j_Yu~jb+ZO)Fo zdHjvzZwKFL{ZZ?RuRHJCtN8Y=`1+Se72m+^ql#}x{qd;a zVO8)(nV;!)>mC+*n^h4!x?@$t*=AyveB?6RI4u4b=pOp>nBYI+;`Li6j-{xnwqT z)2LdeV>FhQ81mlx0Q$K1M@SS6Lp8%`979NxW>PuTaxs>g5v4gFnlm5I&cFdKQktdx z{6;D4RBcp*M@l(KHBE^*hTf_Pu$^_SI$<0+*bnh+LR8JqOWBNSN{JcO{`8A+aT*7s zTELp&MN})iK_;1*3fm~IJMBaQGfe0PHYym;w94hOxmZfIzyrisS~Z9gS{WP?ENR=2 zV}n%dEA}7bw1KAd3m}(C!B}Va-}Sa2UafiqZyvaMV7^E0*eBQbuXb$unfKP2e_r># z^ug};UXstnaH9rI7q!O?`c;&?TfuDo=vNP=2c(Qm8Vxj zZ8yT#!#{lNM$h%0rA@awS3ag6hG}2}$)F z6HW{t5)nS0M$l31qqEamkvq z`{obb4TKi@zCXJ-aP6A~qp7)LwYB5M@$1L`(f8BPk3+W(EjRqr;g#_I4_gmBu#$DV zG>j?K5nnT)%%>db*qG=06u$3gNxA*02gK0ap|ppn3Lkp*amD1hZLWO86d7)($tiEKKS%&1n)C6K~Kw%Hj1%{8p?n=m+W4-E-)7~&ul%&}V^3tW`_ 
zn&GfZgDAvI)r{974ZN_K$|Yvf(-MO;z?3XJm|+^?5ycBKD2Nj?Q#v-CKt8?%crOyV z>uznR(Nt_gOo88YL4?f!ZkB4Dj?Jb3_vip<*8(atzJlthKqzE9gW7k105q8$_TIa` zz+0xb4&6BVw?}{Y?9zIrwQD8Vz2fTuc-gRHdHt{LzqHFE&&gx4l_L{MYy8U4M}a-_ zrd5B-o5`!mMbq~)^QPZ<{qw!6;cbiSSA3i9TS;(l!A61|OX=U99L=9R`aqjxLg%k2)`{mGq+lS@IsTJ30&8soM z2*Im4{_1hLx%-wO@7k%jc7B2A(yMzlp@Un=yZnG1vO8NHgFe%pKL4P{^qz-<@*2!Q zG+>zVFAOub!*vCAD=@2*V8r!_!I>nOP8bAp)F9x##xtrafy4rm63ૃn1P`ls& z>J(f+-JpxeRh2+e2a?i`FN;Cq0ZEN35J>7lQdTXMK+*t`+M{g*NeLlJO$fB{f}{pw zfW!xq8khl+CXkd+Voh&;kd)A(QUd&vb5Z6$ZOEA58rWRs=%q6|%a{T&p}?@Vy^X3F z?*OVTPLsJL#BEgb>4A}9DnL8c3XusZ1{VeFk4Aue?y3yy&{L>YuSa>JlgUg1kcgf} zwndtSU{P9i6uHr40)mcW4(>zXQ`q7Vew@m}kz|tqRN&#WuvL?}lnAIYlTOD#;)Is) z@M#LM6V)S1Fgj?Ih~~1<+)mZWZsbwz?&X?HK^!2S&E&)vbKv?+$ zns`K|0Ow8G4ZDXqHVuwdmv`Y(zP(3j?~!{Cue2Y&dg_By1yf^#<6~FT{G~TvzWQ>3 zgId8t+`)xFzIm(CytQCOjtx0&1v{1<#O;%tHW!>&aS?aZ{P}_#%XP%vys$YR+NOlI z6+FnPC+@(4H6Prf1h*6#kkeSn@gm1p$!S84A09A&`pt7!&lLh#4-&T@hWE>OIxZCj6b}8+<3hR*5R;kmD9G626AN{iQG~4;@C1?oNJ=-Ja^ol-A)#_zF9EkzFPC%RVq~Ez0hh00K~)HLn~Six z{$XhqEH-Pf$l#AAL4D41G;U{5g`pK=NHAe~IJ@l2?(uhNKc--D_q@b3<#O zY$F5HHM;7$Di005FjOoY&c8HQL|i{Z+<8D)C;`GQv@fm8hX<7K!0ipUd*$$9dCL(w zaJ0aIScBYk`R46P^Y(%jIXdKGS%ciJf)gt`Q_Sjg$Cp^;C1JNT}rSkAKa}3cNe_KXYjk0--LVsVgLNg`M@?Mu&oe44#2Q) z{#?Fki_)~E(2N`aV)m?^N@!=H1vvo3WzIU}01%fs?Z^Qj4lNwWxAZA3eT5F>bmAeB z&il72{;h@e$k{;Jx8>XVmA3xXjXU!jk0=|DtakR~I}a+I2Uj=s<~JQvHXZx@W1tGC z-Te@1gGtZEQ(%J-?_7i6=(L&O_uv~q;8QE$cLeYoV=-{40u&dxVj0j{uolaIY++}O{nZry2NQOr3fU#v^p_)1Si zlR9|ai%2jyQ0q!azKI>L(s z#(gHCuXCAv+|;t#%rAzPo>e;b%KX0N#^qzTUs3j-mLESOpNY$fOo5PT!+`NVe5W!T zG(s(57{qMQID}bn*tpufd9`Wds=w{N(+bP)BA(`g8%nUc^$)MEjm~7gWNqhRSYQ1q za|>u;ViyM^TpZXbUVHt)7VJe^aDXj<9SClruEG}3rm9kvEqI3EUdsYq+|wf?Cq@|f z&)E+ZT~%Il9Irwr^bH_$?vgR^$qcVLE8)}#@qIiI)TWXN=q_&*riNJpy#@+5joRh5 za^V;75hsz8_@RgmZt<~#aDaG+h7l42n@LTZn;L_dg8EEa-giR~>Z$SqVqDT<|S{F_)p1K~DTf3G9oDMvbS951>oK8LUymbC^A69AgMEt|7#x zO6%}{iFG(}66oy17J}|Avye$B=oV6QjVq&uB{yEMV4Og(f30<(86ZS#p>s^ zo8v#gIPN8YRPX5G8!+Yaj$X5dKgMP!kvwuM7kkknY7cGYO6fb$k8})3aXmLLIe+T8 
z=~=ek_O5Js`gY>BByV|E-h5mR3_ZM4Lyuky2+858%f1N=hYj}-N8v1LXBmVA@oyYq zKmJ)G6HANmXAMU*3V!Jf9N$sZ6@{yJELD_vqHyPhzn7rLn8`+?DF>fGlkprKLN0zb(kM z-2y&j_xtQz+u~?}Kz7UY8D{tWK$f62vOr5`E%q_iQoPJs(i$$C{(xFctj!=!Uf7{p zqtQe*4s*K?>Z%pL9Ak{f z;p~qY5Q;Py{+*0|LYyCwx{rwScVzP?#PhMeK5q~G#vWRDa>f4GWy>EdaigK__XH^$ G<^KXfb(dKH literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/__pycache__/loki_handler.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/loki_handler.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16bfaca1d3c2da5613da98059d83f27d7677e1d8 GIT binary patch literal 4429 zcmaJ^U2qfE6~6l?t)%t;54LP<*&v9DZ8-^q5+D;y)X>CsgBAs_44bvIl0{Zpad+j^ zv=2-lI;KsABrP?h>DYOLw@%xcrhQ3D!!+$QGuD`3<%WmM(3iZ~ffk1Lq35oC?P_{8 zy1(b1J?EZtzI*Qdh6Xo+^3l7uQoj-rxc5XF=>~1mYqpDu^k` z>@+WP@aLz6y^aXl%gBNkofFUT=NRme9bSZ;vJ+}i7N=d|_FC4({$5sg!44jKWH-=B zvIjRr&|wBQ?nHqG;-P{aE6V@12euGVhZ!Y@b;4M_3N6ZBN* z|Nf^?J;z)@(aPcpK7!7GMfR0ud?><%%ND9oA+XRsrZ|isy>!Y5o$~3G@0=RdEBk^7 zs2t_((Z+Hw++6Ne+ri4WVUMdkA3@=ca=+SErA(p#J4u*ez(WKxm(?&46;M89DGG65 z)557NrUGDUI1Da`h*O!#B@#gCvSzWS#=#>9KW@ZcCxW8Wrd2U56}rP}M(%%;689ev69JYA5lZwPV`6U)VffZrOTVkAUHs{;u_U8n60=) zb#`Bg12{lskIVoqwcD0bb+Y>F;f5_6V7SF?P6V8|38sl~g4(#~9iEceb;HOKhhkfw zIO3|7HZtI1)G=-_xDB?M^i+J9x+iX>Z2C6YJE7bkU?@UPLA7 z!R2aP%2ok_(+1mEt<%t`w^dmN6|SH@ia{{7wbWN3QbDF@#d+#E8QdI94MiQ-0;grd z_Pw?F%Vp#%yQ$2|oXi7lof`;dD`;2cA`2s8h6VZS;2(h53b4&=K7g`WPUgVzo&&#m^1+}%E~bmqM?D|?Q9a^f%1o6(iyC)bWE`QyrJ=(W}D zYGM1HLig^1cd+2~E$z9sr_j|$QGKJu)zP>RT5m_Kor~U|c?-?$i_$yNTJw&4^N!`v z?dESipo&!{({uJCiUi}-m9?u;*w4_h*TLdJ_=Qh_R#h?-0{A5W&ROQu6ZJs4tOJ z{1y20)$wVFjXJxi=kq8-FNr#<^)i?$6mE^1QIug%GHvMCZiF&igbkOIy&5GVkc-lr zK3Ivja3xo%l0~>`bWQ{i@<4P2QHBSeG-umV$`ZksFCdfyJ+iMx9Bt@T&SZngWMTd& zl_*-d(xUX*vP-mrB=s<*np4T_Fo<22s@z5^Xj3m01stsC5oBLsCqhL{GEO)hzMkGP z2X?67N?AsPB7)4zg6xo;BaVnu7DsB(YvPARu6-!lRo)S%cgU7vrxLweCSll!WY^QI z?2hoGjO>x61Kj^dz{v+i1TYYhSp?&t*kqe%h zbPoGr7Vn}J_zem;2nQfi%s1&SyoXjpv>K-CaJ2$5X=<7{AV1G(=7pA^4k=zx&8C8L 
zPX_6GggA>N=GQc!U<-fF1SoP+7&CHKpt%OJgqW_Wn7B>Nifaiqr(5{ju!#sMKv|P; zz=?2>hvKKG2sB9`LOhqwnzo2U#M*ff!ZHb;HGtMmmJiUciZ<2}Cxy9`W)g8`UMV6r zqv0{ijH*Y3BsOwcd<6QY2o+$VU5bg(03~Uy#lvD=5v1e?89Pa6Xt_M~)2Tw+w#7q> zhYHP|4+YfNwJ^2rK-+p2^>_3So_yO_co9JB^7y6kHK{Kz_1%_y4_!dD?ndpM3*wh8 zT?`tba3 zCssSh{u)~N&PqqPAhlhdyfj%D41UOeB>qACbl-}SUPox2d69)iW~1x|LOs(gfc1vJ z^)~(saScry0@^;jE^)4wyHfk*iJwedJ+XA=+L^20TiJ7H#e4YU(<`r>TzTQt%1mrk zia#6x{ob1T&o}L;Q)V7LXhhB36un!%keVrEw>&lhSO{AYJJjgB)#(oH;BE~vPzN0L6<73}9@trZ64^!vFB9z7hwbLD{(PW`KBnkh36Cka zqNI&@PN#iIQO@U7y|mJ#D2Wt2wRCu`WDG^Y--XRYNa%)Y*^iZC4oO3rIO*+8A6%|+ zBN?_cKul9&Z2L(EJfketO2rh_0-u}7SsJ{oNJHt771MNGQ34F6&t^LveZJ^qG~dIt zqTp&CgX+Z4T|LXp=o}qlt&;ta6 zhl6cG%Q}L}&Q)#JkgqruC2qt(K^jE(G_CBIqF{{2p(4)G%iP{^0~L4Eyq}1Nil_G+ wehr4;_sFDZ0fuClFVNw8$nzy?{F2tq_fXRV$9|^e?cIyPcY^;!(6t%;7w4aW82|tP literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/__pycache__/models.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/models.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8dd2664e2e615e53d62c9b663e095dca38eddf3 GIT binary patch literal 6028 zcmcgwU2NOd6{aLgqW&#g@{jUk#dgtZv6CRS+d56!txnv;cAdhkk+#vs(Go2)p-A~-f{l1}`y z6a|J6(BVDjkXQHKbIy0pVKWvBb8x9|{zZKYMxP_&UVdlL40|~4CMR-)6Fs7L!b3dl z?j>Hh`zCzjJmJUv#GgeyKM@!YkidA51hZV8@8o(o(VyYOz_8bzZ`hfZ2#z)gT49ei zLoKP>&2|9FDRy^mSjscbo1CIZhk7~rJ*8BQP5=5 zr2aWo(TX4wK&Dt~S)l^b8-}hFoWo+bZG*f3>`gAufgw0F0`WZpN1gYJ-bX>hM!xM4 z&qe{!pXbGZ$PMu#9QYM}+YCWbAVMjW3)Aq6@UNF(^~SZ$5KKe`(~^lL=_U;*dJ*r@ zVwJG1NTyPtQOz)}&dP1#VcK_bP zUB)-rH7&TK`}bjIp6SSanLBbfCVB?oz}zQw?~GUUqD1$-bi~#TfX6#Beg^#XrO_Gi z=Mi5Ym*c0@f5N)euRojMaz27VLh!#x6i6;WeX4E|veu~TFh%kLDqnl2298O^0r$Zt1MDsju>SL{+YJjx9l-jn7~z_ zCoOSbBvWs_^mhKu{BIt6xBs2~diwY$>5*D`q@I52{bDWsY&|);5@;kl)^fFEFN{)| z+h=QsPSjE--n&q1JF_BeBwE+b|FOT8xVSD{Y{Hz}9q{rH6(mWQ%Zen?uq464sv3@? 
zlJsg-)*MYjlIB!mS{ek8Zb%Yof!8TWWnDKcJ8s~m2n&|7V$B;x(t(epB?%7NQVWu7 zSwx+!T8as;XFx}~khCK~?_mTz+I$?ykGRj`$>rx8(ZupZqcvUU)1UcUw!FOm1<$R~ zEe`LuC!6fv^l^b?ld0?}akd>X={j%oE@-}DOk%|PF*-#6Ocu347OTM?E-#3}h=+vo z0z4CPrD7OB3FQEPAdhUzqmCSa59Bd6XPiXyG2jx6Yawx_OFCSFajk5I7O^eQ4|&DZ z4qQlzY0_G1&vnq?n2l(g?AV2(Vkrg7AOH`_fAZI0)Fw?&)*W z+i-DT?}q*~T7K8%frIi{4{NG zj^>yv>4rC(=z>e!@1Oj`>EE6Hv+%d*pQ81_=RX;|R2#f>ckpG~liJ{!J)Xq&{xoR5 z3RJp#arfKDk>fDM9Ut|X7NLB#surmq>Cmo?kdyEn83dArpNWV7%O3rR|2X_X_`~Sk zp>u4lH0-1_NiB}ixMBk4fN3SmkgSuGqtQ7H(5+zTmU7*q5kR9?HHdJ9ERr}(S=a`B zV9;W0m%oO=O>TyphWqX-4uu40_OW@K8}#X(hugh}oAEr%=I%K?GY?gr= zjXW?vZF7C`0U`d6`3)dT+*W{VZGZdF8;4dCYp>o)y_}lf-Z>FXz_wdmPXxi)eD?&KhnLr4xIc>)Rg zKlvIGv^3?5YQZ7{$fLuPQ%Ig-npwlp$Qk6(;i*s|MnxgkA~}z;BS>~Zc`wS4AP$*7 z1mZ$D!}CaddAt#8S)Od9x|Ytb6zY7}MmlqQsFpqfY_ZOt0Je7YTO!k>Sw&nHt&#Z|} z4sMtm;!PCTM}rG1z;FA)1Hh~K1(1iNh5&%HU9RECDc4}cQbQhP0S41JVhI8Ywjh(U z#2|*Dl13d%2VxYG26A>1h(fABF3x6%KrPOyUPvKPCTHm&kMdTmhCv=<@-}>4OhM8} zz}snuYhhfw!zCHl!MILH6$p%fo;vb9S1b!K!{0N*SGoRnx2j_$*cw8g{S|d1@+};(Rec*rh{r@mGK*wC*qK5F67**CXFvdUVyQZxYP7U1Uu-1VkX4hZjym6A zOWV6`Q0Z>8buT@)BG&ot2S6p+wsdY~aQWiK!6UVk6Saer>wIbz5NmQbL^Y1|tn*!K zgLS^AakzV(Z(jqT>TdM(uJf6-(R+NaeayJL9nP^0KHmY&L%;{?#XY?u)Sp4o4`3kV zgP>>dBL)Fj!oUeE;6o&mj{+A)E+2q5V(g7Lt2tdLN|a)`I2FdAJ`gRXviS@=Oubl+ zK;@`g)UV02ibj~T159WZbU}o9O9cBUqiSJiIYtGefDpF&%ek`I2OW zCqc%Ms7S6LVOGD2qX{6C*HoyKH01YlfFdf8;PTB;Am9K(Wcj%UOF2k3QeC(EYN_L3 z=@;w#@kU3doisWd>5QFxG8-KSZ@*aUI0QEm{#$WH}SA2kL?-s`>I*dSmZ;1Y zG;A7Gg28MQsNXgSvs{OAy#oD#QiO`IT+!fzhM}|X13YxII*pd(3b1zL;FvJ-B_=Gx zg2)0AX1MD(+Rso1Dv;m~OaxyrR5-SL!EW=MNM%7A!RD{GZ=FA|2BF#4=sb-1{vLnW zwhKNhZiaZa{}~IaMsVsmCYHL2g*8dWXfax%+eMy zLFybzc;7=|zQ|yXZI(UH0B^tZxPh}F!9O#xYQpnqU7l)$JL`O>Q`+3)yX;NllRGxe zg8;8Db4CpRAbdG-0mG?bKpJAwa6aT7L_Q)0@=<6ucYcJ5K}{29AEB5to?DQ$DysoD z^YH)VyGR&H#GoB3%!6|=kE1+zlexFcz$n6B6Mf*pUTXiY!tDq|&nYNAq3ZWeF%*Kr z+ovfy`2oz5^N=yPnI0fc^({CKuFwv&>itC@a}}6V%rp@A#*%1V9eR7@jggiqCVvi+7-OZG3FUFoay{jA{i_-~)?~B(nthb%MFmT)%&FO4na8sA;<4EOgP(Dr<+eW` 
zJxzWLEE>W5E08Up$K%=Hp81@M|DB6`%02ce_b4zCw-w!v!sp3saX=gevQg)a RUVH=3(;NAh7e;KK{{h}D25tZV literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/__pycache__/query_cache.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/query_cache.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..894b25dcff1d329acb850d89361358810a62dd00 GIT binary patch literal 7343 zcmcIJTWlLwc6Z1bz9mr-Wl;}HV_CLLTTid;#E#|I)+6!Ck!z0Gbyq@WDRLx@Jsi?^ zhW3h1ca1Hukbx~?Z`L&fED!~|MeMdfl>XG8+XB1&6(b{3X5GR_P@wsvD+jx2e|pXg zhomJY4Y244JoldaIQQIh&+A@o)z$e3e9J%ot(hb@61DiPqa>DrFB5xTB1PQ_nCFT!LF<1drea=o9<^ z1413ZdZ7VeP-q0$Bs2pI2`vD339SIbLL0z#p#xy2up3~P&<&6m_5kb=_5$n`_5s{4 z^ob1hh6${Epq&VjqmI(*z6D3*VD?SGgvB_OlKlDfRhhq*mFN1s4QRS_BTkb& zf62eaU(wR4;e|00rLaOSvU6BJuSnM=Wwd7^8ody$%<>`Sgf6F5EjLnY8q8Al(H?7z zXl<%8Jpj`cd0sMio_n~HfXyCoX;J!7Pe#cu$f~I6ajHwnw@N6L;>jf&(e!jC1AQ`$ zCuDU&Or%v6q*v9Fl(kCZ9F>y`l0^}Qzg3Nk^Kn^$iAgD=q?g20oL-e^kHG=sH?-W1 zZ^iKqO-z`_RK-Z8?FCjU%~ZN#v0Q_j9s>m@s|h&?($#WDce2r6l2?yCO0+acsycE! 
zyrjG!X+YxeH;~XIO@k9IjeYU-;6&3|nvk^I%ilm@F|Fx%_~uYii>ES*RH5TbDDiAk zGF5bsMa60vNJ`fSl=K3e_d;cOur$1)IHjd)OzKBt%cNCP3Cj~Iv;rksdOEFWxmUh+ z@oG9h>cP=(r4pHe!Gjf2btO{q1@lafd@EW#=IEV7N|reM4Mb`iq|Jq%zAAxD?tOuP z**K7rbSfvb%A$?7yAbEWe}JsD+_W{X+EY}Epa%|Rz@F7p)-}7urc+s>q1Z_ z0^3fmu+#<7#x?5}+_RopF9+iVZUE*VVFeHNUty?E@XmS#AN2dNAA0=e%s{E#KkF^` zIZ7h}=7_o-GwaOu`q>7c=La6@5kFdDmj;Cf+PJ_)8jXfA3^ErnDve=K`l1)c2cQiatO5}!Q3{CW+E{nOZ>t}` zJ#smyn9T@=NsK6^&|SjHGZ)B1W;)`eZ7|MoiB=TziODRDo3SS1Fj!4e=FI^bPUCrF zcI?D#Op~ZKdqY9=kjnmQ8STyalrpE$JtC?|64*-wu-~L7bzeXW6Qj8F|NEz*wq_*2$?A- zdbLwNn@82vgc{%cFwTH%ck2=U{&gG{VX;M#G=wa#*{J*HCHa%v`a&0baY&a~U(qWkPx zr4(E9X zXtkr$kWloSsXVyMLHR#nZOFhmMNY|juEX@JrNqB)pVr4)DdY)$pEjLfEH+JuoBj^` zwq6FX7gz_QCtyIJ&?-|g_84P}={mW`@#KvrODz`2OU`+QcEIF^jNw!y^`XOX3=SL2 z^^!}ZsG^4VBxEGg4>RU?@R*n)fV#qlW;qe8?m-*6HCHY%i*IR>v~#`E4%_>MMFHLMM4lPWKB2R z7h@CAa}xsX1!A-lfWbk4ic^XQZ8#Ss-C%XN+E}r`Z+T^tlHoD$0HUfHE=f&d>7Dpt zLdsx>FkF~7O4M+}HAjV<#&AMVFq~R8Ww@@#m8_&0tgJ#9i@-V&AML{RQB_S&3_n~i zU~Li4+i(Je<4rGIQy}xliw7PyY{pou!|uVjs2vBbxJ91s zYTwv(_?O9Fzx2V$_fP)n)Oy$9W&frp@V5W9f5X#O@U%VlbRgbvyJ5q#yWrXV*wg)a zbJz0NlTgR~sk>7v&h=35^2EP4hd&L5pM=8q&;9(|J5$ROMF&^6e>2?qp7&kvMtGnQ z9(W-AllYPNoB4lD{c~zh?bg?z&%pw|=erI<0(DlhtstrGyy@S;#||Dq+YQr#Y7hKPOX z?*+ThO}r-QmZwO*kRHWLJ^++wkdK-iXuOol=u2P;n$FAfyqdOW4n`cN_EU5SP&d`_ zG}Nn2+3Yl$w;M6&=>(2!1psQ)(ERqz+c)3-@$DbywUx8$4Sj2#KH39)FYF&;khOmU zD(3#HT@$q7S8+Y6V+cGe{t3(=3mJt29MyS@^f5MCW2WG+omN#A1MwXmb)2=NYDXxQ zi#coMG%>Cs^{Ct8t{PS{3UH9qmV=Br;e6QK`5Nc5K+{=xA>ilWEbw9D1aIBA_<5OX zdOxo(N-AIS9N@A{6ure@gf%$x4a8u-cP2V*u;->HFBshT#M#(u;1rM+h2>#g>#(>i zyhKpLr%5{G)p2;zF)rl zN)$3zqq~MSR1Y|ZR3reP8Ntwf&s|Tx`JK9DZnLxNfn)XTZ<-2CfhuW6MH(T27 zU%q=ezp&o2f1_pi!TycVONG!&>!IVz6PtnNjX+l+(3Q`w%&i9^8-d|MV0bn3N#NKs zjA=E3(B?UrK?n~~P8paHl?J(txM$cldT1($9xo!OheT$X!jRk#k=Jrp@ijL2Dcw?%# z1W>hkSyGa`6@3N`mx*e%Q-Ys@v`#syl7eG|WWjziO_!Xtq*1ZdnB&Cjgj87_WW7AO zRMSeg&nhJtkdI*AS(RW!7|Stn^bjy+FjpxCF8UV;VE05rcLI8P>z1jebO2BzvRyUj zO~KU2H7+BGlK}pR$wrYl!}Xtaj@%u;bEfDdjji{6cYXPhcj{Ie3&Ea^;IWnD2aE47 
zKDhR)tE(3a`;R>i9y2;ezA}0Et5atf#x_1Dq-O>v>rzUiph+a+shFHmXNn)5D)6wh zk_DDhJ%dZd^~7S;{yZ?6iI_`tE=a%g$- zNvQQp-1^hb$VTT-p>ybQ=g1ZZ4O_(7QolU$2M-ChEN3@^tr!&Z`X|BOXANk3!7AIU z_Qhy>W{k^2tAmFp{`M zp1Vl6BY)(*SKfVPy|wR2M^~Y1{DJ=A&4)KPhRzg*&O9C(TkjfQ?-<`~+x=emyWP*+ zY|vLEtk-9{YItM8oZIBW%&^VtM5GCyJJfheg69sOD5lcMtb+A`C|=9Pl~PZG2-XAP z5_hVmq1!Er$#g;#@d1NS9s!GB7yzW+I;E3XW0P`1H~k6ybL^hOMo)&OAyVo~^a`Ra zBEHA#azc#j@LV~U)g?_79}<&m%g124LlN}CPx~7HKP8{C7mBQt8_0kEIf3Gtua)!V zb43EhYG^g_QQMnLc~9B-eu@<7*0nJn0wD+tp|a5 z&-0wL&gm4*K^y9l)&{WXWEkdClKh<1|DO2&gY^6x+5OZT-0-%4>}_A`IJEBVzvU{r zTKtZuY+$YaV3C6=gmW&q)_Ah$M#Mw7#~}*ABN8| MjPJh)LQ{1A3nAwPr2qf` literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/__pycache__/sentry_client.cpython-313.pyc b/observability/local/log-sentinel/__pycache__/sentry_client.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27141cd5ffa001ac6f4236f4337eafdef4a32ea5 GIT binary patch literal 5970 zcmb_gZ%iD=6`#G``+vXz=D=W^l}MzcCllW zsP326sVWh-w&GNk;z*TBK2-9@^qV8qs{8FO!M*F&s-i}!nVos_-f!k{MTL(*`t(DoeV|E1I?< zYy4u80!Pu6sFdIr6*Z=)agCSM7(a7_SJaELrYmttSCXpM`w+*STCLJRv7*X}UL@${ z@tM_ri;8HmH+eOwgP^8MiG&>M;d8T4R9%i!QxP9iG-&}Eqq*-e3umtKcp8WsWS&q4 zR@Nafuwp*JG0zF?JlDqwTqC)_U0`tKfai+13F&j->RsTdYYWd~^4x-#dg89I-w0S! z8BHiK?T5IY1{>2#z-oCvlL? zkJJJqt#$hgD_f*`T=Xe-)3Mz&l7G9xAbqV&v zsL`nul{Hb5mX;H;2&N(%K`^VB7?T&Jm4q%XE6Qq9-kiW>rG&C7$M|kgbFxDVuO7%X zo))HfP1gB^6&*@6(d~x0+K}sP8zbMXCV7wyhdl;8}l*w;=HCTMKoOo;TY0ytSm&96Dx5=olh=6*Nf7El2G)^^NA#!RmkHJoChAn z`P_t}rfK&sUp73Vh!Yb84+5dJbH6y3-Bq96)0}PV%(irX?R8dEJ_(SD+O;$9ok=zH-`4I2PW-E?=A+@? z3_l2lKB&A_nQETAA3Xbyx(2NKulZAZj@=IqY_cS@`!Nhh3>TI46{=1}^Y#c_v_kX5 z1t7lxX9%4@Y0BFzoX0q`ugIdyow8QapHtu?%Wkl(KF;&kjtN( zg3=x3IFJhz`rFiwa!zpdx%J`+%&FX2b%So)){+F=ni3XNfeV49GyxZULTLhSLn#lU zi>KFFG^eubSbpq-up0@lt+C*jrWyrcg(tFquD;a8Z? z+&;VCH2A*KBm_WbnY98P6&!h$?MGXL1Z&N8%ZAUy1EPL;864e; z`lgz^q#EuVAP74RUycNwh979~cFPo8m*Lh{mX;*YQbCGVJlh`-3yuFM}9O3_~dP! 
zqOnliOWKB6J7&0jMpaZ=2EQeXOR}a(aoKR{ik^@ufI~v@fEnJgx1;hhA|+Z2^{|uT z38Oq_VaRYztW7Z(Swpx3fq>^}9kvCEKgQU-#b*~c@z7ZtVbE`XSyj9C&e}WK+WJjL zy|>{(byKFgHJoYfOSkrA+xBPLj-}g_0~KguIk#&O41q0 zbo8e?`fp##47`*ccq!F>=6&Zzu;oEhN2aO&^QQi6sO47I?;{^aZZ)TN9nLnkWE-0w zxw$7}&|6{_`f>CVkJkb6f8Uq|ey2F@fjA~JK7p3f zBsd0N1B3|@0(*`TIF;QAFBZi-ZkC(&mudt}PKY1h9fQitxj+sNR!J&&v1g3u2Dvid zwxgVV^Eoh2dh;k+QAt4h*r=A zT)t4VDd6U#kt{1Pr_(*BH-cj}jw;7f*7s(@!|CwwM(|VtIUUS)_h-7t)7|3* z2-SKp(>j=L9n7{J%Crro+lI3H4rKNnN$)$7?L3<4d_LXzd^R$eiM*VSy!@!j(+6nE z18Ay-REP3-Y;XHV=RZ7egRngIa&w5iIOy#wV6S&G1Lx8M=Ta{VsaJ&5z)Y%rwiJXl z-8`Gw)0f`UcYEe`G__|CKvQbh5MZ$)ICc=>*mHR}cIwXF%!rsC5mP^2NJ$H+k!VU@ z+6br?96Mpdv7v2nY~tSGjlkI=IEF@ez5m57@|kzM`Na_X=k5qp?uC3Ww!81~Zlt?> z6V>d!!=Z@)`snocRkaC#K8QOJxsQG_>4co!8V9sCr$CBfsA_z9tS7dGvlBC0Vn zylQ9|7k)+_IVzpc-5lK{P~3|C8;ec8$=PsATc7!O^$7uTbFjPOt{XD$Zv{{m4Y*L45@ literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/app.py b/observability/local/log-sentinel/app.py new file mode 100644 index 0000000..c990164 --- /dev/null +++ b/observability/local/log-sentinel/app.py @@ -0,0 +1,64 @@ +"""Log Sentinel v2 — Flask health/status/manual-trigger + background sentinel loop.""" + +import logging +import threading + +from flask import Flask, jsonify + +from config import Config +from loki_handler import LokiHandler +from sentinel import Sentinel + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(name)-20s %(levelname)-5s %(message)s", +) + +config = Config.from_env() + +# Push process logs to Loki +loki_handler = LokiHandler(config.loki_url, env=config.env_label) +loki_handler.setLevel(logging.INFO) +logging.getLogger().addHandler(loki_handler) + +app = Flask(__name__) +sentinel = Sentinel(config) + + +@app.route("/health", methods=["GET"]) +def health(): + return jsonify({"status": "ok", "service": "log-sentinel", "version": "2.0"}) + + +@app.route("/run", methods=["POST"]) +def manual_run(): + sentinel.run_cycle() + return jsonify({"status": "ok", "message": "Cycle 
triggered"}) + + +@app.route("/status", methods=["GET"]) +def status(): + app_dets = [d.name for d in sentinel.detectors if d.category == "app"] + ops_dets = [d.name for d in sentinel.detectors if d.category == "ops"] + return jsonify({ + "version": "2.0", + "poll_interval_sec": config.poll_interval_sec, + "lookback_sec": config.lookback_sec, + "t2_enabled": config.t2_enabled, + "t2_proactive_interval_sec": config.t2_proactive_interval_sec, + "dedup_window_sec": config.dedup_window_sec, + "detectors": {"app": app_dets, "ops": ops_dets, "total": len(sentinel.detectors)}, + "models": {"fast": config.ollama_model_fast, "deep": config.ollama_model_deep}, + "sentry_enabled": sentinel.sentry.enabled, + "stats": sentinel._stats, + "circuit_breakers": { + "loki": sentinel.loki_breaker.state, + "ollama": sentinel.ollama_breaker.state, + }, + }) + + +if __name__ == "__main__": + t = threading.Thread(target=sentinel.start, daemon=True) + t.start() + app.run(host="0.0.0.0", port=8081, debug=False) diff --git a/observability/local/log-sentinel/circuit_breaker.py b/observability/local/log-sentinel/circuit_breaker.py new file mode 100644 index 0000000..cedec1d --- /dev/null +++ b/observability/local/log-sentinel/circuit_breaker.py @@ -0,0 +1,51 @@ +"""Circuit breaker for dependency health (Loki, Ollama).""" + +import logging +import time + +logger = logging.getLogger("sentinel.circuit") + + +class CircuitBreaker: + """Track consecutive failures and skip calls during backoff.""" + + CLOSED = "closed" + OPEN = "open" + HALF_OPEN = "half_open" + + def __init__(self, name: str, failure_threshold: int = 3, backoff_sec: int = 60): + self.name = name + self.failure_threshold = failure_threshold + self.backoff_sec = backoff_sec + self.state = self.CLOSED + self.consecutive_failures = 0 + self.last_failure_time = 0.0 + + def allow_request(self) -> bool: + if self.state == self.CLOSED: + return True + if self.state == self.OPEN: + if time.time() - self.last_failure_time >= 
self.backoff_sec: + self.state = self.HALF_OPEN + logger.info("Circuit %s half-open, trying one request", self.name) + return True + return False + # HALF_OPEN — allow one probe + return True + + def record_success(self): + if self.state != self.CLOSED: + logger.info("Circuit %s closed (recovered)", self.name) + self.state = self.CLOSED + self.consecutive_failures = 0 + + def record_failure(self): + self.consecutive_failures += 1 + self.last_failure_time = time.time() + if self.consecutive_failures >= self.failure_threshold: + if self.state != self.OPEN: + logger.warning( + "Circuit %s OPEN after %d failures, backing off %ds", + self.name, self.consecutive_failures, self.backoff_sec, + ) + self.state = self.OPEN diff --git a/observability/local/log-sentinel/config.py b/observability/local/log-sentinel/config.py new file mode 100644 index 0000000..70997e2 --- /dev/null +++ b/observability/local/log-sentinel/config.py @@ -0,0 +1,25 @@ +"""Configuration from environment variables.""" + +import os + + +class Config: + def __init__(self): + self.loki_url = os.environ.get("LOKI_URL", "http://loki:3100") + self.grafana_url = os.environ.get("GRAFANA_URL", "http://grafana:3000") + self.grafana_user = os.environ.get("GRAFANA_USER", "admin") + self.grafana_password = os.environ.get("GRAFANA_PASSWORD", "admin") + self.ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434") + self.ollama_model_fast = os.environ.get("OLLAMA_MODEL_FAST", "deepseek-r1:8b") + self.ollama_model_deep = os.environ.get("OLLAMA_MODEL_DEEP", "llama3.3:70b-instruct-q4_K_M") + self.poll_interval_sec = int(os.environ.get("SENTINEL_POLL_INTERVAL_SEC", "60")) + self.lookback_sec = int(os.environ.get("SENTINEL_LOOKBACK_SEC", "300")) + self.t2_enabled = os.environ.get("SENTINEL_T2_ENABLED", "true").lower() == "true" + self.t2_proactive_interval_sec = int(os.environ.get("SENTINEL_T2_PROACTIVE_INTERVAL_SEC", "300")) + self.dedup_window_sec = int(os.environ.get("SENTINEL_DEDUP_WINDOW_SEC", 
"300")) + self.env_label = os.environ.get("SIMSTEWARD_LOG_ENV", "local") + self.sentry_dsn = os.environ.get("SENTINEL_SENTRY_DSN", "") + + @classmethod + def from_env(cls): + return cls() diff --git a/observability/local/log-sentinel/detectors/__init__.py b/observability/local/log-sentinel/detectors/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/observability/local/log-sentinel/detectors/__pycache__/__init__.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cb6326ee76023d9ec9633d4ad878aafc2afa1af GIT binary patch literal 196 zcmey&%ge<81P3M^%>>bpK?DpiLK&Y~fQ+dO=?t2Tek&P@n1H;`AgNo)&Q>v@#i>Qb zG3A+gB^fa(sbw+6nYp^fC8_0!MJXV5Mro36K~8CUW?oEw5>RPbVp3*KW=UmCPJVJ? z4v*B~1|oII{Q}8q>wGH_pOl4YTXe z7%JuIWR3^IaVX|msj1qBpu6gFZ+YlL`cR3!IZ7q=R-A-%y-K`=!l`Os+HZF45IF9= zBYF0lnQy+C`TqSrKMw@F2-=pre^KTd5c-A;>W!}v&p!m>9ug5oA|=u%sS`A&1wu0? znG-B#ZJIslI^o7{n|7V#PVkr)P#m=*(Y+msT$(+|IP$PZUsgEm(~z9A7R^$*I1oHc|@)=&zXFp*KzB9;Z5F=SoH zXgMKU&~!OlG?Zz1WA#eLFl4Oh$*1HF3Ci;2_OL^kafU1=`O z6J~far~{tt5jpJ5^9hgDc+7sukn@oGC7u$ySPjlArP?bkCUfPA<;s$KSZ*vEMXY6M zV63+w5Jb(-S159y9;DGJY6R%ln@An3&1y`F*;QN4fvgnWfyQd4f)TU9$EKJl`oN{r zJxHgc==fHIG}l;N11ROHbJnG0K-#HBkl}itKD7-Y9}z^d*Ewjcy;D#HVFEry0;r}2M^Y) zB8HNT7#1U9^j*IGRsoHL>%61;Q>Y#of~|X<7C^t6YXT)=r!B9pOO6ldPl>8Ak(>YY zK52hFns>xJIco7)tQbl*qgw2>4Ax39$Eh{~fP{xoVv_qhOSG`dqIxVUQDTL{gis3C zLP@uo<(#nVDKSujv0O&M0Ev9f`*e#XVut?n_H}T>{&roN>edYz&das$7367IGy2=^ss%&w3acU}qXW$heKVKk}Jgwwp zEh}3MvJTIJdC4e9#(vAE7V=kB=_(0Gh#n>{8y2I=n&p!eUDAu$tgP#n8*V6TIZSkB zv07$I#;t^O;X+Z%SrLgu@?~jLu1I(-R+rTYOzaD9b`pSYaW(bhEo7d`VDNind5KP7 zdr>oR942rxbUJY`VWU~fKwmng%UDleQ#4~Tos*~2o0LZA$>MnT6}6aGv~*z{Sf?}N ziVCVps|9#9^!aWb^r~pG3foEID}cC~l52KJUYW7n&Z5=q*nQ~LWRi4NA;Ba4ztGJh zv%RCtn~|;ai93nP@LD8M=Kc}fVn&SrFn#Gzi|J;leR9iBl%e9T*xgR`+dVRj(uy(x)*gM zOhJ6MEzlNufdb*^3jukWDu)(xf8%?f1h;}qWRYf;!r^EZm zxKWaHBLjgc0RUlTya*r#yY|D{bNC_LLplPqc1Qe2-vQl+$n-VMf+AzH$Jg0*SG;m& 
z9l>XbUHaLVfqxMazgyo$PSs=_fN^$ns%K$>MDp{8(A`6jx;C%E9BCr857m>EM*RR- z(Dl@%(VLPtJw{HjTV2BD)c+cg&|^eHZ|06d3Z+LjQgxjVp@TIN;NUny9VpKxT&FGf zgd(ds2+ex5vj#58=MpYVbZ2=LUC}`8Bzv&qg@S4^U=+)x7Ou%S!Qeh%!iNbbGo4X& zq9TY{#}u7pjdZ>6(?#eYRs!MMqc=zACKu?nz>e7=(;K?I`{wSs^K0JL*@4Ggcy7lh z#v`tIm1{PGk+RpB(I0V5t6USz+@85PbL)C#Vl~)NbeOYTu` zBvNhD(U9+<^rAGVCp0#N0CztlxSMjl(U{C8W4crBH}bHet8Okpx*y`=ME)gLFQajr z4F5f(&qV;I^}bO+Y`|bZa!)sSqsE?snG^?i_W=D1d&O&G`~c9@2uaSbwXqEh2ZtbW z@mdpoqCdsJy=`RfWyOXRbC`y4&_?P|uM76Q0p~{#h~Xqna9E6OM&i0=PyNxUPvGv( z4F~^UZkVU?RHCtTc+e?(d-uGlz|{$o8(&3jS&OsQV#MkSEQcN(EDX6|Tp{Y8P zpe=t5r|e5uzD>*LFw}zxZ-*}7wxiY#I^0EOiCer(tO*{!8Yxx)qFK&duGK}BzczHh z_+@ar>|=G}M0H4ZVmv|Qldd0r`XIc07Cni@XHS{Y*zE8(p{-@s3^&fl@5C#+7T#M6 z?<~8`=(hR3JAIYOMP@C!w;JhQJi9pkMd?9lxpO75)-za5{-hcjD!cw3++I2GC@8E3 z1<2iN!A>*Oyg;pn+CKI_5jy7rW~=bAf8C8j&6TTb!1EFmI|-_pEj!HS{Y(8zJu8uw z;gur<3E%2mm|E?8XK8Zz#KYd}`Oxj)&ERi1v$19V!kr6dW0M)%z1Y9lvm99- zUOrNdoG`bvRHi`o;^fkeC9OI*T8*6t?njMXtBqY|SMO@$S+l9tjGu%9w|T!q9*=LG zLHss5sU?CICrP_e~DMc^6x{%;dqb%cD5mEvPB2oGNc;X08BC6Yz592X)5luE%QYqB==MqW` zrU21*=t?vxznGTkl|zx}v>Y{!HOtaz{2c1n$t0l`I1#I4p`2u;Hi@Hl$u{Yb?34VY zfXYKQ+%@dZ2|MQ}UDyV@Uhg&c?%t%=WuA=Wt~l1idcB+UdRcFkR7HJpU(jzRY@dx%Wp^xX`L_n`l@|8nzHQS@4PV83>tpa=5UzNI;`__GKy?H2AWvf;e&Tu{oSyqhakP zLGHNWi6$fIm>fpW(+qE+#?DC#+6^u)>qa1q&T&4x?x+RznotX z^khuFIH@IO`ZZm?6rnMcr_<;9Q^|BZp-ighK=oqeTq2p!uS_P@Xe0?`yk7&?NGNg= zh9mq`m~P!I22xiHhuL8?9@ywb17=SZSfsrQ#Z{8)>Yf#H{`wovYtHPzsy{eu&s8_v zXu8&v-M?DBeb#Yb@O&b4uL#|9p*zBG4r?7NLdTr_jEz`69+eGJeL~RlGJ> zm)R~wW(t9Q*YI{xPt3@gm{3Gjkws0`w1ldN{m+Qtz{-H(oL1HIVf{)FyeU=F z`wFd|ElLsFxJOiKD^_to;Bz;{m#hVZ&82^(Vhd zID16H0{<^(1gT(CMi2opNjWB_(f105|M}_%B3Pj%&Lw5WPC<@#K{xHj`CT|797Q7f za_{SrRBE)#3@H5o)qVgU;42|Xzryq2DsiO?&C5RK?^Tt!K~n25!#32a7f@mSeOvQ24*EeWB+u;FVmI1LWm zhP}WexMvOhjfE;KXba9)3xyHEHQ!;Nj%wq^ItH?7tT&K_FVZCb4z1YJ+Z zTx`z0>gijstax_a_4KWINquwPPqvJF(zSP`YwyQhqbr{Fw~x(A+4`KXE^B@F^_;)) zhVPp1Lq6BmdF#y0Gr6`cxz=3^-3tv17Z&Rm`Na#%%?EPrJ#*TeJimM4@WRMq_2Tei 
z@A82&%dIctpjU3ba=#~-+ZxREY+vJT?G1Ss>HL;uE#T>_`6uyI*Z#v!wmd`IVY80~ zHiQ}rZ1+HbG|W;6WIcsIRtj{`U3egj6^GU2ki_-6oWDY z4;?{?VaKrnJMP(fwaQQA#IpHSIf4TqXA#0<&%t41ty# zm#psrNduhInve_AnaU z2|nyFJw5^lJ|Ts8MzQ-?yS;>j!%(+XqT5h8g#qCfh`9o>mJu`Th>>!7LdWERSpon_TbDOGYXJRv1J2RYEeOl!jfq+9v?T9qDQCkepS}5rq)3yf-w|A2_TUT2g=P5?xl;^;R>4+vOYMCuMn83mTquQXAj-eSy zyI&6dPb}NTj4jb?0nh?47z71Ij$9-bqeD0?zT1uX7EFAj>$5}@C>W1ThI~T<<5F_W z1pCvdfW~PU2u;KniLf48jGj#~|JZfm2u|^PC;;_aw$AeRT04F)Hp_or*Ia=Ki@{+2 z;A&vU?2$j!wjd`0KL&bNeZ9HvZCPQ}- z_B^|0=YZ-2(s|mlR_{itb2F;z82BpHLFQ^w-82gw6~cy@!~X=S?`(;lD#LgE9$H3* z6@e~UC9V`kAk;w6DCW{nLEw;wtRZ$2U|@b0z|2+3u2HYJlKB4u-Y&uV0C#?n10u!T z+llo-yeC%zKWJ~Vdza)c?n&}MPVSYe9)tgfg`6C|4`iB5@W$knJ}o{o3fv1BctV*{ z2Ml*vmx;42@RH~OC=45_WSS4d9d^U#m~CvTQNt3H3eWO;`xK~Zzk~vE)V>`*8=G}AVBV{1%f@a^-<+O%bG3bVwQgi~ zELXzjH&z292(G%U<=ss7@^br*h1c)49|W?-EOV}LS=@EU|70cDwT>*L7vhUAE;sMb zwRg`=%pF}G7+aE;PA|8PmtnAXBMgffC%|w$<8%Yy84PbjLgP1tFuNgz!-gvyo>61z zqztkr4A+N960ZhO^M%7x399Ls_$z8Syq@+@JVd+^Xu26NTUi~yyRo#NVwyuS?ooES z_2DptO+66}M|8-=Ak@nm%y$YEFbg(c1kxtO0xzuEdr*9r+HRwuRHb@IFE@-`HfYoj-9|3cBUm6?eM zjY8-uJvNXqOg%(TzE0Z9K1+m1F<5kyPIA^-9CH@OuxH51lL(9!Ne78rrpPGQL8dH0 z>v5L*204QR7#Pk|a62BORHcUNrARVu%J4OlyBJAoXiVYe^-T~d7H`3yhBSPY1pZffH4t~GX@CNd2mzTWj7r9#&kvksr8hkh$ zQ={My?x+g4;;0VycFnw9;s%VW;(b*cI0qkGsT~^~;8Su^W8Pu7*_|}ZzBn@1VQ-nf z0*cJc4US+4`%I5~h9cl_W7-ZVz($MZQ*!KYr0=h!_cPM`8R>oGxL~o=W<8Gx{^qAS Po2B~w=obWj*xULKX}?@o literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/base.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/base.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..987099bce2a6cc9e57d3bfbe29f7830afcccb938 GIT binary patch literal 1032 zcmY*XO=}cE5bgQiO?IP_7&IUZ29Z5vLDY+gU=o5P5YbV*3^cv7y_1p2Oswun*qm}R zM-c*EJ$ln0;l-;LZ4d+z@fKLUTQ#$S_TlxbuI}opS2gK&XBe&zFW+acEyli)@ppMs zGTA_K!XgGNvLbuS+O)ymbifJ8cDCG254@VYTmEJMLC7w!%PjJiSmej{O{a+itu*Mh z4wqMjlHpKlndlP2tk4ohB9Y+;tw))J@LK;Tz3AD-xx2b%=EOj05Q*NEdZ!%rEE8;G 
z#V{*Uq-WN~NiH8$62@yH*^x=}SA+i4m^?yq!eRy%7GXygI9FI?gByFXA30ai8@UkF zmPc0H!o=HA5YNQ3q{0n1YAvy+?oH&hpcA#Bo6huRO^3=^pH}m;jZWhZ@W{A@*pPu1N#ldm6%>hGM{5ApXQb4`ML1!p)w2&*A6CUws;n%+yiBts6sgDx zrNcVZFnmU}hagpzYt;`Q){(1d6i~GTn_N+z8GYArpIw!CL%{nSClMtR>&NmH~myG72n?* z8^1X+^ZyFeZw|YZNPuv}PJPzyOpr1-hp^q7GcC?ZJI>7v=ey;w%87S4e^!Zn>N&&t zCRSv78*5E=%U|%R0SFc&1~IfUiZ8%qeJq}KV_DW`*89Phzq6$;?CLK+usScU|7N(=$@~G6D-4A1M0o`aW4bBer0cc1r6da;*9ohXGkPaJXHoa-=c+rU1~mt)3U91}-3+}L08rvtU|lcFx> zb#_7)ss)jqP=RQorb(*8%2H9x&*o*36>o})uCanrfS+0xWJ%P99uu=^%53?bpoz8_ z6~owfT2cyiruMM5^;XsK=`^|P6H)kVmfOyP+w7Dy9?TL4`W729xGd+2N!}MZNuVN+d zvY}WMT#tyWwu)|o=O9vCS!#;fCgHFgb=a9z=;uhdniB45$NdN$cJvib)*VG>&mp9E zv)&z7pgH$p2bRLIu#eco3m1)9jEDY}g z@82;>f&ZhPCV6T-&&i8uL$>i&g4rM!8-E`>4e^_9ZR!WP_Q4=RTt_n|EiV{{-CQTv zMRw1{vn@dPWP?DvM}0fTBO3rjiDx5nhPnvF0O)rtuOzLEhVuh6uYMEXfZQ8)4rl#v z!XTd&=4&Sbvg69TufX}W&P5cgzF6k%=Jvvl8Q3K|O7FZ&&XZ+^sb{k8qJz-252br9 znGvT_r(Dr=^HMpfV9_1TJOA~WC=GhQmxLKRih2Z_*NRTE8LRe4KMZz;8q zBQvP!0@h8pq!iU!?2>q^QRJQP<7_Qqm(QomzNID>#@VzBw?VGp2$0&*9}AVr@stg1 zupr(Xl+{uR%6uxr=GAgVRiH&2Pc?Uu`YHPp_J*b^X`jgytR|yklhL4+U`d}fJ-VdJ zfLg1T%L1M?nY^HjC6#dEO{pL%dC?5Q5^u}=+oXTgf)y+dl9hzKUh9}p6h{XbtcKpEBf(?&}O2Lde1HMqj0x`^&H3>Z`BkA@(j-bb+ z@|*5k0#<5~3#w9TS~)Q?Q)4)%b=DYG7Bn5!snIiRe~m6{X~&&raGPP%t?6o|7CKd~ z=(7z|wG0bDK=+oU&#***B{`t6*Q+`^%r+wKbYZ2`1j2bjy+= znr?y(R+kE9o8vl1iXgW$XfLtvnNh1)(?YN zquFiei4(5IydUAL@c4*FTMA{eq3;v~{^+$A(cOzj7mhBSUuj#9W?%7~!oLTj+ca6_Y zeo3sYhkK1k=VEvv{E^Q{^xV68_o|V2$><(ljjy)UhsnMb_ovJ!O#RU5_5L&K$+Po8Bbr?9T8|!h*xj={uM_Gm^tb>cGnQN@jItt-4nFJ@?t3&qDR=l{$NsNF_c@ ze0gBZ7&x-&p;Pe<2JPpm&Gz8_j;~O#efK{-C^oQ>B>U|6AZbLJ$R0#SHm51np4|Eo zg<`#P6MqS}&1v)6=erN@)vQl1xdC~{CKL=Ci#5G``=#3O`|E0#sb1zGC_@(W}H_uM{d zgk!hQZn!T{p3bGq8;HDD-K%f@F7%bf+a@e+ayatN^%q!#NR_d!&8LC6gOUh*={#A~ zxPt6MJ!ral*`6jk)-&evApx{mv=^1AB05g*MMWy@svU#^51m411@@*OS4F6LP_p$5 zYhT=#lP1;8{|0o+Qy4?T34EVuTRi+9p~03$;IXzphzBqs z)9};!fZRqKZraoOFx)mj`pb9b!vA^L(*Ay}9!zZ^%G3HqII=;*`@b4-!_NaD^s5L< 
z-=kRiQaWJzc)p7fK6rTODcm0$95Ig*|n=}Y46{nP-mhc-wPy{NJY&I#vXyw=wv#{PZGix9z zRXJ5g97==`IrPAhiXP~#R6VqnD$%M3r$Mb5ft0G^kXvY|DsFwV8;6v3qSzazjHDL5tq_!5Ro5^amf*8;t$7dtszb@^!M zNSK6uJ#@Oq>x8F{A^xFeB*nu<(sJ%l}+^E6C|OEg-t&S9!qjz+EG zHtOMvS(4Xmw?Na2d|OIj%Ai|zJWN21AIwr5?05NCIczz3%PD}`G&q&B@rg1fQ-j&u z8Jr6a4diM>eK5X>GKdHem8c6u(nTWc5((*YCai}tN=D_2`(%#(tV|-hLZTcuYaQZ% zb&j`=V3BjkVtyp19|@DPr4kF}I067pNRGlYegzM*wD6b+zNDtMmjshsc{oeozz^E&ZdR)|1M}^r?{}%{35knk_ z{MW;&SbfewBk{WavNu#cV#tk1kM5BCK4vI-oWl#`9uaH}t8Iz;-yqdBs|X` z4|(OlD8WZ@&n=lNi0E|cL&(&<-D|3@`%TTp7qGpzduU|j_(=Ch+GXu5b)A&T)ZlQJ zFqOg*Kt9Dnp5@sP8Z8%#SuzCyWj$PQxrQ%TdFX$J9)wxW3rPiVzLR;RGi5a+mnPR4O1tB`ll=$ z@E$}eV5ZgsB}rIF*88jLXqZrBLXL75+M32D2m3Py#e`;nVDC&Ok1u4_5XAK}<%w-2 zyIioGj5`5KFJvby8|cW`E<_5lu#IBJvm9*0cK*=0;BL(ekuFWKaA0Ab{+<8pNl6DL zp2D;R?(+(&b@x=%T1&^R&Y8~H-SaJ5D&fb?ZPyRqh*d%hvF7XBukEU}ZKxdoeb@f^ zwgX>x+#9$xG&3}tx_$E3_TKv)6}8%S;Bn%$*-P_@t-mGu<`RAPPRu9Xsl@r_gV%P~ z5?xn^7ZRH)!wYT6*@2nRHT+)=8nZ`BeT zuNSJD_I-8v!NC2GADo}te`0RmXmxa~I-aZM@!a@iwQy!`+^UY9t)8{3?M3e9^vvmn z&et9dk3Q6lpW@%i-wJn5JnY*2WBeD*covd68kbbGv8R?Ce3poJMxUd2B(}5>H76@g zFaF$)+6ROe%RQ)TOrZSbxVvF+gZ%yO=0R=!=jd1}##GbfDPo!|YMMniU$(g(HzC!s z_KKy+G$$=WJsU2^aZQtK22XxckuYdXwk_)U`9^yAc%5ldFAFtlW<8Hs6J-xm*tH#O z%kUwR=0dFt{x6_!fcON}V#zCqYEA7|hL>b5l$<@jgrK`8-y8q3;cu>zck8>3rJ_tR z&Agifys?~X+mJGr>(HPi8dq~DJpsQyBEvowh5)fmgVKok#bo+_96yc#5l0!E!cj7H vPe95(vJZy*ouC?sB}ov3M=1Riz5Wz!d4kqIL0g`OdxYfd#=j7BzK4GRwn`-6 literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/flow_gap.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/flow_gap.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6455acafa4dd83ccbd617fe81b2ed908b5fa01ae GIT binary patch literal 2892 zcma)8VN4rG7M@-2+KX*$8iLO?2`pR!xGm<%K|DAK#4^$lL7Je66T(@Mh$^Q8 z6M?f5k$hU3kk1B5P(?}9i!?cfv|wHw4%F8mp;9QVERKv}7Z+R=Un|287YAo8=c-yV z%2b_MP>a~YCBrrClIl9DJBQVAV4t!}rj0W{@rBX?i=H$nt}o*d7>CA9y9g>k#zz+l 
z7Cv9WWMR}O%;7?P?|T2pnGqN-qdX!4I3#KU31}jbv;dJcDTPQ-lk*`kUCFm_m7|gt zN}hZ7Ux!-Hp)#Seo{b=DcltuTwwvDTrP<9UHzWZJPr@qpj$>4+bn;Y zcO`IXYDHiXUmN?brbQ-EY|WBZ5TIOHRas$lB%h<0(EL@?cIWa%d?ioK`9bR9s|G1@ zdag1vShgwqF0O=(=y$KyyX-O3;NO^#WsW&TcFO*!GYzPW@pL^Owo1IHeFq3 z$z48Xnw%)Ni937)d7XXVD9HyvBPJJkz*&%Bw?gd4yl)4-@BQDQUq(a77IQ*Y;A)RI zO&D&>+JRi49la${aR>s?j?Q!<q21N8%{19DpOc$=*uh+|9i0}=%pqila-nQ+hggI3`5N#Q;PXK3 zYFYojJwExmu+uuYO_JS`6SMsMceP9L<^*nzUCC+&EO=X=Fz@3Yxszo&R| zV2|COIg*#Uo7!Np+N8Jt*jF$;evx**(@GTJ+VHrnU-MJo#4regO4e zo#OX`iYk1zG>g)$Q!GfSZdewR@fCm$wtvo{u1+v@Dx`pQYRs1{%p^b{doj!*gJb!K zTH%?i{_Dn{)p?Urj^~T9Qw_*h0kO*JV!LWPt~%>f?4oMgDn}tTEi;J|EXsSmXwtG} zEU;)}5uIzY*5=4EX0QcA!v*9i`b;{$nEC+F_x0W#(AfJ2_4n#!>e%TPrZjPnDHJ>+ zfR0RdO&6R9(aQY1K>!2`hKoxMr|=cCi0uMqF-!qMVP1C~-96GUu;AD(zUHzBK3u9v!mf|qd;!C%fEtr{jUzl|Srkb9(e6Po7u!W5ah7Bo^O!{&j_dwN^1YVp z-bRXG2rDYuP2@}mClPr_Eq|0C7J-k5EVG@G@;m8Yhsa_u`3&TO`{y9=6U>2Uo zh7D&kTd`QAVJIvMa@zy&#V8XM)}b7>)JKhQ9Ad$GI9Y-p{pszBn5oZm8Tt`mI@F5Z zLraR+k-9bg@$~I$8yy2n;jJL*eQBfP;$Ks@Ppno}Dr*=1_Tl=83)P_uw~TwKrSN*k z#UJC(d+nV|=e|#*YKgvTqOX=1swRfk!W)U#mLmU29bP)?{i^5I_2ug~177FhyT|Vw zuXVms?R=%ydE)cm-c-D{H}K-BvZAa;Rw7>drPbt0a!0btPcPRp2!4CvTtIMDSaa)-L>`^&uzgi*Ws^w8BEVUUUjzb*y=} zX=VnVhy-36kfGodwrKr&Mf@A8`%jg+*}f5@Ah>N9Q;yXG@dh~q4E`da@Q^^80YMPH bMZfz2^?!#_-=Y3rf&)VGruAP0KVSX-|J;`X literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/incident_anomaly.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/incident_anomaly.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c71d21d2f6c1def99f562d90ddd759beaff2a711 GIT binary patch literal 4586 zcmbUlTTC3+_3qB>`-#~#yVwiM!!|1$f_OAGHAxH@8yq0r@dj)*nhv|OF!3@=?#$YO zJVZYl;pW4(lE_%K6x`&4Djy|MrMgl-(n^*3xhtA5xpk_jPx)^nyKR)Oo;$-VgpyXx z6`cDx=iYl>_nb5P$meq-XivWNTd6sK(4XnVZ1(NJqc>pi4&o3-oQ1Pqu#8%|J=Sv5GBG6;!{5mc;>rMoUX8<)Ms$xcsOL9iain1CPn6R zp_o^*QbtW!h<89xMB|nMhPh){lCzRL3&X(RVkR$suPEZhK_N3IX6R97qkPl$Xb8YN zD2=cM)Mn)@nBlCLKHx=UWkyZPVL!2Ar+B%bv2+o5yNl 
zl}(ze+PMIw>?Um-jU7eEgW{AD{9|wH-Cl^JX_twcdM2s+vN<>U-fdZB$J9d7`*j?h z_5BJV;rmpE{WzMY+S6MKC4=U8r{}C3lXCbG#{zzz+W}+iw&@i1Z=`T^)7=MlPBlls z@jhpA&JHW*;@q6)lygU4(_wR$sT`pnS>r6gV?kc6V->Sd(V7m_?g;A`>fM|^0_5f!xYVO(TO)0_S zeuhvHZdmwWY9k4p?QOb+hrhLjYq$d`H`uW@0# z_F8J}GD5)V;k|qszDVEG*p+Y1t@VF!3;h3AZvVrt#y;G(0=)4=bewnJUwny$*c>MoWhqt-(n>t5M{O}1zb2`3fEVIRP6Z}%wr~w%u}6>Fe1=A3 z;x?9fMa&E@^}Yfjt-o23=Gzrjye8mm^NF~46(WCsv&jo@p5FXg{MGo2NYkvKDgD$W+@NW4nZNk$}oQOO99Dnwo_@M;(F)@in0WFzNm6b?4!4V9DNKq|>yPfkX|yMdqGDo`RB>J*;i2Tn^JkO%dPH5iHjfv|Mch70j*t^j?Q7zi2McUS*>z<9C`|Q@qvGU2W(v#!A6G{i4(+;(+ z9xojk_^4-VaH>2wRT{Wdx+Ih$Gjwn4PE1R5Z4FG8;V(L+HMVMpxi4y5G5=%a@&z78 z(6Ns9q!pjez7~$&p1L))I=61!3?E+^*20HwU%qvD&A%CbVk?~9_|7l-KI|(!HNM$B z0n-wf1`==dZp&)!$HzClN7tLm-s2y8kD8J;9N&s{mSdft#JaAJ0cil_>ncaOHUnK+ zFmgL^EAYNUi#5G_>CPoB)}kG5SsPwEx!$!du1{`^mg=9@8jh^x?pXERZ)SH z&QOA9DCnnvR$w}y#fs7g;0>gCV#`BdfaUX_*F{TgNIO`!JfR(KD6vrkR@l?F$m&D| zLASwfOn&J57abbM>L;G0$JPKC&(IiZHp-n|uO5KacMxQ=-PmoaUA3wuWug0LQ3Kjl zl~!xnR@8*1q4G7_G(BB!Jyr)2;GQ^YL=$Wd^)rnqXK6sR;)5?9R<=R~WKDWZ`kj6n zY8a!Xs<$Mob)<=d?;Do0nbntB3DL?rNoC&C%xXvHwpuCfa5#%h4iolq;;KeV48a7! 
zM~gEH$&9WN%jG5e@y^b^&d!7z(?mmTxqLx@jQhNhFN$X|hHUA&xVT^p@hBzn<#1tM zQVO{Oo)=WyN!J`$gzT3Q@d%x{&O(A(px;&r7E{X+1`F4W-PWwCPz}@YYj{ioW|)E` z{3|~M01?maztMKR?dEqk-HpqGn#28rSF}KF#md-x4+5c^+?z`!XY|Vl-h+2tB~SAs zWU=`^b$Tld^j}q2n=SO2^N8jPZ~0ovzLxg~*O~XuZ?*Q6TYEOR`)!-8gPXpg6)S|v z8?Rn}_088y&e)d^JRy2a=u@}`jtPCG#DJ6A?q>9pU@!A?OE2@wr_Y>1zdq$2urt50 zSpX6jO`KvDaw(>T?AN}es-AftFw`@v^)mH83ulqxLr`X>tNyynACc(+W)F2O7+q$k zSXvoRF%-7L14~)@-i9xh5RBlHlGe*I^9?k#k_5VGx>avettp0{0sCNX`krhZdvaHe z$BgSvgIj?l!4Ml3@!sV89MgZe%4}%tmYph+E<>l`YfTFLlp$wYHID`o%CZmti)ZG@Y>LN_}+yIg5iBm N?>^%GL1()3{|1^{F`57X literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/mcp_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/mcp_health.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24d884813870cf23973529773e8a97ec32bbe16a GIT binary patch literal 4560 zcmbUlTWs6b^@^n4l10B|MUE-ij;$tAk|s{`UfZ~hA9b9DRoc|f2w9?SwKVBoQc3N$ ztI>}^#a;=TAPz7f4p1x#wjmy{j|>>Fd~Cor>_dUJh9OcQSkaIC*>l=;{o1*tXuFw> z4ke&-&%HeNyzb#S=V~Pw^dWej`RGsiU)Lh^8JXCRvx=<9fLumA!iab9)SzR4!W2hv zdXOGqFk|A(pmV^5T_)}vbPss2heKf$N4zV7c=ssNLtFf?m-meNc<-ohFT?vH=nV$@ zVHVg+@qTj_oCqX>v)#R-CT2Bm=-5e4Qxu6C&*Y^77F8}I=QveT&U4v}B&n%Kgl)oM zxQ{6XSraj!f#)--Xzf?X-2Gx+&gJC^Ktso7vXc06LBuo1GTBKnYbj;HRg3q+{4yFv z*a22kyaUrbg&Cg4PM#Tc@y=2AsD}`P1z;Um;N{)eH{nTmjoP8?bf1`!v`JfmN5sX3 zZz?-2OqwqYTA5Z2XO_q}Tv*f!Sk6+G3MPb;K*U=sb0Fk0+KFT;?KtIFM^Qz2M^ANC zrh$qXsrne9o+^dxgmqV%u19^pKuC6{-5c_uc)F*mf$T|pPT8JVCz5AuS>4su9PAiM zBD4ul=Q^I=5x31M?XmgDz7g;0>c)DxY+8Gi{b}&^A=s7)AoSYnN2ntdQI_{4g9xd# zN_D{uXuJns0~+vTnmOgSrL2u#aBOl~z(t#SphxX9s$nK%i=v3Vr zg^i^HX{H{X4yx2nq*8F=4G1ZpZgSqW_IaPAhMFvER7wu=bt#Ik=NtIO1FPr6q2Wt} zbKg%Pf@;2_6-|6|y5`$-sX==vKK%dDZQ_GXWZ0*@3GbG*SlxsovPQvgNzve;=z(=H zG<3o8VcG?L`Z4$^v@W(o8`}N9vCV#G-?pYVmO=5W=WP0RYnp{fYU6zXxjoKJe7Fg( zRUfjONL%lt*akj=_#t0S+G%HyO#gTOD&l-Q-!am-8v9OLJ7VX?cJZAX(eH9Rd6sFa 
z1HK;)esXXeCAOU~0$C|j$cX|eM5^H>gCJ+7L<1R4lGg>pJE&9$O+Z!ZSd=LAlJCbyUo8|D3zTfp9fHCW)LX;&b_|$SDOaGa(x8g8Zhe zoR<@n5v+<3vZS6GPSfUBW)HoSnVvopw;YkoiRY4%GBE)~HNJz(DpS*nEXvxEAH)gy zyWo>#>Z$m9+`HTxsv;*mhR0Gv!G=c_&xv5u46x2?63A8yQ&Sl{1Eyv)aY7-OcrKq4 z<*aDbh$`3)>)?Aqd)n}W)VCzzEmFV@=6nXrv%z(K0gp7Tb_}FHFx?~O4swQ1#8|=B z5&2XR`~WAE&n0L~k}mEfU>ksh8%GJ=O2AeE+6dqXh!M~ZU^c{)4upszQb25tjgxNW zO>WjXrE=XzXM-?klhC$IDpD?C3xmXL2{V|yJg&^LtDCF7u&f}gRc>s?J}t0;@cX|WHp=8@Cs^XTom$fsxCN8QO+5(B+8i7 zPlF-xY+7kBxqMbj)EbN`O5+&HEJWeB)j_Fv3z-H;(Ay1}(LiQT>Sg#yk7g|!jFgu} z)6WJ&J`^}}EB>(Qi3s887&jQg(qITH2$^rkd-5^FV+Tjmz(pNB4+c$+LPUgrMK*f z>aq5s@3V%MVy_;KUh`b_ENm%_Rl>WA{d#@$UVYcy`mU1uUj44S^}Ft~;fM9jSN31r zKR;NitJLo(_C9FZwlGs^+Fd;UAkx0jd!z4qU+HY6<4`4XxH!1vLTp31`MFByIi2mO zu*t>RyX@02R$rtlp_I->@3HN7S=jt{HbGu&nLquHP`l1X?y<4EY;2+96Six)3pGX_ z??5fl`Ijo;L@}uQ!}Gf<{-_>`E;uTon9hc;gf50YcI&ONYiF*W(ObLp=+4Dp>HXXJ za->gh+cB?{leuzKBm`%#o_(0ucfUVfiJksy^v-iPIo-F?h6#CtzU_v*2INgy&-yK_~P)#$Msm}jh5>zdaO(5lBL|O$(xgl z@7(FHq)wER!{x0f_4xkV4Sxv#E_`SEpJyru&Xo7RUgk!~uGZ_V4-?5-;hW*b?Ulq) z{i)Qg?wj3+3|N)*4VL7+Q9MHebe3fZIrVQ@0-3_1lK>zJbc_!Sb$`)+FhX9ly$z_Z+XJ`pPZ+ zdi2b4$kST$7{7qJ1BHML6}l< z1xW-vAV4C{NF;N>EF=iyd8}&WTbC6NDoM~kM7qCuu2&B=%=IlXBMxVHes~FyagkYk^*6zP zn>aphZ?y6|>D05N%GwV}nB=!|62N5yEyMb^RjPFtEXUgAD!qE`;wiJahYXsA>o7^A z9f)T-P{KKT=mjhN+R2+EZsNwVGR2V;BOI=#z@vapiQ1%+JIYOiCqyi#3`!6lksbg; zYmRUW0I1APbsEwaPMI?+n>wdSzE=q;$5{^BPIeF=!B0&9m_rW(jU-Cvdn$G(uD` zpAoXheD+dql!4tGYC{u_add=kL*tGFb;4|K60|{+QsjLFnx+>>Z8m(TGg86A;i^ub z%Sb8_S2b)0px6X}sN|YAhn6VXS^FSVcVW+u-Y$l|dJt&5G+Xw^SCGS5dq2c3(J+3k zk`nfTFNl7^a`ZI^N1sU47;ZtxDOm_xe^!ws$oG{m>*dVGScnYunpFf$fOI2)eB*4J|s!}Syq>(u7~fT9k^ zeU$nfwSA7dK1GpFQP*Ra;BbWJ1CJ3ImwIX1v2TH1LNJu*$7EOvFpk;_*)I?bri}jr DUMv!< literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/plugin_lifecycle.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/plugin_lifecycle.cpython-313.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a6fe0db4f5004df2d7f15261b85ed4d525367132 GIT binary patch literal 4987 zcmbtYU2NOd6~2@xiIObKie*>+kxk2TEG3nr)Nxw7&9B`$P12+_qvgly2wJ3Mwlb++ z(#fo5UG}ieMIYKA4dS2$8f+cV0(}r31G0QGtP$AyS~|et^8WNY@s9+PS2p z$c~C-EdgDgd+)jDe)sVFUEK`?dF=s>IzhKwL%~!ib|d#}VqV z13OqkJC8UI)0nns`iSeW8@pK)K{3R+x)JBjI>wx~4D8`NS%zb>Ue25HAhe6-eBI~; z8v6>~q`$m#O46mA&X$Uc1zBN>a$d?U<%$w3os|?_V+BQIv83q&*4d(}mb8&OL~fEY zTMi1EWUp2+^gU0>iYO}uAcK=c#B+-hUYZng^HR=+Vz>3P?SlSgltq|=^Eo&QJ2?lY zIVW~;bk@zevVfZ~17f#@*vokUHIww34?SzaJz7IsgK>ueezVO!3|~Kt=@d#O)0HDw zOgEPFMXcl;HXN%BLQC{@cLX-PjCLT!k)dX&297GpQDfCn#hGy?s-q&v$~fYPb0~mc zaly~cA;kkfCgW^J)6^jqA;p{VHkCf{D3Dn9CLt_^M|LtA#T z$wD)(nqLb00EfHQ6Ce8lsJ3#$-+`hBEHzQp(Y$qOk>m(B@T ztYxI?9GlpgP7gr}DnOblBu&qPq-jDyN;)tJ-Z%_RdGuw7^Lt_qQer=4&$BORssb+y z(4N>vII$#SGNP3ORctaEFkx9=GF`f?7bVl9EiNnwc*$gPf-V(QLP=+3QBrb}*(zx{ zp$Ox=uJZaLrXSF~RODYGdCv^VSjYiXKBp=QWLQZohvXS+h`l!ys||5eM{2_|pd}f1 zt|)8sux$5(oa(;aVrIBDCd%5q9mBO9H;newMmOOos+wLN|9=29ijA4=b66G&5^sG) zJ}<~cNd#Sjie*q;x&5FuZ5seZ;8+!Ja5T_8^#=Y#uWM zqEsrXOFVqm7bR@ARP6yvg1B^t*se)~{VC;+s&O_}KWw7R*ch~AIk?e!*+t^OtS*CP zvQSE$gK|hc$9{g{O}3PDRwyC!x`eAd<&H)ntbnnFv%Ga9TlM3x34I>aKd%(qNss9)NV@3;hnEx)(*VY#6=6ZbU4-=VB>`*Tn40Orpjk25V$zya%wv-D zOeSyFs+#Go7m0X?%!K9zjo)x=vyBWj!Zy8C29VBJpN8y17iC4VgV8WZ(psQl8y@q; z$%$-6ldzTr)6nO$qI5Rf5UL10zc`n&cgm`BzPNgR&epD#CJ7 zxT_$pimiT!AU00sI`1Fdh7EUdD{~e4N z?LEuKKkH{JOs~;9P+_`^9-t#eZ0oYu7#Lb+{uz!gPZ{CJl|vT~t(?3zw-!z=KlxcO zT!|#sf(fH7{vLI+Em>)c8*Py*!HdCnJw|lkozquO8__|ddz;ZaP5RGYecl*;_|K6K zA~%vB&8&?-do%i+F_{tu!xL?Gu(Y?dy+kYec_sEBlkCK(l$+ZKgD*I1Y zde79v-hawrgFR!(|GIw46%d~^;)zPz!0q0lYvNnSFL^%;b*_wj97-62!<9Dnc2E4; ziMRG$V!l|UP)FZ-nL;h>wViAJxX}`R{rIcL-<~$QqVIHF?J~OhjYz68axw7T8 z^Y`m<(CFR0dhWWmx^QD}rF+Kc8(p2gKDoN5vOizxE!0%mH~hPf_s(6X){+yI=;PM6 zp0gAQxGi%b``!f$NAII!Nx#YPJc$rIZ~AzCK@}H^gl^#>V++OVOpxdEGS+nB%!xZiO4XoAf()lG`Ag@q(^n8N#uo!K!hx3hGUS$4CSBZo6TC(6&PJ zOJw+?7p9D0_`;!ex8EZ*f?9|YPhvh8H%hgpguHPzforDx7s)wK(i#6TF-3#PPxnBK?3lzs2?4t^JtH= 
zALXe&xbeb25%ghNaj@hjSie!!Bp}pKP1bjrAtl?UhikY6*KZ?m$DRpoVuUWyDS0fk zTWlJkdk71thA`GVW8o%CUnzfD)Zs0zC2 z`;kywl%B*GszUFHrIJnJgM=lJ$LfNtsd*JI0IL%W!t`KB+(J&m2MCAv$)Dt;k`9$z zlE&oAnNBR6vsXJrT_YMMslm36?a;%d`5FAQUqA!V*w^}c;?=|(kFWXqFH9O9-_Kt* zg6-=Lrz>zf7=D9$tqigFo7*j&Z+k2L*j+@q0=Jl!btk;PU8h~H@Tbg>5$O0NFnBXC z`0nI;&fgsRB);oreAg;>J+T&_TnkJsJB(oH_4BWuf9;hD6aD74KTO1gZ^1SoCj6}y z1teE|G4$)UG3OtsG3N*S4veD@$9)Ig&cC=QXu+pEJTIy_aHH0GJ{p;WKZYbXMlwmw+8<@Ab?0wx6q^i tLc^b;?oZM1mu{V+A}hr&5qYmG&HxpDV`?42YbA5-#MSJ72x&Rge*uC2;dcN4 literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/resource_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/resource_health.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f0b9589b03913415ad9afb07acbdc47eb1e18ce GIT binary patch literal 3968 zcmd5?<0Q@x2|M|LU`Vp43kw@9ZAB=Xh7#D!4kT;S>BPQq9L6)| z&NwvL(xorR6;ibki!f59U8#LY)jp(edE})M4}CB)YQ3YZgt}^_yk#3!RDJ5XGako4 z)E}yqb|l|9bM86ke&^idbMD<~Y6>8DF5US<_7`Dp71VoU)2(EL? 
zq>H$CO0(zKNjGs@wELWA(o4KN>O}oW@N^@=n{*v#Z5zZV_>!E!jrb5c<`VqfXwgml z87>|u90Q3n*uW`+U(j@eCs@}CB!&4r(dHDK)A_&L{FooVC=T;EoYTniFrUuqS4X#~ zJpi0ra_`c3e52uS7k-^P>q`gTcT@_ z>bc?P5!qHWp(CVvMem+gTu0II+WM+5QQIl7;oDQ1b=Gv=D)%3C+>WgCR{bJ3*8*#} zr=9pWxF>Rs9FIgljm`NvkoD{->)(|G!W6#_LnCkF12@>f-NP>-Jjx0Yp+$(k)e!Fs23TvgJXK%W(>9^~ z8|P{4MNdUfLzaY|%My>1Q);LoOX}Q)ovQP!ENK!$wJgcMa_qc92Uu~kq_ai`#ip0C zq`vllnI)lyeAvyMrUveAmeg$vU3+B-tuKJA`~S|9ecw1=U(OToHQqC2hICzWO1W-& z=d)Nz>t-;OFGzU|jY>7lX1g;v-&+#=LrpG4vPL!~D=W>=y?Ezqj9+0#kIt`n5Ab;` zU$rMGh10xj@GHIp{2_TM!>+VRlO8*W1C;XzWsd3Ka(pE3lh{@@o z0xm3@9wTcg*!1azTuvs-;F)aT476gC!%NvTR#Vt)#yYqPe96!x<6CA>(K6Q*=^Aam zTNi0O+yF$i-Ng!C0@3sse|Dwiyw%kkg$&ag(Kc!%~#(@JV8H=%>Q~3_5KS{Bw`FI6jsX zbxic+^{i?vB-3~)*|39@UMS3s3@eVd=pjs2a8-a+Ho(w8ZS*%qDP9pzlCGf z)_uhbPhcuQ=L27~?JJ(Cws+l|S)C~@thp-fM~Y`Rqx(u{KbgEg`RR>H&+$t1&Ej~S zRUSG~={Z@6zEvE55{~}-!)m1a&yl`Ik-kp{9!CyV!_nJ~tUpB#e%XTBd!EHmD7M+r zT^hUdVR2$J)>S%uNBbSSHu>A|qu7b!J5RzLrT*JDs*%on;nnc3ebwH+PiF7WR(pr4 zJ%`s$u8*&cZg|Vx->L2&UYl7@txatVJs5g8@SykM;^Wz5`AViTyHK9Vmb2H&J%lP> zxqoHz@ayZ`MqlOd+kb3-c)dI`TlxO`k28zqtA$Et=~3_X>cCKyfB(xCe_!+~QP&}n5o^`%@q(;^j*7da?ZoFCUK3&~Uy>DIn>Bi*;mmdlb z&X?a!JWfjGA1zjrSIY^dtQh5v~v(W57>Z9MF zsd&)jB#G7rNiqYHl+)4$h0-AjN}H_IX2Oy*pC!6M|Ng2bN#qEqTh)ZnW3CBxK{u>I zLypt$c1h9=8JdG68wSbF6%4F{)=8>$f)4afLg?w$8t8RGKM2DuR1NmrJW~y~-JIBV z|A6symM&}~`d)XhU;cg5GmExOSlaY2Je6v^LeOewT{5PCxQj$YXESI&O4Ox9wl7^EL^W zG8XElPS@gXFR5RI4hi__M`3_+;|u)kgKD^S+r@g?Hp6Wn3Ab0uT-QG~L$P1_%fbE~ z#CY01gLxL-|J13Mw*oC_H8RNliWy{~_WL9$t)*b2n(KvdbPg_xg!*oJVIQ%g+oeBf z^(e`@ms#6o2mZStXyJNcTTJe4I}5%|CP0LGVxa~s4D%Tp`zspy9Cd$=hMsyaGE8SF Z`4rLj_Poo>w0(q2=~d+`1f;b~{{qDfp`-u+ literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/sentinel_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/sentinel_health.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e9563a2310999f06494df59d37ad2d2a322f25b GIT binary patch literal 3486 
zcmbssOKcm*b;#W%DSkxB`k17oXrjuniAu69*|4qDc5L}8C-P^_CXNYgm&>8F@p6}) zS;`SlMh}IGAgF^_s0U`9+^H;I9Dn*lEV^SuYS&IrT0Ia(!Ei!NYht2P_Kre)*z ztULPHbAD$&JtEiEn))v)IR znV!c0-AxB7Ds$ecD_Aw=ws~49lvK2^FP1lRnA>!-1EwYPVTa*;=F@B_A8vegM$<^m z#wxDXZM9^80l9T>S=Ve_ua#AslyM1c))Hu}wtxv(0BhVJWAXU;Pio)*K)OQ~*kl~T zFjQtM-nmk)Q`OcjQ>lW%HOpi~tK0fKQ3Agygi?#Lk#7m@@tst#|8bqviNf>-0Ve!j zrf62RW`L{8@tH3}Zbb2<&WBx*JDny#J;qhX*!_5FvRGh*vf^FcwC9RtGGAnRb(GoU zu1d=so~xHeYes!mH;YyYQ0LW>Zs_(x(XcetfO&Q_&>zVCfMdnrQ_Brt88@aVI)tX8 zxP$Vp9vll0;O!vR2Gc#X(T?`w&k~!kL9V3n@Ny%R*mY{|a3v@4TsZ{u5;};?Z~@)m zR4)xvxXtmrQ4nqf;Te2_RSq2u`P*PM#W&J~1KeOWr8m+D0dBCGkvGza0dBCG(Sj&T zlMfJ5-$AGlz7gG~N(;QrZRriLiI<{+y}!y48Mw3~IW{54Z5|Y#5CE53^`#+)3gV%V zoH!8)B*3U3$w^Ow$?qURsvv;`?Qkz&(-Yi(yWJlyNUz)>gX0z-k9tc2$r4-!f8hOssT%#)}5xAo2MjhBr;zn|N#;qR*yzL*pEuDS} z_9|r{1H)By0hb6aQR@yd^*?^BV>As5?dG3j6~9AZuH+RU5T4%+)y_@%A=avI;K_0LN^?NPcopY$ zk`K!3vK#U3?hXWc?X3G_{&5c{{uLzdd#E`yyclbCbpImpd16ueYghkr;nA&!x19Z_ z*9P8Q>pHU-ThF|`vhPX$ao!oZyf!qwmbtQ+TJOj%?|YPgn0JOxtmP)xI^J27*87H6 z3Qul5zU3S|v$pT-THllt%WOnYvip8zEz#dhcK`h9&#wORVlzANDElzm%nmj)!z&-I zOh46}-tlH`V)fGMq;v9`lex}O1|AMHbK|R*ob1Veq@n}uFHkg<-sncj?DFB)vHSkh z_n)4AHsbVNZ1x>kKIr6CCsW#DcI?@JlfAf;S?B#@pPt@LwBE^P?&vC6{ovWT-}d~z z_p7&@YqIn1P3Ptd&pL;voGI#L*cR6jXY7=dJ-vX-}AceiiXZ?FGlGY-}fk9 zE2L|RYTHzYHvr9&@+I|K7P~y8GUx4KXU=m0|Fkgg5q_ zkTT0xHxMkV;_3&#PW_96J}-|A4@s@nN&tuue=S~1^#XS@`V7hHkA&bFHiHFBP}jc)vhs!{i0Xwrpw z)nNQO|5ed_0D9AP0N8$*pxFuwQs;Vl&;5x{KUqxwdp+6npy4EjHc?3G{5svSA;9_{ z#*6Y#;wki52NoWMurQO4yHQ0cTN?PPeJe?hmEaEpWT2uvVMDD*Y@=x=EMHz@ZF+W#VQAr#6i7hfR0 SZrliqq0al-KM^dRPyP!VxNBPg literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/session_quality.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/session_quality.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96008a9e63d3cf78661823319671a42219c3f113 GIT binary patch literal 3700 zcmcgvU2NOd6~3hYCEK(m+p%RUX8aRdOX7HO+|@?YWpQGs@)9p$xwx}BmX;`)4n;bb 
zR5Ghu8}wn&Pz+13262D_Nr3`Y-@L~G!{CoCXHgFtM4T^V_Twk|+8G^+2 zW63CDHAA~#9NG?)?A+e(f!;$WkxKK_RqB97jpeAR+NkQ`J!7@eEUd-T!-)69&^&eV zw+N|T-rLlko4G?=IN_}|K8u_P<@O?SDlkmm)$oP<|4em4RI}e0BGx^k85*e#*y16ePGw} ztPPQPKOf+Oe25Qo9pgSO%Eh?O>0o`oSj0nQGan!%o+X+5pOAHN-Fy%*9h&rTJr1T` zM~=S;)6y!}H%>Rf5~|mgcoEQqxPAxOxo?0h0jnHl5I4}k#@0rTybztt4K_s=c6TZZ zbvaL6+V}>jy7@5d*H8mfuOkn?5YZYp(hL!x_#HwK#5!j%dGxX!*7aJ;(rtfEmXxe+ zM-41yuEVgtu813gDi)+tYf-(fYPVF@nX?%~Gekv@)r_1ajSiE8YBOTSfc_}tL|G|f zNw>S!r>BJ4pduQQn%NKvy4||26!WqwNEmBax4p8O(@N1=n_@FsVO@cSJHwXf<7_F+ zHoTN#b)8z5&1o2vK(U>506kSr`fQ&A6?XtK93>=1NGFiF(y6zh&7B$4<-!TwkZy@M zJ9ymf*)Zs~%i6XK{*3)8yQXVuGH3_0&UZk=c2I`~kKr2tUSva&?0{Y@6hypX2Qv_7 zUL%}zlWa~#vfCs*Ba-+9LlcZsc0|$gHxy{O(DQ9Ccvnj;^~hGmED6WmG`7BzrAU2V zG{|&)kFxA7*;r*u(dP`t^kkC8-SyDreQ3p{-iva6wXRr8#|#T8I#cqDPvmU6pqKhs z0szLI(_^(9Tk0z4?3l3%nO)VC>NFI^_+mn|J zJ3#hDRt-Bun4F@ChV6$0Nop2*LC^M*8pM6XBw~AYNy*^|84u>%&!}z(+=_r=Qe;)a z49vRWH_~rMNHNyJcFI5}Yp_ordu?_m&Fd1@)3*T4YC0?3Odlu_!mk!rPPh@JwG~jk zDXz%mE|69ompSQYt?=Rh5+FY&BJ6*@hb5>{k ze(G+@9GKh^b~-PVFYF#3d6fHP?V~mG+vm2ww{vuE=kR=a@dcytp+|F{q&`ZSC+9w+ zc7{_s@rCljZhvjG^6oYap4;i4EnlkoQES()7Jsq$;k?zGc-Z@(*XkXxdcIYes*G+$ zw-(LG^V=!2d(n!Ys7RG-Tj#dkHD8)H=dYPPOT-}YAo1tKpf&mu`8)BlHFD}n#2=4V z+Ymcq_5a{Wf=RUPBc_$94x{#gM;$xNpw-@UpSjC?9Iz5c9=`G54J$EZ^^a8s%&}>6 z`ZY72vJNLJBb7tu#M$iu^XwJ#O2+KZ66H4^ylIUd|6}5cH8f_iuRrMs9f|IvP)F>a z-Kf29^X&?^GceI~`Khhz+e7Bm-1dqYf330p!eZLxJYc? 
z_!Gl6bud;*RG6)I%=lbm^|RZFZN^*_%>K6yu6*gh%3l)jPh<*p^;h4aPJz3qnE578MEj7QwmgkuwDnBM3Pe>jpVDswN0{1k9bYhRGd=94Osz zjv+oyhkrnB`ecoa^Z(po> z=O|y#=3*6*_m+3-+HYFFba;0O$C}iB=HSJWv;uO)+nWP&4;@C!jaeSmFDS$42wJXP z8xdM2HqLXJ4j>V%Su}w1R1Tf-44@p9q%S+4uB6BIkOma~p{Nw43*@_GhhE=UciB%V z+jCP?bmES6;~NAP6T-v4J__VEs?r``$1W4SGx`2c%FMrZTe?3CnUTRgMEN=%GwoFm zyuaEbVZ9e_MZaukJr60?^M4324=$Yt!t^=q6~7EJo+e}l$ic{l$RHtUGC}TFZe+1t zrt=Q>e#s2>J`-B(F%u=B#lFI?fb!*J3-og#t7V|J+8Qlzd*W8OU3R;{{TMb6yART#z%Fz literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/silent_session.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/silent_session.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c420ab6a6e40050772d1e7635805b953010082d7 GIT binary patch literal 2058 zcmZuy-ESL35MSS&&lfw6TWY7ao6u95(70)m3R?98icmkJmc}Ait5mggx|}!8Rp-5Q z_neXfQXUX8PpClER6>Bn3%r#-f_I~UI2Ki^C=~V0Rf>3J_RbC|%$hAj7PMmW{4NQT}oL;eA z{8kgwmD85Ji0uecL^oqR4DuaRMKlI+N=A&zhC~%Zrg1~5sz$t;sA|Iauwtl1bX}o+ zMuH~mTB(ocj1amJMbrcFL<4I+5QSxS!{xHIw8Z1Kz`<3D{U#;06hR4T8mPFljSpf1 z&om-ZB{m2Ae;*@uq^l)zMIP@i*T7dr8byXg6!^ytnWz%r_>nFK zqE$3QJ@Ga|7NCILN?B+Up$2R~6R&7nPrEb?Cu)Xv;wD1k^h-yT(PXinX_67fc#ku*}Uru~2I zTB@h3$i4eBWcc+@37R$rj%z(VAcdPOWoEg`m}$B0xyw8rmK{fYr$fUUH8*RNXM3ue z!?tJ_J?5JfGp|W)Y%;5{DQS@N*dRqC_KtF2L7nyukg6<_z-o1H5(0!u5it^ zd|dYghnEF18}k9oY|90`>3gRCGEcc){f2Aa5Pe%xxm?FSSK(kxYE*&y;R>-Dm<|b6 zhS5<h_USPRJidk}cvNi9xa4}WagP4KVCmEy% zj&WhTn2#Q~dwz6GEv;}hTI569PMoP|wO?TT704@6OzwS-bMp{Cc+3I{Pq_ZRaP}GZVr1 z!5?;v1v>}Z#Y5{m4+Rs`?H%KPX0om3TCP2E?5D!9Kp*=g{d5<~?d{}Iar);%xqY6&{kZ&5`SbHZVff3!y+Tmf8w^f&Bvd#n3Dxku;onF1f0O*W9K3WO z7%M$ZD8&~#Nu-YkL&u&L(!&GKP&%FIyoh$>TbT_A4}FVfODV3IrYHo{A$8x(X8Pi;FDDy=kiD#O01h$8zL&Y%Rit8{xtSUMoJbcNhn0s7>aInjwa!-YwV1}QL5Os9^^O(5yT66`kQ<(Tjd?DB{ hkd7RS#ePAT|3Ld6pyC6x|Cu@&%dfrp7lKy^=^v6`Hy;21 literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/stuck_user.cpython-313.pyc 
b/observability/local/log-sentinel/detectors/__pycache__/stuck_user.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41a8a4f8851b25d4596224a6a4a19fd68bc0edd5 GIT binary patch literal 4322 zcmcIneM}t36`#G``~KqN_%H#p23oi#5E4V&U^^`aY`_N8Th1oolGSpz;7yj@%j}+Q zE{RJjwK_LaV=J*tq)NqYs?h(46t#|2sZOP;QvW(c6E;&vjoKuV|2xZW6XmbI*<(5C zkAG5ibob`%&YRiy-n{v}H@Cy#0D{*2#-Ef6VTArkADrgdW^Vron9E2&7zvEvI?jx{ zu#2ZWdz>BTFlY1Jard|fdu-l)+&k{WJ{~oqHY9kOkl;;m{cL$X>=%5gfZ$IBg+R9# zp*~IsHlgP^91=pQa4P5qF(I6afJCIvBSh@6=t3YK%^iZFVcC?^CU2Oz^b6fNL&kho zGEEt42LF%cH+e%^lzAy_D!RsFIV(%1oZ+#QRWj;Q&rPa9oFT!CJTK)`Goz$Ucnlqp z47t3nj^WvRRM9euwg7B&Xeq7AKgh{=X-G<+meX_vr%_F5yZwD2mr)8~2At#)7|aST z%n2-Z3mo^{RucRcQ;3tN3+!TcAL4FsB-v9@7SRQW>yn5mG=%e@Oy-$ z7pVX5r~Pc5BbP*pdWWy{f=}=#z2M0}G5|CPQuomTMne0&LOAI?n+1+rr z-N%@Tw(U5BGZk-~gp+NEWw0`cXI_!j4D{s}g!vv}2H?Eo1%ik#jTKW#OKLu3g9J~( zV1UoN_;Y!VS2Uh5M&3JE#=U%?0)6=iZz_wj!Jk#k(;$~?0B@I}(gsgec|lf}%5WNI z0ZR16Ih{n)dQLM%MH9~g6zXS5pxldwoSx1<_7Z^5;Wk5A>^4mKtb{XdkMQytSu+o} zm6aE1aGc*ag|d z6v7CnDuzk?VphTitVq3`&B|JaxD``gGze$t*d#G=Q938yclXnKtnhU5)g)MMkI1-Ffmyfj|X9OS)Ioe7l?1ZoMQ~)-3b8O z_n?ESXfmz^Df|!>b%B}zdlOA*F>8=8Rc>c%3<_9$r4e~(AVqVCk%B!lPp2~SnbfYW zqWtOHTz6K@Eht(_p99e|(ww5ghNM&-d<}G=+ko8!HB^{R^Q{icRkBb|c8Pe(1Eleu zq}5X%p^#?$1(M1#vf4TdKC7nTO8ioM<-}%9yx{#JT5r`pRG7NsLEq_rZ+bI6Q1JXM z_E2His@rpA=F-f{={478-NS{WRzu5`{H6SA*V@o#LvOMEvGwZpCw@2Z?!YzqdSY|` zXz}r}Vr;xHa-+IwrSFZ_!VxQ4{n~k}s{T@VtEyw8s$(_rQB~KiAgXIFMNnJUJIz;{ z-`TqnYAK9Zq59XSE=>J3)bd$#?|Rc8#P`L`=83|@r_tsu{=f!*U^9Bas%pFvy%c@h zYqjkA#nW#+ZMAe-&0TACYu+`y-o4&>ox48$XYm8EIGijdPZpa_S$n^=Dqnqi?fGjh z*P_?Y7f(L3byD6qDHoIT#rao?&FASt&%X7n)%i&Axw)<97B}G6qFL>o7BAeY_V26t zH}Y54mSU);b*rs^qpknL>i&DRX#XFy_+bg5mO!;oge@WT$diGLJ(Di7hoJ?=m zL%<_M6}0Q9dU$&oLAKjHYKuVCTB)I!Btz#fxc2UO zY9ER@cQvP;u{Dl4gB2|oT#lA;Z~oB8ITHh%@e;4)00xzGIr&iB;=AOmo<7Y(2GO;Q z5$ETzz6gQ$QC-=kKz zZY$ir5pI8bXpQ~V=vGJHMn~VeaII^zV`wuxTyO!J9h#a*NnhuaM97&S$Hb9${itzoTzeoxF0|uY1|A)oj1c$+6^Z*$qc*+RW8I; 
z60S_$P)}>@17si~CO~eMPPN-iN0Oy$z&v<%;#qS|9N1N3vH4y>Rp@073Lsj^@Q^%t zJG1jTR3UuX1C=l}dhFP{NzJ155SFwBnXsy?5f-LGSR`P?b{FyQR0)u?<#`jNm4(yy z1PE0RQzF3M(19$YFM=^ET4hyLFOONl(DLw&>iRd0l_%dg|55d~3vMgYuo2n2y60-^ zMx=LrU^8;q3RPVkyf9W6F0m-uxO?!!$l=do4J-b&zN?4U_ieQA-;DMB)PKv1YC20k z6sjw7F&p0ERm3Zb89fa!9Zc)03RxZE+$h&Yk)1T(GY6MpOmBI_1Gp}$M!B#F+7*{* zUp32kP4SX&^bU>3fVIzn4dR4_!o#-sF0>}TL*-QQ6rO__)W^n2AP}((^D#Q|IconL zb$)`HK0%##Jbg^#O6U%PuB0(hCU$YSgrHkVu1>#|xUz1$u|2x#o+=E literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/token_usage.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/token_usage.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e805eb4c3c8dd84e14ff84794b533124ddb3e8 GIT binary patch literal 3801 zcmcgvO>7&-6`tiT|4C64X_=&cypko^Y(*)yYGw#zK?BP)Y4I;svLV_pfI>g#*V6WiK`6gY0FNJP!*blt`^!!#YSPG4s^Bgic6?Y%i%}=zHfBt)OTnU?bBjc~$;pMaC=BR6TFoXwxmwlhD71 zatJd3+$AuW6DeV&2L+yPu_k24*2)#y{bJliR%`@-#=yf`7N7iXI?+@yD99gH;d_Sn;b!44Z z^JhJRZ|W0-q**}Zy6F9PZ_WFiu`If1CBM_RTfr{`V3lA5O}KJ@XbVmdlq_MoC|8{2J&KEW=zeAUZS17N(9-~aZ|A>J%w*(mF3A)V$hjU%Rk zzz{8jV?Zud_x@U1Ts$^tW6N0BG7GYamAoiDvOA%T~4s1u_#og5Y-IOFKNm7!cq=%9|Aj;Z@464Sg8lNf4($b1^ zFI5BlBGwC)yjzPHhQc|>>ejP-Yr0a?b>6KG;$Q+=h zj3H|#n5=?2eSPS#oCU?cqsJCkh{v8GNh{zxF7+{cie3oDFCc`sQFCyp=4(c~KTUp= zTs_{1{6bN7kxq>iWJX zLr;t+srs}~@4ZAtuid})myx%cL;IWKN1J0)FZ}NQ?p6pvpPzlv(=pKX4eE%-TK%Z2 zdzJk-^*`QbV2*i_3=ed@L}A#3N1c7E%ZwSnWyDLbS4Mt-xdr?pSs`~hyMry2frkNW0GIbEzNNQF1D#EYj*5CPR_|Qgo z&+3`Sd%x_>JigiJK5&t8 z`g?y4e{1pf5|%bSAo=YNS6X4{!*g>E$UW4L=C@|E$ay#op#e0%^&p<7I@UMo@*)Ye zd9(+Wm?ApH?mk{54H1g(o#v?@1t!?c|_Cn!f9 zi{GWhGHGXq5#Lceq2I>mL4*dyI1B_#VVGy=?BCJ8uTko2wC|-yV3@>e?@RdgWD+ga Y#WAr}_e%s#tAk^rck|x>AGH$ipVwcF9smFU literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/tool_patterns.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/tool_patterns.cpython-313.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9f3a527a381ccad72115fba0ace5712c57e8af1d GIT binary patch literal 6283 zcmbVQTW}NC89tI$(n_)}wk2CKFTrVmc15A@Bn(`ou9)etrt(xFZBz?&UN+Aw|Sf6hv_Yy|0a zXLR(R`|ba~|NGBltJRF)JN5HFMc;BF^aaTjKYc-XJP5*d#3PJ&4X-_>IjY54j>x)W zx}ywcsLUMGA2nbDmG#GrM_J5r$c>s1Z>U4OafIp7spDbOIO{c~4<8hgLO97Kd9 z2jVF)DPRyQ_J<@v9X(F+#=}uD5*5cmv>%uX$Apt90Z$zWg(rltx+FDS-tTW9e;th= ztbsjic@5U_TFme|tml~#1Fs)3jUnMZtC$TkX%Qbk*E!5hBU?Mm49B-Qz#X6&uO7uZovf5P~rYpMnGt| zv!^gOhoD9A21=R??_brzSW;CG*NkEMjuzTxHh7SWwL%RZXR?P`aX_!J>vL?m`_^(jZaNpsJ+HHS{JTQ7MU|qbZ`*Fc~8W zu`yf$t5(=CRZ-!73d@4^1cgh2n3N=PHXQksi32-E0+N8Gk;_prIWZCuE{#ahNuQJy zE{AZ0$P=kiUm}(okBTGlQD}N8G#ZUXlT#zHcsLY;blfMws8LafL3d(@aTsouqWBY2 zib1VY)}1P&U4FIF2J4dEfaEHYo0_Lt+2OijnlWYk3l8tJUUt>pI6ZSZb7jHRF>QXx zR@`SB^K9d6%U!lpCb_me+m6M~Gdp085FFHS?{4jGErkia&kE|919U%+i z(3xdp3FwB(m_>2tMH4J(&-^d#eK4lT23RL}D#cCj!HQOq@8z!)&8ea;Y#p&#_o7w?a+@-uuj@7!)Io*IV+eyE=J zxXW9UZayJkA2E|v9&0BDb|-StF>cS+o)*7inuy0QPykSnVxm?hh9-rl)0828S48?-925?4ox02#0MYmugoP4_T`=LZ)caw^TK7bYlcvA-# zj!!1yBH+gEx0=YVu1tm#KL4hsA8=Q=^HN;AWmMQAnkcLUz=Z*G0L-Gvn4lP?)Z}Cc zPbq8|y!?2aNW!IPL=eM*Vu4vM#)21#XCCyLFfnFK?nu!|0LIJ(VMb!A_eZ=ITtj*-BZOj;QsJ;5Nx?AT29jbc?zWI;VgP^&LvBl9$c7XX0bC+c z2!<0=Or(zr{zOSB0;>tGBePYJ1$a+Em$ly4U(SJ$KGJcx`K#!kP2OL6r)>|^zb~Ag z>4S20?Tx;fzRa;~)k1a0v-!VO*M917%l7Jb-(Ik{%8vC5j^?Z;@9^GpG|QE)oM+3u z$}OvlmnxB~?uiq*+!@<_cYEI5zToyPIQ_q?o;!EHr$67*ztD4ZVe7HH^VqcMGiNTJq8H)i+b+xO<3d#4Y{c4tQOlQfyw)0X%6=Ja__&+Y2Gd)M^KvfZ8O{OMb=!+pa( zWB-*=u5bMC^v%<9{RWUShi98+PiLjs<8ym*b^GOp4Vkgo{_MHDZ(pus|9s>8q50mA zH|F?}+}SsBL!n$KoO>xkMmT%(?86qX+}0sCcjlY>9&~J-^WHwW(6M*Pq+egNSb-YP zX_h?J#;R|SwX*svD{`&J_2e=)TN{U z<}NMZ7hBp!l>q^DTQqzHZ!JZraof1fYft+J;=rJj6Csi7DPhn)EPz!>NlxVvoNo^o z702R!3Ng3=2F4sD>8cek=K@m^elT%MgI+zw4crSE ziWInyNCYUn1{EQQnotg*n8|eN1Q_lQ;oXo(;hyc+V#<`>y`$*PNVa4#9@$Bw1J$t+CsLO_0Ov8Ih}D(Tpl;!07hK70!2#UpxN65!qA! 
zq35PY_B6zDBIo zzNkYLb<{#u83%EY)Hu!o9+f9e)L7t7sj-w%?NtPc7>jP1!VqI&%91K(#o>yE;cd$_ z$FPbL!A%gF1LTG%;F_;UZorfW5lVr#OuQK|0v+k6#6(+;;G9_|9$;KF>Rml3&+;Zp zE6f3wkc}=qUlB0w(t>QICAKbod3rMZCU5s^fetzN%I$0kbvRL=4prXj^o}For4mKI zEpek$YBWoE!cI<*z&C?m1Sczsz{ept00S5Uo)Gb`SSbe3L5g`L!cdGU@q!q?EK*xl zs-EqQ3BgQP9aXPb=u6u!{HRhV6JHcmyAvdl2?n5lDYNN zcj69^y)5>_AKpw7V7drzB?+-K+y#kZD{cy0YB1S}yPczCSf>`K+whCPwYQXZOoBbvGtvCNfvDrx$8^-_^;Ebqfw|c3)({^%Wj(6KbM*tzde+z`JP+{CAT`} z&d1X{xtcL zpuFvu21^i7@eFYo)VYEd1q`c#C&29zbXXlQ9WJ87at~PdxS>2+@XK^wgaEA?y~@67 zl60Gqqy_wdFpT2IHEQE54J1rm%n9uPFmHlDqFH1i*!ll3#mrlXyB>mB)_wtV5h-kl zI{dN$%bL{!hN-I^zQ)`9T398pQnE@(Cz?dIB&^XDM<>`sk!kr9I5;UuTkJCi=ddDs ztJ2)d;Olq5!(77GL!aj;Sdq{HzzK+SDIPV#t^gWvtwu6K5_K;r0h`m}8KqU&Cyh@E?QeqS+jao*LhfH6k^T+IcfiZqAc4TmzI*n4e~!#YD8w|*p3FO1?>QPN#C%hM z9P8wYs)zQv`}Wqny><5Dg55jKeA?A}*WLHq?pxj2t(kq9uFU9c<*aVD^Zojdx~Hvq zcc08U-mzb^e_)hd9`cAIyVl87?Q%`yZ1byT&r zw~rlYeZNK`bp1}3^FXKJk7mb#KKCtf7Y>CD21%?J3@YYea55fA#fV%HglI7oE7aJ7 z!LcZok|d%NBEH0DG=(xs~cewcpc40;8~dlZ4F}=H zl)lpm5{x%W8UUw3s`p@tL=xg!q!~zlgk($I)q@Z%UF~0F%zAg`_#%S8IcDy)kE~CK zxai)Xug*4Sleac4A`s`9`Pc416^M&RHI;h%+^O5mAHBYaK>WBncjC3b?0!Pz#a5@@ zo{h|P-#WjDK%77LaqXRfCq!KA@aQeYDiGDGe<2Xn)si(1dd-S47>vZjaBh}xJQh<4 zhD1HFVk^e%l7AFl&M^aB6ayTz5R>SUDHa-+!|MS&iPH0=TVHb~3cE|aoIN-WBLE&F oiQvMbPNUI$ieC8=HGGNMK1X$*qqZl8%NmU(v+HX_{^?5p3jjahmH+?% literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/detectors/__pycache__/websocket_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/websocket_health.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24d5c3e979c50c61c51065136f2bd99d1ffbc085 GIT binary patch literal 2856 zcmcguO>7&-6&{kiFgiOPyfEVHoW%CeDKiR=2OiELH2<1y=>FkLLkF}2QW zSDsl)5StV}73!&Q8pASBzy^{-da81&3KXb&DC#2(BEWXg07ilqa4(hM^yk<&yOcyb zN)fa`2jJs-^M2-=H*enT(^#w>!5IA6?~QffJ{Q8)kekfY6TsX?7*T`+I5-nH6Qn^^ z@S&N|8Hq|BFU`nj!ZfU+B+4Qzrx6Ypf`>zX3>v9Ka!PIV6mdw&Q7@4qwo7jlM=jZw 
z1vAE?;Vi2Q)GnH2o{c{dUbz78IH5D*``8rZ$ZLjGHmnM;@srCXle|$Sbor!SnjpQmYj7;dnF-RAl4nzhL$@I)JQT zK5!w>V$o!A;Ba%+3gtulnzLo_$_HOWIA}@mlk=foH2pV(tZ+WOYm^X{4ma1gBKZiG zr!FI;Ulmah`Hfn!IMtsJOmwhEXdxbuI+AIDv|`~QN!=Q;eJc=&rg{1M! zBhGjBuOQ1!tvtJlP!y?Rrf=#uaCSfNe1lW(0}mdTFOEeK!u_6n9(4chxhC^+C&H<` z1T;M4NgkCpJiUk%WuN!{@6mYRc|;m9>PQTg=h01syC;e}}<)a%T);LpQ!H2Y;l*_^_ave&|F$`uBN z6#5pI9m6pRkFe_eyiS+7Qqmn#u?0sKjWV%Hgm)5F(oLAx99we^@~CN7ZkXB)(ZG0b zks9R+(U_xCN4usQCTw3RL2v_{YAw6;4OQPnb!^phsqH!81@NV&c1o>%>+9@um$kFJ zcW3v_7EQ`eL|fv0JDRe=c9f#0JQ0Ul>z_6%bLx&#rMhF->OLNr;(=VPt3}1sMW$A> z;I;JcpF6ZYKhU8 zKtuO>-lky@uNIl;k!o}Bw@*$L@{CYcSTZbUu23e6g_imh{9Ls-wqRB(hE=eOV7k~$ zOu@9FN`O|z7+fHTZ-TpcXl)Vhxrb_eVVQ@0H6Pl(ThG36Glq+oO|4x099ZAO7!zYX62NdNZy&gHNAT2ezB3rt$g=! zJW)^XZ^ZYzUCBG~Tk#JgZf5x2mAhBm%!r#l><%4!ApJr4o$@eMzxe&ecP`bBU9Jy; za`@g`ci(bH_y0Ee>*Ryn!wZcg=O1O>a7RX6^~z><+i=f6P+NEJMjz_wucr?*x(|Fg z{-~RN)g78#JN2vSkEhqKJPbAt&D1BqS09}HkKwO=RruB8w_@)pZg*cjeejd+gKpQr z>WxQT>U+`6qiE02#)$yx=wBUfL_Qt7G-im)DiGTK#ybo-7ON%-zgmb)P#j z{-;d#{wt40j;v2KGDkNfQo3iO9jPz5gVts$mg)Qg#bWV|A=H&zi9dyNL%%?0b5X8n znkZLI&q9Iy82gpl`W zqESI>(sak6MzQJ;243P@i%tsiO+nzL^NA>PHUi|w$c<)ho^s=fo6{T8D{^x6+y;Wb zHEHeA$FYA3*7p~#XW>>wG_7ox0B`4ZIgc0Ni;#+5!^5y0Vlw|yM!iO=d7ob%YJdG_ xF>&QpKLy{W#~^}eXDkC`BNPY({)oo@j7C32=}*z<7vW=p list[Finding]: + findings: list[Finding] = [] + actions = cache.get("ss_actions") + + if not actions: + return findings + + # --- Failure rate --- + total = len(actions) + failures = [a for a in actions if not _is_success(a)] + fail_count = len(failures) + + if total >= 5 and fail_count > 0: + rate = fail_count / total + if rate > 0.20: + severity = "critical" if rate > 0.50 else "warn" + findings.append(Finding( + detector=self.name, + severity=severity, + title=f"Action failure rate {rate:.0%} ({fail_count}/{total})", + summary=f"{fail_count} of {total} actions failed ({rate:.0%})", + category=self.category, + evidence={ + "total": total, + "failures": fail_count, + "rate": round(rate, 3), + }, + escalate_to_t2=True, + 
logql_query='{app="sim-steward", event="action_result"} | json', + )) + + # --- Consecutive same-action failures --- + _check_consecutive(actions, findings, self) + + return findings + + +def _is_success(line: dict) -> bool: + fields = line.get("fields", {}) + val = fields.get("success") + if isinstance(val, bool): + return val + if isinstance(val, str): + return val.lower() == "true" + return True + + +def _check_consecutive(actions: list[dict], findings: list[Finding], detector: BaseDetector) -> None: + """Detect 3+ consecutive failures of the same action+arg combo.""" + streak_action = None + streak_count = 0 + + for line in actions: + fields = line.get("fields", {}) + combo = f"{fields.get('action', '?')}:{fields.get('arg', '')}" + + if not _is_success(line): + if combo == streak_action: + streak_count += 1 + else: + streak_action = combo + streak_count = 1 + else: + # Success resets the streak + if streak_count >= 3: + findings.append(Finding( + detector=detector.name, + severity="warn", + title=f"Consecutive failures: {streak_action} x{streak_count}", + summary=f"Action {streak_action!r} failed {streak_count} times consecutively", + category=detector.category, + evidence={"action_combo": streak_action, "consecutive": streak_count}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="action_result"} | json', + )) + streak_action = None + streak_count = 0 + + # Check trailing streak + if streak_count >= 3: + findings.append(Finding( + detector=detector.name, + severity="warn", + title=f"Consecutive failures: {streak_action} x{streak_count}", + summary=f"Action {streak_action!r} failed {streak_count} times consecutively", + category=detector.category, + evidence={"action_combo": streak_action, "consecutive": streak_count}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="action_result"} | json', + )) diff --git a/observability/local/log-sentinel/detectors/agent_loop.py b/observability/local/log-sentinel/detectors/agent_loop.py new 
file mode 100644 index 0000000..5110c5d --- /dev/null +++ b/observability/local/log-sentinel/detectors/agent_loop.py @@ -0,0 +1,133 @@ +"""Detect repetitive tool use, retry loops, and agent nesting anomalies.""" + +from collections import Counter, defaultdict + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class AgentLoopDetector(BaseDetector): + name = "agent_loop" + category = "ops" + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + tools = cache.get("claude_tools") + agents = cache.get("claude_agents") + + self._check_repetitive_tools(tools, findings) + self._check_retry_loops(tools, findings) + self._check_orphan_agents(agents, findings) + self._check_deep_nesting(agents, findings) + + return findings + + def _check_repetitive_tools(self, tools: list[dict], findings: list[Finding]): + """Same tool 15+ times in one session -> warn.""" + session_tool_counts: dict[tuple[str, str], int] = Counter() + for line in tools: + hook = line.get("hook_type", "") + if hook not in ("post-tool-use", "post-tool-use-failure"): + continue + sid = line.get("session_id", "unknown") + tool = line.get("tool_name", "unknown") + session_tool_counts[(sid, tool)] += 1 + + for (sid, tool), count in session_tool_counts.items(): + if count >= 15: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Repetitive tool use: {tool} {count}x", + summary=f"Session {sid} called {tool} {count} times — possible loop", + category=self.category, + evidence={ + "session_id": sid, + "tool_name": tool, + "call_count": count, + }, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', + )) + + def _check_retry_loops(self, tools: list[dict], findings: list[Finding]): + """More than 3 is_retry=true events -> warn.""" + retry_count = 0 + for line in tools: + is_retry = line.get("is_retry") + if is_retry is True or is_retry == 
"true": + retry_count += 1 + + if retry_count > 3: + findings.append(Finding( + detector=self.name, + severity="warn", + title="Tool retry loop", + summary=f"{retry_count} tool retries detected — possible stuck loop", + category=self.category, + evidence={"retry_count": retry_count}, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', + )) + + def _check_orphan_agents(self, agents: list[dict], findings: list[Finding]): + """subagent-start without matching subagent-stop -> warn.""" + started: dict[str, dict] = {} + stopped: set[str] = set() + + for line in agents: + hook = line.get("hook_type", "") + agent_id = line.get("agent_id", "") + if not agent_id: + continue + if hook == "subagent-start": + started[agent_id] = line + elif hook == "subagent-stop": + stopped.add(agent_id) + + for agent_id, line in started.items(): + if agent_id not in stopped: + findings.append(Finding( + detector=self.name, + severity="warn", + title="Long-running agent", + summary=f"Subagent {agent_id} started but has no matching stop event", + category=self.category, + evidence={ + "agent_id": agent_id, + "session_id": line.get("session_id", ""), + }, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component="agent"} | json', + )) + + def _check_deep_nesting(self, agents: list[dict], findings: list[Finding]): + """agent_depth >= 3 -> info.""" + seen_depths: set[tuple[str, int]] = set() + + for line in agents: + depth = _safe_int(line.get("agent_depth")) + agent_id = line.get("agent_id", "unknown") + if depth >= 3 and (agent_id, depth) not in seen_depths: + seen_depths.add((agent_id, depth)) + findings.append(Finding( + detector=self.name, + severity="info", + title=f"Deep agent nesting (depth={depth})", + summary=f"Agent {agent_id} reached nesting depth {depth}", + category=self.category, + evidence={ + "agent_id": agent_id, + "agent_depth": depth, + "session_id": line.get("session_id", ""), + }, + 
logql_query='{app="claude-dev-logging", component="agent"} | json', + )) + + +def _safe_int(val) -> int: + try: + return int(val) + except (TypeError, ValueError): + return 0 diff --git a/observability/local/log-sentinel/detectors/base.py b/observability/local/log-sentinel/detectors/base.py new file mode 100644 index 0000000..92591e1 --- /dev/null +++ b/observability/local/log-sentinel/detectors/base.py @@ -0,0 +1,16 @@ +"""Base detector interface for Tier 1.""" + +from abc import ABC, abstractmethod + +from models import Finding +from query_cache import CycleQueryCache + + +class BaseDetector(ABC): + name: str = "base" + category: str = "app" # "app" | "ops" + + @abstractmethod + def detect(self, cache: CycleQueryCache) -> list[Finding]: + """Run detection logic against cached query results. Return findings.""" + ... diff --git a/observability/local/log-sentinel/detectors/claude_session.py b/observability/local/log-sentinel/detectors/claude_session.py new file mode 100644 index 0000000..9994b2b --- /dev/null +++ b/observability/local/log-sentinel/detectors/claude_session.py @@ -0,0 +1,98 @@ +"""Detect Claude Code session lifecycle events and anomalies.""" + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class ClaudeSessionDetector(BaseDetector): + name = "claude_session" + category = "ops" + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + lifecycle = cache.get("claude_lifecycle") + tools = cache.get("claude_tools") + + # Build set of session_ids that have post-tool-use events + sessions_with_tools: set[str] = set() + for line in tools: + hook = line.get("hook_type", "") + sid = line.get("session_id", "") + if hook == "post-tool-use" and sid: + sessions_with_tools.add(sid) + + # Track sessions that emitted stop hooks (session-end) + sessions_with_stop: set[str] = set() + + for line in lifecycle: + hook = line.get("hook_type", "") + session_id = 
line.get("session_id", "unknown") + + if hook == "session-start": + findings.append(Finding( + detector=self.name, + severity="info", + title=f"New Claude session: {session_id}", + summary=f"Claude Code session started: {session_id}", + category=self.category, + evidence={"session_id": session_id, "hook_type": hook}, + logql_query='{app="claude-dev-logging", component="lifecycle"} | json', + )) + + elif hook == "pre-compact": + findings.append(Finding( + detector=self.name, + severity="info", + title=f"Context compaction in {session_id}", + summary=f"Claude Code context compaction triggered in session {session_id}", + category=self.category, + evidence={"session_id": session_id, "hook_type": hook}, + logql_query='{app="claude-dev-logging", component="lifecycle"} | json', + )) + + elif hook == "session-end": + sessions_with_stop.add(session_id) + duration_ms = _safe_int(line.get("session_duration_ms")) + if duration_ms > 7_200_000: + hours = round(duration_ms / 3_600_000, 1) + findings.append(Finding( + detector=self.name, + severity="warn", + title="Long session >2h", + summary=f"Session {session_id} lasted {hours}h ({duration_ms}ms)", + category=self.category, + evidence={ + "session_id": session_id, + "duration_ms": duration_ms, + }, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component="lifecycle"} | json', + )) + + elif hook == "stop": + sessions_with_stop.add(session_id) + + # Empty session: stop hook emitted but zero tool-use events + for sid in sessions_with_stop: + if sid and sid not in sessions_with_tools: + findings.append(Finding( + detector=self.name, + severity="warn", + title="Empty session", + summary=f"Session {sid} ended with stop hooks but 0 tool-use events", + category=self.category, + evidence={"session_id": sid}, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component="lifecycle"} | json', + )) + + return findings + + +def _safe_int(val) -> int: + """Convert a value to int, returning 0 on failure.""" + 
try: + return int(val) + except (TypeError, ValueError): + return 0 diff --git a/observability/local/log-sentinel/detectors/error_spike.py b/observability/local/log-sentinel/detectors/error_spike.py new file mode 100644 index 0000000..91d4cae --- /dev/null +++ b/observability/local/log-sentinel/detectors/error_spike.py @@ -0,0 +1,46 @@ +"""Detect error spikes in sim-steward logs.""" + +from collections import Counter + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class ErrorSpikeDetector(BaseDetector): + name = "error_spike" + category = "app" + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + errors = cache.get("ss_errors") + count = len(errors) + + if count < 3: + return findings + + messages = [e.get("message", "unknown") for e in errors] + top_messages = Counter(messages).most_common(5) + + if count >= 10: + severity = "critical" + else: + severity = "warn" + + escalate = count >= 5 + + findings.append(Finding( + detector=self.name, + severity=severity, + title=f"Error spike: {count} errors", + summary=f"{count} errors detected in window. 
Top: {top_messages[0][0]!r} ({top_messages[0][1]}x)", + category=self.category, + evidence={ + "count": count, + "top_messages": [{"message": m, "count": c} for m, c in top_messages], + }, + escalate_to_t2=escalate, + logql_query='{app="sim-steward", level="ERROR"} | json', + )) + + return findings diff --git a/observability/local/log-sentinel/detectors/flow_gap.py b/observability/local/log-sentinel/detectors/flow_gap.py new file mode 100644 index 0000000..aeef491 --- /dev/null +++ b/observability/local/log-sentinel/detectors/flow_gap.py @@ -0,0 +1,48 @@ +"""Detect expected-flow gaps by delegating to the FlowEngine.""" + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class FlowGapDetector(BaseDetector): + name = "flow_gap" + category = "app" + + def __init__(self, flow_engine): + self.flow_engine = flow_engine + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + all_events = cache.get("ss_all") + + # Filter out noise — host_resource_sample events don't participate in flows + meaningful = [e for e in all_events if e.get("event") != "host_resource_sample"] + + if not meaningful: + return findings + + evaluations = self.flow_engine.evaluate(meaningful) + + for evaluation in evaluations: + for gap in evaluation.gaps: + severity = evaluation.flow.gap_severity or "warn" + findings.append(Finding( + detector=self.name, + severity=severity, + title=f"Flow gap: {evaluation.flow.display_name} — missing {gap.step.label}", + summary=gap.description or f"Expected step {gap.step.label!r} not found in flow {evaluation.flow.display_name!r}", + category=self.category, + evidence={ + "flow": evaluation.flow.name, + "flow_display": evaluation.flow.display_name, + "missing_step": gap.step.id, + "missing_label": gap.step.label, + "matched_steps": list(evaluation.matched_steps.keys()), + }, + escalate_to_t2=severity in ("warn", "critical"), + flow_context=evaluation.flow.name, 
+ logql_query='{app="sim-steward"} | json', + )) + + return findings diff --git a/observability/local/log-sentinel/detectors/incident_anomaly.py b/observability/local/log-sentinel/detectors/incident_anomaly.py new file mode 100644 index 0000000..da4c62a --- /dev/null +++ b/observability/local/log-sentinel/detectors/incident_anomaly.py @@ -0,0 +1,107 @@ +"""Detect incident anomalies — bursts and per-driver accumulation.""" + +from collections import Counter, defaultdict + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class IncidentAnomalyDetector(BaseDetector): + name = "incident_anomaly" + category = "app" + + BURST_WINDOW_SEC = 60 + BURST_THRESHOLD = 5 + DRIVER_WARN_THRESHOLD = 15 + DRIVER_INFO_THRESHOLD = 10 + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + incidents = cache.get("ss_incidents") + + if not incidents: + return findings + + # --- Burst detection: 5+ incidents within 60s --- + timestamps = [] + for line in incidents: + ts = _parse_ts(line) + if ts is not None: + timestamps.append(ts) + + timestamps.sort() + max_burst = _max_count_in_window(timestamps, self.BURST_WINDOW_SEC) + + if max_burst >= self.BURST_THRESHOLD: + findings.append(Finding( + detector=self.name, + severity="info", + title=f"Incident burst: {max_burst} in {self.BURST_WINDOW_SEC}s", + summary=f"{max_burst} incidents detected within a {self.BURST_WINDOW_SEC}s window", + category=self.category, + evidence={"burst_count": max_burst, "window_sec": self.BURST_WINDOW_SEC}, + escalate_to_t2=False, + logql_query='{app="sim-steward", event="incident_detected"} | json', + )) + + # --- Per-driver accumulation --- + driver_counts: Counter[str] = Counter() + for line in incidents: + fields = line.get("fields", {}) + driver = fields.get("display_name") or fields.get("unique_user_id") or "unknown" + driver_counts[str(driver)] += 1 + + for driver, count in driver_counts.items(): + if 
count >= self.DRIVER_WARN_THRESHOLD: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Driver {driver}: {count} incidents", + summary=f"Driver {driver!r} accumulated {count} incidents — exceeds warning threshold", + category=self.category, + evidence={"driver": driver, "incident_count": count}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="incident_detected"} | json', + )) + elif count >= self.DRIVER_INFO_THRESHOLD: + findings.append(Finding( + detector=self.name, + severity="info", + title=f"Driver {driver}: {count} incidents", + summary=f"Driver {driver!r} accumulated {count} incidents", + category=self.category, + evidence={"driver": driver, "incident_count": count}, + escalate_to_t2=False, + logql_query='{app="sim-steward", event="incident_detected"} | json', + )) + + return findings + + +def _parse_ts(line: dict) -> float | None: + raw = line.get("timestamp") + if raw is None: + return None + try: + return float(raw) + except (ValueError, TypeError): + pass + try: + from datetime import datetime + dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00")) + return dt.timestamp() + except Exception: + return None + + +def _max_count_in_window(sorted_ts: list[float], window_sec: int) -> int: + if not sorted_ts: + return 0 + max_count = 0 + left = 0 + for right in range(len(sorted_ts)): + while sorted_ts[right] - sorted_ts[left] > window_sec: + left += 1 + max_count = max(max_count, right - left + 1) + return max_count diff --git a/observability/local/log-sentinel/detectors/mcp_health.py b/observability/local/log-sentinel/detectors/mcp_health.py new file mode 100644 index 0000000..f8af698 --- /dev/null +++ b/observability/local/log-sentinel/detectors/mcp_health.py @@ -0,0 +1,119 @@ +"""Detect MCP tool failures and slow calls.""" + +from collections import Counter + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class 
McpHealthDetector(BaseDetector): + name = "mcp_health" + category = "ops" + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + tools = cache.get("claude_tools") + + mcp_failures: list[dict] = [] + mcp_calls_by_service: Counter = Counter() + slow_calls: list[dict] = [] + + for line in tools: + tool_name = line.get("tool_name", "") + if not tool_name.startswith("mcp__"): + continue + + hook = line.get("hook_type", "") + service = _extract_service(tool_name) + duration_ms = _safe_int(line.get("duration_ms")) + + # Count all MCP calls (post-tool-use and post-tool-use-failure) + if hook in ("post-tool-use", "post-tool-use-failure"): + mcp_calls_by_service[service] += 1 + + # Track failures + if hook == "post-tool-use-failure": + mcp_failures.append(line) + + # Slow call detection + if duration_ms > 30_000: + slow_calls.append(line) + + # MCP failure findings + failure_count = len(mcp_failures) + if failure_count > 0: + if failure_count >= 3: + findings.append(Finding( + detector=self.name, + severity="critical", + title=f"MCP failure storm: {failure_count} failures", + summary=f"{failure_count} MCP tool failures detected — possible service outage", + category=self.category, + evidence={ + "failure_count": failure_count, + "tools": [f.get("tool_name", "unknown") for f in mcp_failures], + }, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', + )) + else: + for fail in mcp_failures: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"MCP failure: {fail.get('tool_name', 'unknown')}", + summary=f"MCP tool call failed: {fail.get('tool_name', 'unknown')}", + category=self.category, + evidence={ + "tool_name": fail.get("tool_name"), + "error_type": fail.get("error_type", ""), + "session_id": fail.get("session_id", ""), + }, + escalate_to_t2=True, + logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', + )) + + # Slow MCP call findings + for 
"""Detect plugin lifecycle events and restart loops."""

from datetime import datetime

from detectors.base import BaseDetector
from models import Finding
from query_cache import CycleQueryCache


class PluginLifecycleDetector(BaseDetector):
    """Translate sim-steward lifecycle events into findings.

    Normal lifecycle events (plugin start, iRacing connect/disconnect)
    become info findings; a bridge start failure is critical; restart
    loops and slow or missing deploys produce warnings.
    """

    name = "plugin_lifecycle"
    category = "app"

    def detect(self, cache: CycleQueryCache) -> list[Finding]:
        findings: list[Finding] = []
        lifecycle = cache.get("ss_lifecycle")

        if not lifecycle:
            return findings

        plugin_started_count = 0
        has_deploy_marker = False
        has_plugin_ready = False
        deploy_ts: float | None = None
        ready_ts: float | None = None

        for line in lifecycle:
            event = line.get("event", "")

            if event == "plugin_started":
                plugin_started_count += 1
                findings.append(Finding(
                    detector=self.name,
                    severity="info",
                    title="Plugin started",
                    summary="sim-steward plugin started (100% logging)",
                    category=self.category,
                    evidence={"event": event, "message": line.get("message", "")},
                    escalate_to_t2=False,
                    logql_query='{app="sim-steward"} | json | event="plugin_started"',
                ))

            elif event == "iracing_connected":
                findings.append(Finding(
                    detector=self.name,
                    severity="info",
                    title="iRacing connected",
                    summary="iRacing SDK connection established",
                    category=self.category,
                    evidence={"event": event},
                    escalate_to_t2=False,
                    logql_query='{app="sim-steward"} | json | event="iracing_connected"',
                ))

            elif event == "iracing_disconnected":
                findings.append(Finding(
                    detector=self.name,
                    severity="info",
                    title="iRacing disconnected",
                    summary="iRacing SDK connection lost",
                    category=self.category,
                    evidence={"event": event},
                    escalate_to_t2=False,
                    logql_query='{app="sim-steward"} | json | event="iracing_disconnected"',
                ))

            elif event == "bridge_start_failed":
                findings.append(Finding(
                    detector=self.name,
                    severity="critical",
                    title="Bridge start failed",
                    summary=f"WebSocket bridge failed to start: {line.get('message', '')}",
                    category=self.category,
                    evidence={"event": event, "message": line.get("message", "")},
                    escalate_to_t2=True,
                    logql_query='{app="sim-steward"} | json | event="bridge_start_failed"',
                ))

            elif event == "deploy_marker":
                has_deploy_marker = True
                deploy_ts = _parse_ts(line)

            elif event == "plugin_ready":
                has_plugin_ready = True
                ready_ts = _parse_ts(line)

        # Restart loop: 2+ starts in one window
        if plugin_started_count >= 2:
            findings.append(Finding(
                detector=self.name,
                severity="warn",
                title=f"Plugin restart loop ({plugin_started_count} starts)",
                summary=f"Plugin started {plugin_started_count} times in window — possible crash loop",
                category=self.category,
                evidence={"plugin_started_count": plugin_started_count},
                escalate_to_t2=True,
                logql_query='{app="sim-steward"} | json | event="plugin_started"',
            ))

        # Deploy without ready within 60s
        if has_deploy_marker and not has_plugin_ready:
            findings.append(Finding(
                detector=self.name,
                severity="warn",
                title="Deploy without plugin_ready",
                summary="deploy_marker seen but plugin_ready not received within window",
                category=self.category,
                evidence={"deploy_marker": True, "plugin_ready": False},
                escalate_to_t2=False,
                logql_query='{app="sim-steward"} | json | event=~"deploy_marker|plugin_ready"',
            ))
        # Compare timestamps against None explicitly: a timestamp of 0.0
        # is falsy, and plain truthiness would silently skip the
        # slow-deploy check for it.
        elif has_deploy_marker and has_plugin_ready and deploy_ts is not None and ready_ts is not None:
            gap_sec = ready_ts - deploy_ts
            if gap_sec > 60:
                findings.append(Finding(
                    detector=self.name,
                    severity="warn",
                    title=f"Slow deploy: plugin_ready {gap_sec:.0f}s after deploy",
                    summary=f"plugin_ready arrived {gap_sec:.0f}s after deploy_marker (threshold: 60s)",
                    category=self.category,
                    evidence={"gap_sec": round(gap_sec, 1)},
                    escalate_to_t2=False,
                    logql_query='{app="sim-steward"} | json | event=~"deploy_marker|plugin_ready"',
                ))

        return findings


def _parse_ts(line: dict) -> float | None:
    """Extract an epoch-seconds timestamp from a log line.

    Accepts either a numeric value or an ISO-8601 string (a trailing
    "Z" is normalized to "+00:00" for fromisoformat). Returns None
    when the line carries no parseable timestamp.
    """
    raw = line.get("timestamp")
    if raw is None:
        return None
    try:
        return float(raw)
    except (ValueError, TypeError):
        pass
    try:
        dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00"))
        return dt.timestamp()
    except Exception:
        return None
"""Detect host resource problems — CPU, memory, disk."""

from detectors.base import BaseDetector
from models import Finding
from query_cache import CycleQueryCache


class ResourceHealthDetector(BaseDetector):
    """Check host_resource_sample lines for CPU / memory pressure.

    CPU warns above an 80% peak and goes critical above 95%; memory
    warns above 85% and goes critical above 95%. Only critical
    findings escalate to T2.
    """

    name = "resource_health"
    category = "app"

    def detect(self, cache: CycleQueryCache) -> list[Finding]:
        findings: list[Finding] = []
        samples = cache.get("ss_resources")

        if not samples:
            return findings

        cpu_values: list[float] = []
        mem_values: list[float] = []

        for line in samples:
            fields = line.get("fields", {})
            cpu = _to_float(fields.get("cpu_percent"))
            mem = _to_float(fields.get("memory_percent"))

            if cpu is not None:
                cpu_values.append(cpu)
            if mem is not None:
                mem_values.append(mem)

        # CPU and memory share the same decision logic; only the label,
        # evidence key, and warn threshold differ.
        self._check_metric(findings, "CPU", "cpu", cpu_values, warn_above=80)
        self._check_metric(findings, "Memory", "mem", mem_values, warn_above=85)

        return findings

    def _check_metric(
        self,
        findings: list[Finding],
        label: str,
        key: str,
        values: list[float],
        *,
        warn_above: float,
        critical_above: float = 95,
    ) -> None:
        """Append at most one finding for a metric based on its peak value."""
        if not values:
            return

        peak = max(values)
        avg = sum(values) / len(values)

        if peak > critical_above:
            severity, qualifier, escalate = "critical", "critical", True
        elif peak > warn_above:
            severity, qualifier, escalate = "warn", "elevated", False
        else:
            return

        findings.append(Finding(
            detector=self.name,
            severity=severity,
            title=f"{label} {qualifier}: {peak:.0f}% peak",
            summary=f"{label} peaked at {peak:.0f}% (avg {avg:.0f}%) across {len(values)} samples",
            category=self.category,
            evidence={f"peak_{key}": round(peak, 1), f"avg_{key}": round(avg, 1), "samples": len(values)},
            escalate_to_t2=escalate,
            logql_query='{app="sim-steward", event="host_resource_sample"} | json',
        ))


def _to_float(val) -> float | None:
    """Parse val as float; None for missing or unparseable values."""
    if val is None:
        return None
    try:
        return float(val)
    except (ValueError, TypeError):
        return None
+ + Expected keys: + last_cycle_duration_ms, consecutive_detector_errors, + last_t2_duration_ms, t2_queue_size, cycles_completed + """ + self._stats = stats_ref + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + stats = self._stats + + cycle_ms = _safe_int(stats.get("last_cycle_duration_ms")) + consec_errors = _safe_int(stats.get("consecutive_detector_errors")) + t2_ms = _safe_int(stats.get("last_t2_duration_ms")) + cycles_completed = _safe_int(stats.get("cycles_completed")) + + # Slow cycle + if cycle_ms > 30_000: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Slow cycle ({cycle_ms}ms)", + summary=f"Last sentinel cycle took {cycle_ms}ms (>30s threshold)", + category=self.category, + evidence={"last_cycle_duration_ms": cycle_ms}, + )) + + # Consecutive detector errors + if consec_errors > 2: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Detector failures: {consec_errors} consecutive", + summary=f"{consec_errors} consecutive detector errors — detectors may be broken", + category=self.category, + evidence={"consecutive_detector_errors": consec_errors}, + )) + + # Slow T2 investigation + if t2_ms > 300_000: + findings.append(Finding( + detector=self.name, + severity="warn", + title="T2 investigation very slow (>5min)", + summary=f"Last T2 investigation took {t2_ms}ms ({t2_ms / 60_000:.1f} min)", + category=self.category, + evidence={"last_t2_duration_ms": t2_ms}, + )) + + # Stalled polling: cycles have run before but none recently + # The caller is expected to set "last_cycle_epoch_ms" in stats + # when a cycle completes. If cycles_completed > 0 but no cycle + # has landed recently, the sentinel main loop itself detects this + # and sets "stalled" = True in the stats dict. 
+ if cycles_completed > 0 and stats.get("stalled"): + findings.append(Finding( + detector=self.name, + severity="critical", + title="Sentinel polling stalled", + summary=f"Sentinel has completed {cycles_completed} cycles but appears stalled — no recent cycle", + category=self.category, + evidence={"cycles_completed": cycles_completed}, + escalate_to_t2=True, + )) + + return findings + + +def _safe_int(val) -> int: + try: + return int(val) + except (TypeError, ValueError): + return 0 diff --git a/observability/local/log-sentinel/detectors/session_quality.py b/observability/local/log-sentinel/detectors/session_quality.py new file mode 100644 index 0000000..7bb0caf --- /dev/null +++ b/observability/local/log-sentinel/detectors/session_quality.py @@ -0,0 +1,96 @@ +"""Detect session quality issues from session_digest events.""" + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class SessionQualityDetector(BaseDetector): + name = "session_quality" + category = "app" + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + digests = cache.get("ss_digests") + + if not digests: + return findings + + for line in digests: + fields = line.get("fields", {}) + track = fields.get("track_display_name", "unknown track") + total_incidents = _to_int(fields.get("total_incidents", 0)) + action_failures = _to_int(fields.get("action_failures", 0)) + p95_latency = _to_float(fields.get("p95_action_latency_ms", 0)) + plugin_errors = _to_int(fields.get("plugin_errors", 0)) + + # Always emit info for completed sessions + findings.append(Finding( + detector=self.name, + severity="info", + title=f"Session complete: {track}, {total_incidents} incidents", + summary=f"Session digest for {track}: {total_incidents} total incidents", + category=self.category, + evidence={ + "track": track, + "total_incidents": total_incidents, + "action_failures": action_failures, + "p95_action_latency_ms": 
p95_latency, + "plugin_errors": plugin_errors, + }, + escalate_to_t2=False, + logql_query='{app="sim-steward", event="session_digest"} | json', + )) + + # Quality warnings + if action_failures > 0: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Session had {action_failures} action failures", + summary=f"Session at {track} completed with {action_failures} action failure(s)", + category=self.category, + evidence={"track": track, "action_failures": action_failures}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="session_digest"} | json', + )) + + if p95_latency and p95_latency > 500: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"High action latency: p95={p95_latency:.0f}ms", + summary=f"Session at {track} had p95 action latency of {p95_latency:.0f}ms (threshold: 500ms)", + category=self.category, + evidence={"track": track, "p95_action_latency_ms": p95_latency}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="session_digest"} | json', + )) + + if plugin_errors > 0: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Session had {plugin_errors} plugin errors", + summary=f"Session at {track} completed with {plugin_errors} plugin error(s)", + category=self.category, + evidence={"track": track, "plugin_errors": plugin_errors}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="session_digest"} | json', + )) + + return findings + + +def _to_int(val) -> int: + try: + return int(val) + except (ValueError, TypeError): + return 0 + + +def _to_float(val) -> float: + try: + return float(val) + except (ValueError, TypeError): + return 0.0 diff --git a/observability/local/log-sentinel/detectors/silent_session.py b/observability/local/log-sentinel/detectors/silent_session.py new file mode 100644 index 0000000..50a3a17 --- /dev/null +++ b/observability/local/log-sentinel/detectors/silent_session.py @@ -0,0 +1,53 @@ +"""Detect sessions that go 
from detectors.base import BaseDetector
from models import Finding
from query_cache import CycleQueryCache


class SilentSessionDetector(BaseDetector):
    """Flag a session that is connected to iRacing but emits nothing
    except host resource samples."""

    name = "silent_session"
    category = "app"

    # Events that do NOT count as meaningful session activity. The
    # connection events must be ignored here: the window that marks a
    # session "active" necessarily contains iracing_connected, and the
    # original code let that event clear the resource-only flag, so the
    # finding could never fire.
    _BACKGROUND_EVENTS = frozenset({
        "host_resource_sample",
        "iracing_connected",
        "iracing_disconnected",
    })

    def detect(self, cache: CycleQueryCache) -> list[Finding]:
        findings: list[Finding] = []
        all_events = cache.get("ss_all")

        if not all_events:
            return findings

        events = [line.get("event", "") for line in all_events]

        has_iracing_connected = "iracing_connected" in events
        has_iracing_disconnected = "iracing_disconnected" in events
        # True when nothing beyond background noise was observed.
        resource_only = not any(
            ev and ev not in self._BACKGROUND_EVENTS for ev in events
        )

        # Session active (connected without disconnect) but only resource samples
        session_active = has_iracing_connected and not has_iracing_disconnected

        if session_active and resource_only:
            findings.append(Finding(
                detector=self.name,
                severity="warn",
                title="Silent session detected",
                summary="iRacing connected but only host_resource_sample events seen — no actions, incidents, or lifecycle events",
                category=self.category,
                evidence={
                    "total_events": len(all_events),
                    "session_active": True,
                    "resource_only": True,
                },
                escalate_to_t2=True,
                logql_query='{app="sim-steward"} | json',
            ))

        return findings
"app" + + WINDOW_SEC = 30 + WARN_THRESHOLD = 4 + CRITICAL_THRESHOLD = 6 + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + actions = cache.get("ss_actions") + + if not actions: + return findings + + # Group by action+arg combo, collect timestamps + combos: dict[str, list[float]] = defaultdict(list) + for line in actions: + fields = line.get("fields", {}) + combo = f"{fields.get('action', '?')}:{fields.get('arg', '')}" + ts = _parse_ts(line) + if ts is not None: + combos[combo].append(ts) + + for combo, timestamps in combos.items(): + timestamps.sort() + # Sliding window: count events within WINDOW_SEC + max_in_window = _max_count_in_window(timestamps, self.WINDOW_SEC) + + if max_in_window >= self.CRITICAL_THRESHOLD: + findings.append(Finding( + detector=self.name, + severity="critical", + title=f"Stuck user: {combo} x{max_in_window} in {self.WINDOW_SEC}s", + summary=f"Action {combo!r} repeated {max_in_window} times within {self.WINDOW_SEC}s — user likely stuck", + category=self.category, + evidence={"combo": combo, "count_in_window": max_in_window, "window_sec": self.WINDOW_SEC}, + escalate_to_t2=True, + logql_query='{app="sim-steward", event="action_result"} | json', + )) + elif max_in_window >= self.WARN_THRESHOLD: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Stuck user: {combo} x{max_in_window} in {self.WINDOW_SEC}s", + summary=f"Action {combo!r} repeated {max_in_window} times within {self.WINDOW_SEC}s — possible stuck user", + category=self.category, + evidence={"combo": combo, "count_in_window": max_in_window, "window_sec": self.WINDOW_SEC}, + escalate_to_t2=False, + logql_query='{app="sim-steward", event="action_result"} | json', + )) + + return findings + + +def _parse_ts(line: dict) -> float | None: + """Extract a numeric timestamp (epoch seconds) from a log line.""" + raw = line.get("timestamp") + if raw is None: + return None + try: + return float(raw) + except (ValueError, 
TypeError): + pass + # Try ISO format + try: + from datetime import datetime, timezone + dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00")) + return dt.timestamp() + except Exception: + return None + + +def _max_count_in_window(sorted_ts: list[float], window_sec: int) -> int: + """Sliding window max count over sorted timestamps.""" + if not sorted_ts: + return 0 + max_count = 0 + left = 0 + for right in range(len(sorted_ts)): + while sorted_ts[right] - sorted_ts[left] > window_sec: + left += 1 + max_count = max(max_count, right - left + 1) + return max_count diff --git a/observability/local/log-sentinel/detectors/token_usage.py b/observability/local/log-sentinel/detectors/token_usage.py new file mode 100644 index 0000000..59468df --- /dev/null +++ b/observability/local/log-sentinel/detectors/token_usage.py @@ -0,0 +1,105 @@ +"""Detect high token usage, expensive sessions, and low cache efficiency.""" + +from detectors.base import BaseDetector +from models import Finding +from query_cache import CycleQueryCache + + +class TokenUsageDetector(BaseDetector): + name = "token_usage" + category = "ops" + + def detect(self, cache: CycleQueryCache) -> list[Finding]: + findings: list[Finding] = [] + tokens = cache.get("claude_tokens") + + for line in tokens: + session_id = line.get("session_id", "unknown") + cost = _safe_float(line.get("cost_usd")) + output_tokens = _safe_int(line.get("total_output_tokens")) + input_tokens = _safe_int(line.get("total_input_tokens")) + cache_read = _safe_int(line.get("total_cache_read_tokens")) + + # Cost thresholds (check expensive first to avoid duplicate) + if cost > 5.0: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"Expensive session: ${cost:.2f}", + summary=f"Session {session_id} cost ${cost:.2f} (>$5 threshold)", + category=self.category, + evidence={ + "session_id": session_id, + "cost_usd": cost, + "total_output_tokens": output_tokens, + "total_input_tokens": input_tokens, + }, + 
escalate_to_t2=True, + logql_query='{app="claude-token-metrics"} | json', + )) + elif cost > 1.0: + findings.append(Finding( + detector=self.name, + severity="info", + title=f"High-cost session: ${cost:.2f}", + summary=f"Session {session_id} cost ${cost:.2f} (>$1 threshold)", + category=self.category, + evidence={ + "session_id": session_id, + "cost_usd": cost, + "total_output_tokens": output_tokens, + "total_input_tokens": input_tokens, + }, + logql_query='{app="claude-token-metrics"} | json', + )) + + # Token-heavy session + if output_tokens > 100_000: + findings.append(Finding( + detector=self.name, + severity="warn", + title="Token-heavy session", + summary=f"Session {session_id} produced {output_tokens:,} output tokens", + category=self.category, + evidence={ + "session_id": session_id, + "total_output_tokens": output_tokens, + }, + logql_query='{app="claude-token-metrics"} | json', + )) + + # Cache efficiency + denominator = max(input_tokens, 1) + cache_ratio = cache_read / denominator + if cache_ratio < 0.3 and input_tokens > 0: + pct = round(cache_ratio * 100, 1) + findings.append(Finding( + detector=self.name, + severity="info", + title=f"Low cache hit rate ({pct}%)", + summary=f"Session {session_id}: cache read {cache_read:,} / input {input_tokens:,} = {pct}%", + category=self.category, + evidence={ + "session_id": session_id, + "total_cache_read_tokens": cache_read, + "total_input_tokens": input_tokens, + "cache_hit_pct": pct, + }, + logql_query='{app="claude-token-metrics"} | json', + )) + + return findings + + +def _safe_float(val) -> float: + try: + return float(val) + except (TypeError, ValueError): + return 0.0 + + +def _safe_int(val) -> int: + try: + return int(val) + except (TypeError, ValueError): + return 0 diff --git a/observability/local/log-sentinel/detectors/tool_patterns.py b/observability/local/log-sentinel/detectors/tool_patterns.py new file mode 100644 index 0000000..7128f36 --- /dev/null +++ 
"""Detect tool failure rates, permission friction, and error type spikes."""

from collections import Counter

from detectors.base import BaseDetector
from models import Finding
from query_cache import CycleQueryCache


class ToolPatternsDetector(BaseDetector):
    """Surface per-tool failure rates, permission-request friction,
    error-type spikes, and overall tool usage distribution."""

    name = "tool_patterns"
    category = "ops"

    def detect(self, cache: CycleQueryCache) -> list[Finding]:
        findings: list[Finding] = []
        tool_lines = cache.get("claude_tools")
        every_line = cache.get("claude_all")

        self._check_failure_rates(tool_lines, findings)
        self._check_permission_friction(every_line, findings)
        self._check_error_type_spikes(tool_lines, findings)
        self._check_tool_distribution(tool_lines, findings)

        return findings

    def _check_failure_rates(self, tools: list[dict], findings: list[Finding]):
        """Per-tool failure rate; warn if >15%."""
        ok: Counter = Counter()
        bad: Counter = Counter()

        for entry in tools:
            tool = entry.get("tool_name", "")
            if not tool:
                continue
            hook = entry.get("hook_type", "")
            if hook == "post-tool-use":
                ok[tool] += 1
            elif hook == "post-tool-use-failure":
                bad[tool] += 1

        for tool in set(ok) | set(bad):
            total = ok[tool] + bad[tool]
            if total == 0:
                continue
            rate = bad[tool] / total * 100
            if rate <= 15:
                continue
            findings.append(Finding(
                detector=self.name,
                severity="warn",
                title=f"{tool}: {rate:.0f}% failure rate",
                summary=f"{tool} failed {bad[tool]}/{total} calls ({rate:.0f}%)",
                category=self.category,
                evidence={
                    "tool_name": tool,
                    "total_calls": total,
                    "failures": bad[tool],
                    "failure_rate_pct": round(rate, 1),
                },
                escalate_to_t2=True,
                logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json',
            ))

    def _check_permission_friction(self, all_events: list[dict], findings: list[Finding]):
        """More than 5 permission-request events -> info."""
        requests = sum(
            1 for entry in all_events
            if entry.get("hook_type", "") == "permission-request"
        )

        if requests > 5:
            findings.append(Finding(
                detector=self.name,
                severity="info",
                title=f"Permission friction: {requests} requests",
                summary=f"{requests} permission requests detected — may slow development flow",
                category=self.category,
                evidence={"permission_request_count": requests},
                logql_query='{app="claude-dev-logging"} | json',
            ))

    def _check_error_type_spikes(self, tools: list[dict], findings: list[Finding]):
        """Group failures by error_type; escalate connection_refused."""
        failures: Counter = Counter(
            entry.get("error_type", "unknown")
            for entry in tools
            if entry.get("hook_type", "") == "post-tool-use-failure"
        )

        for err, count in failures.most_common():
            findings.append(Finding(
                detector=self.name,
                severity="warn",
                title=f"Error type spike: {err} ({count}x)",
                summary=f"Tool error type {err!r} occurred {count} times",
                category=self.category,
                evidence={"error_type": err, "count": count},
                # Only connection refusals warrant a T2 investigation.
                escalate_to_t2=(err == "connection_refused"),
                logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json',
            ))

    def _check_tool_distribution(self, tools: list[dict], findings: list[Finding]):
        """Info: top-5 tools by call count."""
        usage: Counter = Counter(
            entry.get("tool_name", "")
            for entry in tools
            if entry.get("hook_type", "") in ("post-tool-use", "post-tool-use-failure")
            and entry.get("tool_name", "")
        )

        if not usage:
            return

        leaders = usage.most_common(5)
        findings.append(Finding(
            detector=self.name,
            severity="info",
            title="Tool usage distribution",
            summary=f"Top tools: {', '.join(f'{t} ({c}x)' for t, c in leaders)}",
            category=self.category,
            evidence={
                "top_tools": [{"tool": t, "count": c} for t, c in leaders],
                "total_unique_tools": len(usage),
            },
            logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json',
        ))
disconnects += 1 + + # Disconnect:connect ratio check + if disconnects >= 3 and connects > 0 and disconnects / connects > 2: + findings.append(Finding( + detector=self.name, + severity="warn", + title=f"High disconnect ratio ({disconnects}:{connects})", + summary=f"{disconnects} disconnects vs {connects} connects — possible instability", + category=self.category, + evidence={ + "connects": connects, + "disconnects": disconnects, + "ratio": round(disconnects / connects, 2), + }, + escalate_to_t2=False, + logql_query='{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected"', + )) + + return findings diff --git a/observability/local/log-sentinel/flows/__init__.py b/observability/local/log-sentinel/flows/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/observability/local/log-sentinel/flows/__pycache__/__init__.cpython-313.pyc b/observability/local/log-sentinel/flows/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..512dee39459b5c85f6d38c5a3ecf89128ad19567 GIT binary patch literal 192 zcmey&%ge<81m2U6W`gL)AOZ#$p^VQgK*m&tbOudEzm*I{OhDdekkqYsXRDad;?$zz znDWfLl8l&?)Uuf3%v{~#lGO6Vq7)E2qclmkAg44vGcP7T38=IzF)1@Av!pU6CqFqc z2S}#t7N_QwWag#j#H8iqmlwyx$7kkcmc+;F6;$5hu*uC&Da}c>D`ExO2Xap_i1Cq` Lk&&^88OQqYCMSMGr~YJ4{TI zsb@OVv8U}AQd2}H9poqDr;xtXX*v^Ul1%$(MoCewo=zsoOeb$Lka69oetXABgvw3d zx-Q=`e0Bz$q@h9IY- zapC0?11|^(SPGa?;k_tPTdMPn@H zHb(F)7+FLvY!xkGem^T(^%?+zQYQb1?H;A+<}H; zIm!pBH9NVJA&swzDzutS0*J(uIJO}`SVM@}_^s^+WEOR!5R%Ef-a;ry*0;0}b$~La z1T>0nQ-c9o;}USFqPZeO_K{5o!Wyecasrd9DCA^8(0+KTCmd2Ftb`|%DfMDFE?o*M z$?*-ZJy5wE*3S-0x&d@% zE^B-wLKGf}XzsEsI?c%{auE-JE2z-D_bU5Kdx3YX@H>|I9oe6L%6F~768e^mK<2uf z*8T|Zo<+liKySg#k~HUtVz#uh#M(Ai;=pNiGbhWIEil}L%KO~t*>di#!BJ59QBf1%cNHB;X-N-@LLCPi?YiIsb}GhmNg9+#)M&dbqpIpEOfw4$+@sCrT3k_tG7W|fnQN)TR3#g)kG98W`vnuTcq&PF&O&9G%oy-B5|9 z1-)S+KuqMR`8ONtRv#kVlyy_eufK8ajcjD4renFLqu{8$?{d$1-}Gi* z{Hv?|>WPB8Zmwsh=iZL46?gZtyZf_kjrU#kH%8`0XGZ7b89DcKzTx|yxDMXgd%wXu 
zmwGdm3+5ZzrkRHfsy+0%+cS4$=15k_yLVshEwr><@4wbRt(_j)xYY z`_y&tUkb!`^ug1fQx5J!hy9e-hIf&-Sg@o{;MBKVh&+LWlC{r4HM3yYLFl(Ok?JZL ziyNJ%6}k&T2AKXuV*10SP!ys?ns^!XjL}!*#+GvhxJ{+=EZwE^9z$UjdDDC!Ltzu` z-$7ATewFArY<-Cy7&DAE#lWjd;UYwdPW?5NWm9dgmrkopSF`EWl_|DuqF^4Q3D>4~ zSLRYHy3L!2b%(7jpeX@m5#otjRa{bHIH@z$>`Hn9$D~L+9n(D0YZ)n~O7Tc6J)V&zaw@ro<`|7;0J$V# zNH!V^=_m!4Agu8o0`?NHj{p*WFv*(u0Du6aF{6@-iFadSsR7oA0=i33icYSO0*JW2 z;rTmYq`5YGz8*@vFs*z8;0h`>poXTop_!rVP;TUt`oLO!mCL?bhZ>%orqr_9EhK_D8EtaFRkKID4Y&)Kl!BX3MD(uq5zPQ zm`lcMod934zfv`ex*$b`XfqK+1%xgLK_J=Mj=)6J1u+CwS`F#(rHe|jFYewHcPi2b zOCDT4gIEM6(#fN#aE-5HOfU2@aAj9algAK!^zSLWY4{;&~dm7D|a$gOlJgijE46Io9 zaUy@1fo$>8Mz*lQ4Vj>?yx+uzx0~37^C6pPJ6tNk1hBb*V;fRnN->&;7;PUkqUuwy zSFD@>mi5r1o-au#q)R3tiF43Q3D}O1Jt3Okb|IJ;> z{6h5qLf`W&LdPgmLGe=%Tv3_FIp+U$r%Py-6VSkK48f052N_a*Vj}86k|-1vWAHfy zIWavc;Q*)6o!uJsV~x6GC-QHCT3LLx1=BRZJZ5MKuTlSmw#XXB7d0|i9zjwudXxV=# z@9+6j?C#F@67S3J%I~F?{VzO3)bc5}n_@FWrMYLhxhLOz z^dsi(i4QG*c6{LYuzI<9aGfC=zGyr8NDt>fJLx~!OikHOZzNrzb&6>f+NP=kLMp8a ziS$G&4su~h9>=@k!g`twFgmU|$D?ZOBBW}gq+lUKD}kDgf}2$9nuUCtOGb`03I-xw zLzJXo(3q$!LxHbK}VOj2{eVdQLx(g>3Xt68z6OvtLDS=A_p z{H_>RvTj@npCOco2mn*uI8j#~acWTBMtkNNLx;Snt#5sqnvJ$+ZQWJt z->d4fO}EBwj^&%W3m)Ix)XY>au1PkhT+i}~m($=%w*7Hf*(KRbf zKSZqHeV_w7+Wx&lYx{KHLf?E}zOH4>hqkvqV31cRwj=M31@CXYYfiqddewovEz>6o zp2oSU*{L-r>#8j}P;1+*_|5oI>+%za^{E?Hs8tG0+Y7te7tg*ubbDy=xh3(A>wV9= zo;$X?zI(#Jx)pXlumiKA6FFT_OVm{tYmu|=`rx&}w~jBW_s)ivedkvkLk|w3dVjG8 zZS!2MezfWVI;9q5{*kZ#Os(ak{nY?Jwmbf;|!$Psb-@ zf;%FSS0|$K#!5{jl1O4jmEkXmR5}vDPs3hJ@}cpC$HtQ}Rj&*&sr&GA1Q4UcX8~yS z5iswlnv6xFs)~~%6RM;{BDWDHAIwI%$PwYD^pMQ!Gsdyx zuz*doMo7&JPl8$lVLt%P0#cA<#dweY3f-gh>Q}$#%Y?Laca1-4i0wEI!vaYgiU6R< gP!#nU8u${~zC>03K)rd?`_S?{MR`^cfjWnO1ENL-Gynhq literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/flows/definitions/capture_incident.yml b/observability/local/log-sentinel/flows/definitions/capture_incident.yml new file mode 100644 index 0000000..3bd6d68 --- /dev/null +++ 
b/observability/local/log-sentinel/flows/definitions/capture_incident.yml @@ -0,0 +1,25 @@ +name: capture_incident +display_name: Capture Incident +description: User triggers capture_incident, plugin records the incident and reports success. +source_doc: docs/RULES-ActionCoverage.md +steps: + - id: capture_dispatched + event: action_dispatched + label: Capture incident dispatched + filters: + action: capture_incident + timeout_sec: 0 + optional: false + next: + - capture_result + - id: capture_result + event: action_result + label: Capture incident succeeded + filters: + action: capture_incident + success: "true" + timeout_sec: 5 + optional: false + next: [] +expected_completion_sec: 10 +gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/review_incident.yml b/observability/local/log-sentinel/flows/definitions/review_incident.yml new file mode 100644 index 0000000..f99e866 --- /dev/null +++ b/observability/local/log-sentinel/flows/definitions/review_incident.yml @@ -0,0 +1,34 @@ +name: review_incident +display_name: Review Incident +description: User clicks an incident row, dashboard dispatches seek, action succeeds. 
+source_doc: docs/RULES-ActionCoverage.md +steps: + - id: ui_click + event: dashboard_ui_event + label: Incident row clicked + filters: + event_type: click + timeout_sec: 0 + optional: false + next: + - seek_dispatched + - id: seek_dispatched + event: action_dispatched + label: Seek to incident dispatched + filters: + action: seek_to_incident + timeout_sec: 5 + optional: false + next: + - seek_result + - id: seek_result + event: action_result + label: Seek to incident succeeded + filters: + action: seek_to_incident + success: "true" + timeout_sec: 5 + optional: false + next: [] +expected_completion_sec: 15 +gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/session_health.yml b/observability/local/log-sentinel/flows/definitions/session_health.yml new file mode 100644 index 0000000..b633549 --- /dev/null +++ b/observability/local/log-sentinel/flows/definitions/session_health.yml @@ -0,0 +1,30 @@ +name: session_health +display_name: Session Health +description: Dashboard opens, WebSocket connects, plugin reports ready. 
+source_doc: docs/RULES-ActionCoverage.md +steps: + - id: dashboard_opened + event: dashboard_opened + label: Dashboard opened + filters: {} + timeout_sec: 0 + optional: false + next: + - ws_connected + - id: ws_connected + event: ws_client_connected + label: WebSocket client connected + filters: {} + timeout_sec: 10 + optional: false + next: + - plugin_ready + - id: plugin_ready + event: plugin_ready + label: Plugin ready + filters: {} + timeout_sec: 5 + optional: true + next: [] +expected_completion_sec: 20 +gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/transport_controls.yml b/observability/local/log-sentinel/flows/definitions/transport_controls.yml new file mode 100644 index 0000000..23bed78 --- /dev/null +++ b/observability/local/log-sentinel/flows/definitions/transport_controls.yml @@ -0,0 +1,24 @@ +name: transport_controls +display_name: Transport Controls +description: User dispatches a transport action (play, pause, rewind, fast-forward), plugin reports result. +source_doc: docs/RULES-ActionCoverage.md +steps: + - id: transport_dispatched + event: action_dispatched + label: Transport action dispatched + filters: + domain: action + timeout_sec: 0 + optional: false + next: + - transport_result + - id: transport_result + event: action_result + label: Transport action result + filters: + domain: action + timeout_sec: 5 + optional: false + next: [] +expected_completion_sec: 10 +gap_severity: info diff --git a/observability/local/log-sentinel/flows/definitions/walk_driver.yml b/observability/local/log-sentinel/flows/definitions/walk_driver.yml new file mode 100644 index 0000000..e3e5ce7 --- /dev/null +++ b/observability/local/log-sentinel/flows/definitions/walk_driver.yml @@ -0,0 +1,33 @@ +name: walk_driver +display_name: Walk Driver Incidents +description: User triggers find-driver-incidents, plugin seeks to each, results returned. 
+source_doc: docs/RULES-ActionCoverage.md +steps: + - id: trigger + event: action_dispatched + label: Find driver incidents triggered + filters: + action: find_driver_incidents + timeout_sec: 0 + optional: false + next: + - seek_dispatched + - id: seek_dispatched + event: action_dispatched + label: Seek to incident dispatched + filters: + action: seek_to_incident + timeout_sec: 10 + optional: false + next: + - results + - id: results + event: action_result + label: Driver walk results + filters: + action: find_driver_incidents + timeout_sec: 30 + optional: false + next: [] +expected_completion_sec: 60 +gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/walk_session.yml b/observability/local/log-sentinel/flows/definitions/walk_session.yml new file mode 100644 index 0000000..92bc239 --- /dev/null +++ b/observability/local/log-sentinel/flows/definitions/walk_session.yml @@ -0,0 +1,33 @@ +name: walk_session +display_name: Walk Session Incidents +description: User triggers find-all-incidents, plugin seeks to each, results returned. 
+source_doc: docs/RULES-ActionCoverage.md +steps: + - id: trigger + event: action_dispatched + label: Find all incidents triggered + filters: + action: find_all_incidents + timeout_sec: 0 + optional: false + next: + - seek_dispatched + - id: seek_dispatched + event: action_dispatched + label: Seek to incident dispatched + filters: + action: seek_to_incident + timeout_sec: 10 + optional: false + next: + - results + - id: results + event: action_result + label: Session walk results + filters: + action: find_all_incidents + timeout_sec: 60 + optional: false + next: [] +expected_completion_sec: 120 +gap_severity: warn diff --git a/observability/local/log-sentinel/flows/engine.py b/observability/local/log-sentinel/flows/engine.py new file mode 100644 index 0000000..8fe322d --- /dev/null +++ b/observability/local/log-sentinel/flows/engine.py @@ -0,0 +1,85 @@ +"""FlowEngine — load YAML flow definitions and evaluate event sequences.""" + +import os + +import yaml + +from models import FlowDefinition, FlowEvaluation, FlowGap, FlowStep + + +class FlowEngine: + def __init__(self, definitions_dir: str): + self.flows: dict[str, FlowDefinition] = {} + self._load_definitions(definitions_dir) + + def _load_definitions(self, definitions_dir: str): + if not os.path.isdir(definitions_dir): + return + for f in os.listdir(definitions_dir): + if not f.endswith((".yml", ".yaml")): + continue + with open(os.path.join(definitions_dir, f)) as fh: + raw = yaml.safe_load(fh) + flow = self._parse(raw) + self.flows[flow.name] = flow + + def _parse(self, raw: dict) -> FlowDefinition: + steps = [ + FlowStep( + id=s["id"], + event=s["event"], + label=s.get("label", ""), + filters=s.get("filters", {}), + timeout_sec=s.get("timeout_sec", 0), + optional=s.get("optional", False), + next_steps=s.get("next", []), + ) + for s in raw.get("steps", []) + ] + return FlowDefinition( + name=raw["name"], + display_name=raw.get("display_name", raw["name"]), + description=raw.get("description", ""), + 
source_doc=raw.get("source_doc", ""), + steps=steps, + expected_completion_sec=raw.get("expected_completion_sec", 0), + gap_severity=raw.get("gap_severity", "warn"), + ) + + def evaluate(self, events: list[dict], flow_name: str | None = None) -> list[FlowEvaluation]: + results = [] + if flow_name and flow_name in self.flows: + targets = [self.flows[flow_name]] + else: + targets = list(self.flows.values()) + + for flow in targets: + matched: dict[str, dict] = {} + for event in events: + for step in flow.steps: + if step.id in matched: + continue + if event.get("event", "") != step.event: + continue + if step.filters: + fields = event.get("fields", {}) + if not all( + str(fields.get(k, "")).lower() == str(v).lower() + or str(event.get(k, "")).lower() == str(v).lower() + for k, v in step.filters.items() + ): + continue + matched[step.id] = event + + gaps = [ + FlowGap( + step=s, + flow=flow, + description=f"Expected '{s.label}' ({s.event}) not found", + ) + for s in flow.steps + if not s.optional and s.id not in matched + ] + results.append(FlowEvaluation(flow=flow, matched_steps=matched, gaps=gaps)) + + return results diff --git a/observability/local/log-sentinel/grafana_client.py b/observability/local/log-sentinel/grafana_client.py new file mode 100644 index 0000000..a4f9162 --- /dev/null +++ b/observability/local/log-sentinel/grafana_client.py @@ -0,0 +1,49 @@ +"""Grafana HTTP API client for annotations.""" + +import logging +import time + +import requests + +logger = logging.getLogger("sentinel.grafana") + + +class GrafanaClient: + def __init__(self, base_url: str, user: str = "admin", password: str = "admin"): + self.base_url = base_url.rstrip("/") + self.auth = (user, password) + + def annotate(self, finding): + try: + requests.post( + f"{self.base_url}/api/annotations", + auth=self.auth, + json={ + "time": int(time.time() * 1000), + "tags": ["log-sentinel", finding.detector, finding.severity, finding.category], + "text": f"[{finding.severity.upper()}] 
{finding.title}
{finding.summary}", + }, + timeout=5, + ) + except Exception as e: + logger.debug("Grafana annotation error: %s", e) + + def annotate_investigation(self, investigation): + try: + requests.post( + f"{self.base_url}/api/annotations", + auth=self.auth, + json={ + "time": int(time.time() * 1000), + "tags": ["log-sentinel", "investigation", investigation.finding.detector, investigation.confidence, investigation.trigger], + "text": ( + f"Investigation: {investigation.finding.title}
" + f"Root cause: {investigation.root_cause}
" + f"Recommendation: {investigation.recommendation}
" + f"Confidence: {investigation.confidence} | Model: {investigation.model} | Type: {investigation.issue_type}" + ), + }, + timeout=5, + ) + except Exception as e: + logger.debug("Grafana investigation annotation error: %s", e) diff --git a/observability/local/log-sentinel/investigator/__init__.py b/observability/local/log-sentinel/investigator/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/observability/local/log-sentinel/investigator/__pycache__/__init__.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a6ee28de73aaeb1da93b852acb5a79ce69a2354 GIT binary patch literal 199 zcmXwzK?=e!5JeNKAVLq~q8>oRjeB>3i-C~Z7zfg%B-56jLp+2B@ey;; z8~(tbDbH6d>U@7;n(t(P@gGd{2oI*&njfFc&*C8r%Iwk#UcYP8_Wc-qMl4blL{U8ISwi=Zr_RR+*@DiCT(&VvwWk%ZVZ!?@hlu4;|i R`$eoxZ-oS(jC1a@NMD%+ITHW? literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/investigator/__pycache__/chain.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/chain.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56f57f601ac2a5d008918fa34475267934c80841 GIT binary patch literal 12150 zcmb_iYj7LabzZ#R1V{qlQ{;*diJ(YGA|*?No-{?tq$rZY5{@LogF@gEf(0(%}Lb6nW_GCs8Y?t{nK;q zVgZnXEj80C@!rLKoV|PR*>k@0-QBQQj1;7wzV)w>J?#|r8~hU)T87|(Y7>cJD z1*1Hz;1#4)@=8*wcoiwtyqc66UIV4-w02a->u4OKKCK@$@P<(%ZyYu8CQ_$4Z63Ao z7ILjUT{UXut)n*HHd@VB)0AK7?U1KYD%&VVw_hRs=4AYJw2zFxJj_guK;_89c%heA zH#s;pHS8S^42}RrVhN2Oci-R!TN$G~7Wz(ER<5Xmh{Tj5zt|lD< z^o?+A)Loixj1NgqE#>;ckB$F?i?=C11)D_S2Epbi{c1)9e`@&C_%-|VjJA<_Rn2P| zonOc3{rY`s#z1NejL~oO>og#vP;VmjCdLe7E$~+be|oLh~P}V*r$<<5^>0$NT3VqZtsut_^7pBiwLz~_I)L3#)HRIUSw}x@@wc$Efy;!qp z?Z|#Va)Lh=ObHPA@kBY{**bEyD%m#(Hd_BXm^P)Nm`w_w!%1H$m__r|y+1r#) z;Vo#L+yP5QA?u=#iWb|?(b0l7Qq;LUQItZ`x#Lm!eWJpYOF1qocDpp9mKWlDWS*~r zPP`Q}8)nszL91F|qqCLT1e`66Nem5n{62x@1^?9u7r*46VXydw$Xu@wXRikN8N9xf 
zxY#=%O@t$yKXwtiUI|`|L?iKQ{%9-|j6xpnl@-iiu3-KUzBumrYob07h;Wg3ARsn9 zz0z*Eu?{9Bv_p2C$~ShbHtxzc?phwoHSS5N|H=wAa%QjpYr_U~qeNZdZG&}S;|ZP% z71s`0aV5wuXyIw!rlxTONnZJ)tZbuX72=dW%3ECbQWsxwzc|%&akrmWs!!PqmXcBQ z!bo}FIrX$omd_{~DWA$~kbjp-d0w@}85YNDrit~%zFu=_gnZS8nunUPB|$B&&6WhU zHCw9SC)C<2)KXsDtSzauLa>4jTN>e~PbJTsQ4MH0?enf3rvrIS735Lg=BI0?TjV;Q zc7I_XIsKooW4a1EW`LP&m*tmApI&}4&Imm!%!)Jlj9%33Er|h>D*GQs<2grB!5_d% zQI-5xWyzkaFy`+lYI=t(+Ut_9N+qMMEK|NW{rvBT86;KSjS4=u;%->}gxxS#*o`Wf zNu}Mel7ZP-N%mAQehE-aD8|Lvug806FyT=eof=L2 zTMrGjd@vNJ!LuYnU{%=}nrDSXH14K{1?U}&vvfR0qwkZ3&@qmV#;&5s*~kpbg;@8a z|A03yRP<`FzUaOB;cw$0WbG7#Zb(ZGkP;WYQxJ`O zEEW%hf(e05w)(i>#b{wW;w(Q0Adkj&*i~qEi>6SF=ULb?fU#dRjmKb*ioXRKXBUsm zM%Wp*sEy3c!>)f}9U4X8qtDIY`;@MDaZjR+F zHW0ry&x*RjVA04HW*_73SolU*KE4ODUd(o5wimN*%=#b`b%18VVU`!oeDG=j)NVe; z39P6gswp}m+$_tZf(2$0Jef{lP7rGVrh`gvs&f`rJP?H?5du;VcE;rt)o}pEqOMRS zssQzfD%dhndnFi6umX?1UDVFT__<)5@4%^{X7FfU`JLEi3rLND;{ICY8hM}1utCvi zKthiaFhf@aK&bF`iK;LgCtyv~0)WCtR>RZ9ctij$EAPO&;ev_g!qW-yARiYko1RNF z6dGX_Oz~Je7?qw^)Xsw*&BsL(S+RgLRt0M+>ZKYOpq&r$0y`sPSWIewqeP$=(%+%(}mBNZ`yv_^NuI8 zi;Fh2BDpN9@| z!RWOlcv_;iMaqW;^G%2Nm>~3q5^*40!t{JF4sMwf+@iv`2@au|CvfO*;Gzr;9YQ$7 zVAv&X#HPFov>#fE8eM9H7A3S)&~Ru`L5sH3SH7yE`t=2nb)*2Y)C6Q{lB!;MI;nhx zPAWZg0(FD1U*j&d71zaxBKTVV&ozT|uC@K2O6jS2%M7kAwcmFE&)I+2K-8l2c2_Ea0+7@f~j~^GtmHmK+<3y z2?du$RKFUFa6GOJKZqH+W&SW`Com&q651yIC}zW$p$Q}ag+B>bE`x+pAZ7*mrDZ#V zy~Z({z-$t;vyh3}AQ!q6;|0|HB7%_cgS`wt6qQ2U`JCE=?~2d^F~oK1p}l@-WO3v% z>dU`7IyN*=Zcr+RF=cpSq-tEN)w}Ok?_R5M+>GUFcI4~o^DR3c>(s>QoBto3{_nI@ z*MXa@>FWE{?LPu9yfg1;%GY${?Y)l;nj;7?swmsGyuGpLD>Hk%xjN?g-R^j!04V~gwMF&Y!K!Zo1#=6Kw>kQy>MA-#k;u<=V++_$Nga+08e z6#~ME`Vt$Y=m-1cQ~H2SW>iRexRs37twBEnepc;MSF%uyfsh-Z^kp8gf|q5CmB&ME z1@Ft47=xOU{Vs%-X6T3Nz9q@To44qfOC1o1*oL3(lpfybDhROmn4utgSXH;LxL4Nf0+cv#AyaKVY5`uDrTu8s#Xd@Yg_W* z6=(>t3r%} z_z;4(SR(%DV;GQhN>hW0Ao&YCMdyqMLgaA~0xfxR$@XHW*C4u!(0jDpBGK~&GjU~UNAL=yy5@q`cv!HEEW3Tt#gmUCczA-u#0FNV*9 zMiN5cuZLLaQA8b{$-tdO!?0ZG(n>}Y-@~$1P~-GP1xtLFWGjiGB(_;>tzZ|+1I|AO 
zVS)%5I8JrF!MRr3v~=XnBl+68rDKc7GBxdh>TP=;Zfnc8bddk;4|jIwckX(uQPIxF zCWEtP-A*+$KW?ztt5PS{n<<-p)!Lf1wx;`Y)*Wj$=Tg;TRi^d8UHj_5NOoWZqL+bB z1~R_$nU~LheC&a3I$zg-Hycn)!Lo4c5hHh zm>&c^<2S}LO~>zbJ}|%ZX{}?;TEDraf9320^MSQm$N#KrV8jEgWWG$sE>ME zA^#W4zM&q~k5vZD&Ca2nsvqyvz%_9~n*s{t+=)^9XJXX$P#igjl*|%|cf zie{t8M|*+!+?wR4IM;#j?Mzq#e?Da;BZxtOY(6R(LRl7Y^g70L2-YkJGh>XL?i|k; zQ2v+5EejaFQm)>oXR3;Fk;*R zLyw%992}bJp}oUH6KBp0kDn;i4^51p962#OJ_G|s7{)gom^wc>Eb(u$g>xsrR+8)S z3*BArUB_Hq$By{9u4A6E+bl?TS=fA&h0;A1O1E%%7CKzXv9i%mN&Y`eJ1&iQO3?Gq zXJB*JrzCyFqqLUV$S|Na7r?|u5_1ceBH>G}9)BBYP{| zoSfKZvKv$S^%`nh=k4)##(&Y=b7L$uv}SiMjl4N> zEByTz-ygX%@+0MkmLFQOJ;OQs$<*+#toBqQ-_m~j@;jH8x4+A+w)ACN`f@G%Zff5$ zJ=Rg%de_aAvw7*l;)S#sI4#G%Rfi|*@Z=nak@0e5w!M%uy^wEc0sgAhm0HN^n~@Te z2Sjp?gR73CS;x_w<9K1fRL(S&Z)htGcxZK`EQBoDWXjQC5>x&yV#>=76u}5>P2@OK zy|tF2c2lx}gQH(%(ZIF=>MWY;N(Zwf7Ox;rmPS*1s1Eo8XQBj$kpQfwe&&I#y3&Dd zk-0&(qQcQ_MZQN}33d83-XeZ1tpwEKxyP0Ss2B<8Es0SABcweSg4PPhoW!BPfg~Q; z_*7t^Rms!d1V@*D@j?Th;vv2!bO{jma=^S(fHNiOG^M8i>F8+iA{(W9kJ5z3cPCpR zR+cDlT9{`;k_>qiKtDJg(nKS3kvLH3xHSZWNul>C_$g{;Sg-*+#xLAbq=Q?Q1X9EWRgj`bY+~qhD?rs z1Dn-&EovpI0cQv25rPg{3L&tB>wtlw#KuavPI}xApn^*0wtlhQT@qYK6?v>de#6F1KyYg?}{OFq*|DUb;XS4p< zjNO&7hf~A(?K^JnLq<4fZUGzMXk0f^HmCfKqG6U3zC+|4{V81FpN0b$hAvBj)??Za_JP5e-SRl z_DZ@$d~LjVZk2(K0s|Su30leqz`Pay=WBPZD^&*98X$Pjo1Rov5Y|9?qU0?JXi_Co zqWvm)YvGK!9T2P9i`?Lr#Aq7JRYZl+0Z<2p`BXS=P=DoJ_0vdwkp#qlK?=+UPUwM* z)yg$6qo`y;8Dphcl@2Pvz?zc#3aTg>3Q)Vk5h1D~gxVM3#B>Jirw|utfIN~(bth@m z%HYgQ1f%R=v^YqX;u00pN%Jcnx@(VQd|mtnnE6w?M@Vegf^rhuRoIalB(mX~Cul{2 z`kC%oIbC$8@cP0&^{Wd6H2^#E6@W}oE8)cdI*3gw-88%b--F9>z<(s{UWOn19mt@9 zhC)6!d?a*R zrw=UmWatAAYzIHJ)x$~U(zV5FS(<;~XEU>xGtpRPE|!^{f8c*DNAtNl9u6SQ_BSSO zOxy~lHMg#&L*H*-X8*B!xq0QnJ^kHerg`)i=F^W&)V98Lph}%@eB;J9ZXL{7J2LtX z0tA6lM`BL;;L(a#NX?A9?D(}I7o20^YeQoo031&uir3~q;5GPGq0nLtz-c3VJc)Y2 z#R37MFg$t#9uK?9sP32HC+ZKF;4rqFAit5kl1Oplxy?ujMM86I{8DU&7+{_(wj+Rt z>Ch5@lL9_+F%gH45dwE8@&wYRAVbMxhQLht9b|t&tt-`D}Z$-5-b+_Y#3oF*_uH#RzZoQ4t 
z)~qXa8drMfIt9gYXakE4m0S-&2KKOMI@9sxp%wee%mXbeG2V^es{n5YpHkp^E|wiB zK%?|Mgdl-JwDjcULp0&HyMYk-n40W9M34BDuTZ7{MwDiN&o#!#cN&E6BOAtlgmUl% zo9v{hKP)NRQT`LCL-Q_-K?eR%q4<;<`3+_IjH>#KGJHmvenVM*OYQl+Qms%vrXc%o sBc*8n6}3N0?SE*ntr{Bd8yYiBeK|w_b?xu9UX`Nrzu==(g|tZj3r*^e%m4rY literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/investigator/__pycache__/knowledge.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/knowledge.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67ac45e53ee80b1ded72bde8d8235740a4f2e50d GIT binary patch literal 2602 zcmb7G(Qex|6m{AT?TiJ({@?<8NmkowyRI9&wSen7jaxejEEhq600J%1HfKr{NXm|j zybRbM4EuU{z=aL3y zMuAMdN~I>1E~WEoCcLpNaec2=duwtMR+6AtiCJp!HX@M<4X^?uP)XqBO4zu8c~T=0 zsdNUUNX39{TDeqmMcx?Qs@FTC?oof->yF=ydhL3Xx?d9jj`9g*I-h~=UhB9wrpK)x z_Q^^SyDgxbgGNk=OpIOAQVHshf)}p?OlQ)zfOT3T$cxN4=}1IgnbeV4N(=gjT1E>x zlas(i3+ZWM6$fSSL96)RXN2e0tQ@|Mg_}v@~)tD7BEytYfLAZNf87`GEttKw|V4qKE|uysaPn) zXmIk9g6>go*x?8b#6)V>CXr>d=Ty=}5;@wJkrsI@o3UIr0fgAi_8TPAWrMU_O3kdQ z?J>Osiw4DoB$!?)4u&nF)Sz57NZBHHjY=sk>TVG5VVR#s6*pjQVubzFR7UGaOGbVz z*X4Fw@Gw>`6F!u9_2J;;ENIssw&1UV2hF9>e%`)0HJMC8qRnS?7Tl-^!^#a6 z=A#ykN*ir#)h2`pdWK+mHPZ{d2{*4{)=kLybs6ne{o59u3B91$Dwv{8Qm-ptx53aRpO_x55!x{~wU=ATN=z8@8L^Q02@P=M(36(xR~ysmqM@95!za zTZA)pkcr&M11ncJ+b5QPkBle{fgx^1V%pm(v(*(%a!;v2y%8y_HO+OUYGk-O=)8H+ zqwWcQ4rtgtEh<@IM@YmhD!aqcFzwU-`jl46&u{GcbnBl>u9BBYlr_J(6f+QHLmdH; z{5q33VZ|3Upb+$C$U;VSF&V^YWs{8$pJ4#KHW{;tm@pQ&7L^!**4mMW;TL5+_k+(3QTY@VX~7?5TpV5zQiOMJ44xIvM4jL&oc1C>Dy(Q5}$V z3-aEsnaRYOg-ojo%m^Qwpej25_Q1A6Q;Pa*(X_ZLg9}Jo`EyZm8LVe5CXwX@@1MII zG}q8Xrp+23&OnQ6F|XX*5jv-R@+y%gFLcnuwPa1*(nniomWJYkZ!F1D<)hIam`>G< zAw>>HDd%#DMqcmm{p6iQy7>-*9MW@Ke=-@Vsfti{l$YFmD>>lWg7ym@a8;7I*75$W ztGi(snM<>J6@gKEozH(j7y6y8f2(CLvnep>;G~x3PdNV)I^GUM| z=((Bjy+KTrR(^f14a|q%S<}JceC;~_@Y;30y|A^i_0_$f(3Ns6pV_dQ Wy|c6PSMBBdJB_dI{IY95!`weayMgNf literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/investigator/__pycache__/prompts.cpython-313.pyc 
b/observability/local/log-sentinel/investigator/__pycache__/prompts.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27e697b3c056656712183f21fad37283611055d7 GIT binary patch literal 5749 zcmcIo-E-4c7MJA@94DlOBuyYKtD4dhHV(8vOKD*?4F(K}V=O}n7=$cc+iGM?kaCB-o@C61p4MEYT}332a`Y*Ks; zWUq&M$D>k2+{d*2l|h94;uL64huTbUMtoxkVV14Ti3f)K9TeyLwF87V`(-SKx7f<4 zEDnjk0`0*VMr9!|goEPSLov*V>3)AqZxq7eAi8&k5RQoNj)>vCepHslQHJ1a4_0zg z9MJCbG}wy;wsK7T^-v5mB8{u3&GZ(&3vL`|y88_~<1FJN{_I zXZ{;)yr-NjkCjv9SRD58DV|=-T+wvR-YV;+BI|IiWe75Ch)XZ`x20^cwvr`l3)8}|3%wx%Tz<>_JU;o& zm7myUZvN5g>ifp6+54KZc_C~gtD{X*~p!*0zlpwsmwzgbkGN;jyF^E#x8@UVF{7Uw0>H)g1 zsmM?OGIeRWbU9nhUJzE+^QG*~609nwVWY2XSX?$_eXE5S`>L9uYDTS(r$`k7)g_^b zh-CtUo0?r0vNxAkN~;^xj35Ep&v<+`l`5{Um!y^D>tgn-@TqB_j3r~ahGnZRVAHe( zMRveIre29g&Zkz^i^c5faw)f-2Lp`gPEiHO(y)mDyTXDFBMe)_uFJMS9Lq8>>?znQ zswUSAlY!*c3J?z112)c7Wrb#;H7r@NSu%393K0Ts)5M!q-E4y6Vs>SHZ7rL>5c&?c zqySd1DdmcegsS${BCSAGs|iY7Hfm^*24z6)1mKcI$&d_#r?wV`+pZE4#vHVw0BoKHEIff^8BHeV6~hKRyZ zXqsc#+b|;pm<6`*0I02+O{t+7+X7tZCRl*p23?d0qlJdZV3(o#frdYJ6l@ZbQ5+kn zUh9>6Q6O88U_(;`jSvSBmNz1>HDKR?qK%QElBLuyU_}qPj}Tf_0I~?^D~;H`ue4!8 z%4}(CFAt^@t zs;ub_M!=08pX6E%BbdfWpv>w~iq%Yzb-m|n1fM7aV3=ntteLdGluUxGs^EdJ3Ic~4 zCNwf^$Nz^+-lU(q6r|UChBzGECaw1$$d+|3-8UZ7i~a^YmkvyQ)%3Q|7QP^+f$72c z=@Ynk`vMJBBd}AII#OGwUyiq<-NW<}2xAkI*1{-r9R`AaE}bnF*NZ)qrytM=#+jW& zENv5cNto^i6qmG)z5wR)*ARxm z(heRmlGUy7f!L;H!J3LJ-Q1F3MsFgV_9ls@!*qjpCh)*CycF`P}$e;(K#29_SMrFFS@k&lK%%*Lhi%>G)puaw8FGXwzP7QQV zd1H9Qk}{o$+1x(hQtzGH_ObV@wUU zEeuR}M?=fcrF(?lsZ-oGO`Wtpc_n~{X{z0BC{||aSP!Fh;swSdcd0iO#82URLug-5 zxQPs@;$Xu|*e2YQ*5LsUOqgjPIOZv&7lxORYxIQBIyIpD++r~89qZ6L6H9Q4wY>d7 zK>gEXiXIihx`)&%`X2ko`mwLH?m?W2gY#JlM2Q4wL;08 zEi9Kx*z#q|}98)#L##8K8hl2s0rf~F>?;oS*=t}Za|EI-yd>$U* zX9-9C0B*a{NF?HM7yro}?It*W{NH>mGSlVY@+=CEUG}(tdM@U2)7=mD&cpX?_gEpa z#Qz+d{#$a&oqorCcf-AX*S)87k}8wD=PulHE6A;V(MfKCd2;E68zQ2E6>?BL{`HCxCd9vbOD|M3B>GRbt$E82x;d+6;M89(URUUr9jIHtj zH=XFd#^C0-_zYEFb<^j6i2eX*xt?`k1_m6L=L^jG8qb#9x7#56Iubaz$ksrg1$uxX 
zr@8n(&>c?p@m2Tjf$VqZI+f@iW)O>9e6|hG%EfkZdM@XiFuLIAhjJG9=NWR)6Ra^Je;mL*qKJ4qBE6x`x<_w$WT@|K^~#|+}L z2p2#2aucf2^Fl!ZG6OYRLQ)}2oBE~O2$ekYAOi+TG?yWnn zROuuYh#nZR?ZaH;{Z{-U7^vhWo^`YUFmJv}G?2rgME9C+brY-(y`q(n=x#8pM-$(w dk57Id`~LQia|`a&!jF^3{>2p{5uOdM{{VmL{dfQX literal 0 HcmV?d00001 diff --git a/observability/local/log-sentinel/investigator/chain.py b/observability/local/log-sentinel/investigator/chain.py new file mode 100644 index 0000000..d293de8 --- /dev/null +++ b/observability/local/log-sentinel/investigator/chain.py @@ -0,0 +1,232 @@ +"""InvestigationChain — Tier 2 LLM-driven investigation of findings.""" + +import json +import logging +import re +import time + +import requests + +from investigator.knowledge import SYSTEM_PROMPT +from investigator.prompts import ( + GATHER_SPECS, + INVESTIGATION_PROMPT, + PATTERN_ANALYSIS_PROMPT, +) +from loki_client import LokiClient +from models import Finding, Investigation, TimeWindow + +logger = logging.getLogger("sentinel.investigator") + + +class InvestigationChain: + def __init__(self, ollama_url: str, model_fast: str, model_deep: str, loki: LokiClient): + self.ollama_url = ollama_url.rstrip("/") + self.model_fast = model_fast + self.model_deep = model_deep + self.loki = loki + + # ── Public API ── + + def investigate(self, finding: Finding) -> Investigation: + """Gather context, call fast model, extract structured result. 
Escalate to deep model on low confidence.""" + gather_start = time.time() + context_lines = self._gather(finding) + gather_ms = int((time.time() - gather_start) * 1000) + + total_lines = sum(len(v) for v in context_lines.values()) + + prompt = INVESTIGATION_PROMPT.format( + title=finding.title, + detector=finding.detector, + severity=finding.severity, + summary=finding.summary, + evidence=self._format_evidence(finding), + context=self._format_context(context_lines), + ) + + infer_start = time.time() + raw = self._call_ollama(self.model_fast, SYSTEM_PROMPT, prompt) + infer_ms = int((time.time() - infer_start) * 1000) + + parsed = self._extract(raw) + + # Escalate to deep model on low confidence + model_used = self.model_fast + if parsed.get("confidence", "low") == "low" and self.model_deep != self.model_fast: + logger.info("Low confidence from fast model, escalating to %s", self.model_deep) + deep_start = time.time() + raw_deep = self._call_ollama(self.model_deep, SYSTEM_PROMPT, prompt) + deep_ms = int((time.time() - deep_start) * 1000) + parsed = self._extract(raw_deep) + raw = raw_deep + infer_ms += deep_ms + model_used = self.model_deep + + return Investigation( + finding=finding, + root_cause=parsed.get("root_cause", "Unable to determine root cause."), + correlation=parsed.get("correlation", "No correlations identified."), + impact=parsed.get("impact", "Impact unknown."), + recommendation=parsed.get("recommendation", "Investigate manually."), + confidence=parsed.get("confidence", "low"), + issue_type=parsed.get("issue_type", "unknown"), + trigger="escalation", + raw_response=raw, + model=model_used, + inference_duration_ms=infer_ms, + gather_duration_ms=gather_ms, + context_lines_gathered=total_lines, + ) + + def investigate_patterns(self, recent_findings: list[Finding]) -> Investigation: + """Proactive T2: analyze recent findings for cross-cutting patterns.""" + summaries = "\n".join( + f"- [{f.severity}] {f.detector}: {f.title} — {f.summary}" + for f in 
recent_findings + ) + + prompt = PATTERN_ANALYSIS_PROMPT.format( + count=len(recent_findings), + window_min=5, + finding_summaries=summaries, + ) + + infer_start = time.time() + raw = self._call_ollama(self.model_fast, SYSTEM_PROMPT, prompt) + infer_ms = int((time.time() - infer_start) * 1000) + + parsed = self._extract(raw) + + # Use first finding as the anchor + anchor = recent_findings[0] if recent_findings else Finding( + detector="pattern_analysis", + severity="info", + title="Pattern analysis", + summary="No findings to analyze.", + ) + + return Investigation( + finding=anchor, + root_cause=parsed.get("root_cause", "No common root cause identified."), + correlation=parsed.get("correlation", "No correlations identified."), + impact=parsed.get("impact", "Impact unknown."), + recommendation=parsed.get("recommendation", "Continue monitoring."), + confidence=parsed.get("confidence", "low"), + issue_type=parsed.get("issue_type", "unknown"), + trigger="proactive", + raw_response=raw, + model=self.model_fast, + inference_duration_ms=infer_ms, + gather_duration_ms=0, + context_lines_gathered=0, + ) + + # ── Ollama call ── + + def _call_ollama(self, model: str, system: str, prompt: str) -> str: + """POST /api/generate to Ollama. 
Returns raw text response.""" + try: + resp = requests.post( + f"{self.ollama_url}/api/generate", + json={ + "model": model, + "system": system, + "prompt": prompt, + "stream": False, + "options": { + "temperature": 0.3, + "num_predict": 1024, + }, + }, + timeout=600, + ) + if resp.status_code != 200: + logger.warning("Ollama returned %d: %s", resp.status_code, resp.text[:200]) + return "" + return resp.json().get("response", "") + except requests.exceptions.Timeout: + logger.warning("Ollama request timed out (model=%s)", model) + return "" + except Exception as e: + logger.warning("Ollama call failed: %s", e) + return "" + + # ── Extract structured fields from raw LLM output ── + + def _extract(self, raw: str) -> dict: + """Parse ROOT_CAUSE, CORRELATION, IMPACT, RECOMMENDATION, CONFIDENCE, ISSUE_TYPE from raw text.""" + result = {} + + patterns = { + "root_cause": r"ROOT_CAUSE:\s*(.+?)(?=\n(?:CORRELATION|IMPACT|RECOMMENDATION|CONFIDENCE|ISSUE_TYPE):|$)", + "correlation": r"CORRELATION:\s*(.+?)(?=\n(?:IMPACT|RECOMMENDATION|CONFIDENCE|ISSUE_TYPE):|$)", + "impact": r"IMPACT:\s*(.+?)(?=\n(?:RECOMMENDATION|CONFIDENCE|ISSUE_TYPE):|$)", + "recommendation": r"RECOMMENDATION:\s*(.+?)(?=\n(?:CONFIDENCE|ISSUE_TYPE):|$)", + "confidence": r"CONFIDENCE:\s*(low|medium|high)", + "issue_type": r"ISSUE_TYPE:\s*(bug|config|performance|security|unknown)", + } + + for key, pattern in patterns.items(): + match = re.search(pattern, raw, re.DOTALL | re.IGNORECASE) + if match: + result[key] = match.group(1).strip() + + # Normalize confidence + confidence = result.get("confidence", "low").lower() + if confidence not in ("low", "medium", "high"): + confidence = "low" + result["confidence"] = confidence + + # Normalize issue_type + issue_type = result.get("issue_type", "unknown").lower() + if issue_type not in ("bug", "config", "performance", "security", "unknown"): + issue_type = "unknown" + result["issue_type"] = issue_type + + return result + + # ── Gather context from Loki ── + + def 
_gather(self, finding: Finding) -> dict[str, list[dict]]: + """Run GATHER_SPECS queries for the finding's detector, return label -> lines.""" + specs = GATHER_SPECS.get(finding.detector, []) + if not specs: + logger.debug("No gather specs for detector %s", finding.detector) + return {} + + result: dict[str, list[dict]] = {} + for spec in specs: + window = TimeWindow.from_now(spec.lookback_sec) + lines = self.loki.query_lines(spec.logql, window.start_ns, window.end_ns, limit=spec.limit) + result[spec.label] = lines + + return result + + # ── Format helpers ── + + @staticmethod + def _format_evidence(finding: Finding) -> str: + """Format finding evidence as indented JSON.""" + if not finding.evidence: + return "(no evidence attached)" + try: + return json.dumps(finding.evidence, indent=2, default=str) + except (TypeError, ValueError): + return str(finding.evidence) + + @staticmethod + def _format_context(context_lines: dict[str, list[dict]]) -> str: + """Format gathered context as numbered lists per label.""" + if not context_lines: + return "(no additional context gathered)" + + sections = [] + for label, lines in context_lines.items(): + if not lines: + sections.append(f"[{label}]: (0 lines)") + continue + numbered = "\n".join(f" {i+1}. {json.dumps(line, default=str)}" for i, line in enumerate(lines[:50])) + sections.append(f"[{label}] ({len(lines)} lines):\n{numbered}") + + return "\n\n".join(sections) diff --git a/observability/local/log-sentinel/investigator/knowledge.py b/observability/local/log-sentinel/investigator/knowledge.py new file mode 100644 index 0000000..cc4b62c --- /dev/null +++ b/observability/local/log-sentinel/investigator/knowledge.py @@ -0,0 +1,47 @@ +"""Domain knowledge system prompt for the Log Sentinel investigator.""" + +SYSTEM_PROMPT = """\ +You are a diagnostic analyst for SimSteward, an iRacing incident-review tool. + +ARCHITECTURE: +- C# SimHub plugin (.NET 4.8) reads iRacing shared memory via IRSDKSharper. 
+- Plugin exposes actions over a Fleck WebSocket bridge (0.0.0.0). +- Browser dashboard (HTML/JS ES6+) served by SimHub HTTP, connects via WS. +- All components emit structured JSON logs shipped to Loki. + +LOG SCHEMA: +- Labels: app (sim-steward|claude-dev-logging), env, level, component, event, domain. +- Domains: action, ui, iracing, system. Components: plugin, bridge, dashboard, lifecycle. +- Key events: action_dispatched, action_result, dashboard_ui_event, ws_client_connected, + ws_client_disconnected, incident_detected, iracing_session_start, iracing_session_end, + iracing_mode_change, iracing_replay_seek, host_resource_sample, plugin_ready. + +USER WORKFLOWS: +1. Session health: dashboard opens -> WS connects -> plugin ready. +2. Review incident: click row -> seek_to_incident dispatched -> result. +3. Walk driver: find_driver_incidents -> seek per incident -> results. +4. Walk session: find_all_incidents -> seek per incident -> results. +5. Capture incident: capture_incident dispatched -> result. +6. Transport controls: play/pause/rewind dispatched -> result. +7. Silent session: iRacing connected but no meaningful events. + +CLAUDE CODE / MCP: +- Claude hooks emit to app=claude-dev-logging with component=lifecycle|tool|mcp-*|agent. +- Hook types: session-start, session-end, stop, pre-compact, post-tool-use. +- MCP tool calls tracked by tool_name, session_id, duration_ms. + +iRACING SPECIFICS: +- Incident deltas: 1x off-track, 2x wall/spin, 4x heavy contact. +- Admin limitation: live races show 0 incidents for non-admin drivers. +- Replay at 16x batches YAML incident events; cross-ref CarIdxGForce + CarIdxTrackSurface. +- replayFrameNum/replayFrameNumEnd are inverted vs SDK naming in plugin code. + +COMMON FAILURES: +- WS bridge_start_failed: port conflict or firewall. +- Action consecutive failures: stuck user retrying broken action. +- Silent session: plugin connected but dashboard never loads or WS rejected. 
+- Error spikes: usually deploy regression or iRacing API timeout. +- Empty Claude session: hooks fire but no tool use (config or auth issue). + +Analyze evidence. Be specific. Cite log events and timestamps.\ +""" diff --git a/observability/local/log-sentinel/investigator/prompts.py b/observability/local/log-sentinel/investigator/prompts.py new file mode 100644 index 0000000..2976c9e --- /dev/null +++ b/observability/local/log-sentinel/investigator/prompts.py @@ -0,0 +1,142 @@ +"""Prompt templates and gather specifications for the investigator.""" + +from dataclasses import dataclass, field + + +@dataclass +class GatherQuery: + label: str + logql: str + lookback_sec: int = 300 + limit: int = 100 + + +# ── Investigation prompt ── + +INVESTIGATION_PROMPT = """\ +FINDING: {title} +DETECTOR: {detector} +SEVERITY: {severity} +SUMMARY: {summary} + +EVIDENCE: +{evidence} + +GATHERED CONTEXT: +{context} + +Analyze the finding and gathered context. Respond with EXACTLY these sections: + +ROOT_CAUSE: +CORRELATION: +IMPACT: +RECOMMENDATION: +CONFIDENCE: +ISSUE_TYPE: +""" + + +# ── Pattern analysis prompt (proactive T2) ── + +PATTERN_ANALYSIS_PROMPT = """\ +RECENT FINDINGS ({count} in last {window_min} minutes): +{finding_summaries} + +Analyze these findings for cross-cutting patterns, systemic issues, or escalating trends. +Respond with EXACTLY these sections: + +ROOT_CAUSE: +CORRELATION: +IMPACT: +RECOMMENDATION: +CONFIDENCE: +ISSUE_TYPE: +""" + + +# ── Gather specifications per detector ── +# Each detector maps to a list of GatherQuery objects whose results provide +# context for the LLM investigation. Queries are aligned to the cache keys +# defined in query_cache.QUERIES so results are already warm when possible. 
+ +GATHER_SPECS: dict[str, list[GatherQuery]] = { + # ── app detectors ── + "action_failure": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 200), + GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 50), + ], + "error_spike": [ + GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 200), + GatherQuery("warnings", '{app="sim-steward", level="WARN"} | json', 300, 100), + GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|plugin_stopped|deploy_marker"', 300, 50), + ], + "silent_session": [ + GatherQuery("all_events", '{app="sim-steward"} | json', 300, 200), + GatherQuery("ws_events", '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected"', 300, 50), + GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"iracing_connected|iracing_disconnected|plugin_ready"', 300, 50), + ], + "stuck_user": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + GatherQuery("ui_events", '{app="sim-steward", event="dashboard_ui_event"} | json', 300, 100), + GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 50), + ], + "websocket_health": [ + GatherQuery("ws_events", '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected|bridge_start_failed"', 300, 200), + GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|bridge_starting"', 300, 50), + GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 50), + ], + # ── ops detectors ── + "claude_session": [ + GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 200), + GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 100), + 
GatherQuery("errors", '{app="claude-dev-logging", level="ERROR"} | json', 300, 50), + ], + "claude_tool_failure": [ + GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 200), + GatherQuery("errors", '{app="claude-dev-logging", level="ERROR"} | json', 300, 50), + GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), + ], + "claude_token_burn": [ + GatherQuery("tokens", '{app="claude-token-metrics"} | json', 300, 200), + GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), + GatherQuery("agents", '{app="claude-dev-logging", component="agent"} | json', 300, 50), + ], + "claude_agent_loop": [ + GatherQuery("agents", '{app="claude-dev-logging", component="agent"} | json', 300, 200), + GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 100), + GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), + ], + "claude_error_spike": [ + GatherQuery("errors", '{app="claude-dev-logging", level="ERROR"} | json', 300, 200), + GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), + GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 100), + ], + # ── flow-based detectors ── + "flow_session_health": [ + GatherQuery("ws_events", '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected"', 300, 100), + GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|dashboard_opened"', 300, 50), + ], + "flow_review_incident": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + GatherQuery("ui_events", '{app="sim-steward", event="dashboard_ui_event"} | json', 300, 100), + ], + "flow_walk_driver": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + 
GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 200), + GatherQuery("incidents", '{app="sim-steward", event="incident_detected"} | json', 300, 100), + ], + "flow_walk_session": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 200), + GatherQuery("incidents", '{app="sim-steward", event="incident_detected"} | json', 300, 100), + ], + "flow_capture_incident": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 100), + ], + "flow_transport_controls": [ + GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), + GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 100), + ], +} diff --git a/observability/local/log-sentinel/loki_client.py b/observability/local/log-sentinel/loki_client.py new file mode 100644 index 0000000..ebbc357 --- /dev/null +++ b/observability/local/log-sentinel/loki_client.py @@ -0,0 +1,176 @@ +"""Loki HTTP API client — query + push, with structured sentinel event helpers.""" + +import json +import logging +import time +from datetime import datetime, timezone + +import requests + +logger = logging.getLogger("sentinel.loki") + + +class LokiClient: + def __init__(self, base_url: str, timeout: int = 5): + self.base_url = base_url.rstrip("/") + self.timeout = timeout + + # ── Time helpers ── + + @staticmethod + def now_ns() -> int: + return int(datetime.now(timezone.utc).timestamp() * 1e9) + + @staticmethod + def now_minus_ms(offset_ms: int) -> int: + return int((datetime.now(timezone.utc).timestamp() * 1000 - offset_ms) * 1e6) + + # ── Query API ── + + def count(self, logql: str, start_ns: int, end_ns: int) -> int: + try: + resp = requests.get( 
+ f"{self.base_url}/loki/api/v1/query_range", + params={"query": logql, "start": str(start_ns), "end": str(end_ns), "limit": 1000, "direction": "forward"}, + timeout=self.timeout, + ) + if resp.status_code != 200: + return -1 + total = 0 + for stream in resp.json().get("data", {}).get("result", []): + total += len(stream.get("values", [])) + return total + except Exception as e: + logger.warning("Loki count error: %s", e) + return -1 + + def query_lines(self, logql: str, start_ns: int, end_ns: int, limit: int = 1000) -> list[dict]: + try: + resp = requests.get( + f"{self.base_url}/loki/api/v1/query_range", + params={"query": logql, "start": str(start_ns), "end": str(end_ns), "limit": limit, "direction": "forward"}, + timeout=self.timeout, + ) + if resp.status_code != 200: + return [] + lines = [] + for stream in resp.json().get("data", {}).get("result", []): + for pair in stream.get("values", []): + if len(pair) >= 2: + try: + lines.append(json.loads(pair[1])) + except (json.JSONDecodeError, TypeError): + pass + return lines + except Exception: + return [] + + # ── Push API ── + + def push(self, entry: dict, env: str = "local"): + """Push a single log entry to Loki. 
Fire-and-forget.""" + try: + ts_ns = str(int(time.time() * 1e9)) + stream_labels = {"app": "sim-steward", "env": env, "level": entry.get("level", "INFO")} + for key in ("component", "event", "domain"): + val = entry.get(key) + if val: + stream_labels[key] = val + payload = {"streams": [{"stream": stream_labels, "values": [[ts_ns, json.dumps(entry)]]}]} + requests.post(f"{self.base_url}/loki/api/v1/push", json=payload, timeout=3) + except Exception as e: + logger.debug("Loki push error: %s", e) + + # ── Sentinel event helpers ── + + def push_finding(self, finding, env: str = "local"): + entry = { + "level": "WARN" if finding.severity in ("warn", "critical") else "INFO", + "message": finding.title, + "timestamp": finding.timestamp, + "component": "log-sentinel", + "event": "sentinel_finding", + "domain": "system", + "finding_id": finding.finding_id, + "detector": finding.detector, + "category": finding.category, + "severity": finding.severity, + "title": finding.title, + "summary": finding.summary, + "fingerprint": finding.fingerprint, + "escalated_to_t2": finding.escalate_to_t2, + "logql_query": finding.logql_query, + "flow_context": finding.flow_context, + **finding.evidence, + } + self.push(entry, env) + + def push_investigation(self, investigation, env: str = "local"): + entry = { + "level": "INFO", + "message": f"Investigation: {investigation.root_cause[:120]}", + "timestamp": investigation.timestamp, + "component": "log-sentinel", + "event": "sentinel_investigation", + "domain": "system", + "investigation_id": investigation.investigation_id, + "finding_id": investigation.finding.finding_id, + "detector": investigation.finding.detector, + "category": investigation.finding.category, + "trigger": investigation.trigger, + "model": investigation.model, + "confidence": investigation.confidence, + "issue_type": investigation.issue_type, + "root_cause": investigation.root_cause, + "correlation": investigation.correlation, + "impact": investigation.impact, + 
"recommendation": investigation.recommendation, + "inference_duration_ms": investigation.inference_duration_ms, + "gather_duration_ms": investigation.gather_duration_ms, + "context_lines_gathered": investigation.context_lines_gathered, + } + self.push(entry, env) + + def push_cycle(self, cycle_data: dict, env: str = "local"): + entry = { + "level": "INFO", + "message": f"Cycle #{cycle_data['cycle_num']}: {cycle_data['finding_count']} findings, {cycle_data['escalated_count']} escalated", + "component": "log-sentinel", + "event": "sentinel_cycle", + "domain": "system", + **cycle_data, + } + self.push(entry, env) + + def push_detector_run(self, run_data: dict, env: str = "local"): + entry = { + "level": "ERROR" if run_data.get("error") else "INFO", + "message": f"Detector {run_data['detector']}: {run_data['finding_count']} findings in {run_data['duration_ms']}ms", + "component": "log-sentinel", + "event": "sentinel_detector_run", + "domain": "system", + **run_data, + } + self.push(entry, env) + + def push_t2_run(self, t2_data: dict, env: str = "local"): + entry = { + "level": "INFO", + "message": f"T2 {t2_data['tier']}: {t2_data['model']} confidence={t2_data.get('confidence', '?')} in {t2_data.get('total_duration_ms', '?')}ms", + "component": "log-sentinel", + "event": "sentinel_t2_run", + "domain": "system", + **t2_data, + } + self.push(entry, env) + + def push_sentry_event(self, sentry_data: dict, env: str = "local"): + entry = { + "level": "INFO", + "message": f"Sentry issue: {sentry_data.get('title', '?')[:100]}", + "component": "log-sentinel", + "event": "sentinel_sentry_issue", + "domain": "system", + **sentry_data, + } + self.push(entry, env) diff --git a/observability/local/log-sentinel/loki_handler.py b/observability/local/log-sentinel/loki_handler.py new file mode 100644 index 0000000..e8a9dcd --- /dev/null +++ b/observability/local/log-sentinel/loki_handler.py @@ -0,0 +1,66 @@ +"""Python logging handler that pushes log records to Loki.""" + +import json 
+import logging +import time +import threading + +import requests + + +class LokiHandler(logging.Handler): + def __init__(self, loki_url: str, env: str = "local", flush_interval: float = 2.0): + super().__init__() + self.loki_url = loki_url.rstrip("/") + self.env = env + self.flush_interval = flush_interval + self._buffer = [] + self._lock = threading.Lock() + self._start_flush_timer() + + def _start_flush_timer(self): + self._timer = threading.Timer(self.flush_interval, self._flush_loop) + self._timer.daemon = True + self._timer.start() + + def _flush_loop(self): + self._flush() + self._start_flush_timer() + + def emit(self, record: logging.LogRecord): + try: + entry = { + "level": record.levelname, + "message": self.format(record), + "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime(record.created)), + "component": "log-sentinel", + "event": "sentinel_log", + "domain": "system", + "logger": record.name, + "func": record.funcName, + } + with self._lock: + self._buffer.append(entry) + except Exception: + self.handleError(record) + + def _flush(self): + with self._lock: + if not self._buffer: + return + entries = self._buffer[:] + self._buffer.clear() + by_level = {} + for e in entries: + by_level.setdefault(e["level"], []).append(e) + streams = [] + for level, group in by_level.items(): + values = [[str(int(time.time() * 1e9)), json.dumps(e)] for e in group] + streams.append({ + "stream": {"app": "sim-steward", "env": self.env, "level": level, "component": "log-sentinel", "event": "sentinel_log", "domain": "system"}, + "values": values, + }) + try: + requests.post(f"{self.loki_url}/loki/api/v1/push", json={"streams": streams}, timeout=3) + except Exception: + pass diff --git a/observability/local/log-sentinel/models.py b/observability/local/log-sentinel/models.py new file mode 100644 index 0000000..e815079 --- /dev/null +++ b/observability/local/log-sentinel/models.py @@ -0,0 +1,100 @@ +"""Data models for Log Sentinel findings and investigations.""" 
+ +import hashlib +import uuid +from dataclasses import dataclass, field +from datetime import datetime, timezone + + +@dataclass +class TimeWindow: + start_ns: int + end_ns: int + duration_sec: int + + @classmethod + def from_now(cls, lookback_sec: int) -> "TimeWindow": + now_ms = int(datetime.now(timezone.utc).timestamp() * 1000) + end_ns = now_ms * 1_000_000 + start_ns = (now_ms - lookback_sec * 1000) * 1_000_000 + return cls(start_ns=start_ns, end_ns=end_ns, duration_sec=lookback_sec) + + +@dataclass +class Finding: + detector: str + severity: str # "info" | "warn" | "critical" + title: str + summary: str + category: str = "app" # "app" | "ops" + evidence: dict = field(default_factory=dict) + timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) + finding_id: str = field(default_factory=lambda: str(uuid.uuid4())) + escalate_to_t2: bool = False + flow_context: str = "" + logql_query: str = "" + + @property + def fingerprint(self) -> str: + """Stable hash for deduplication: same detector + title = same fingerprint.""" + raw = f"{self.detector}:{self.title}" + return hashlib.sha256(raw.encode()).hexdigest()[:16] + + +@dataclass +class Investigation: + finding: Finding + root_cause: str + correlation: str + impact: str + recommendation: str + confidence: str # "low" | "medium" | "high" + issue_type: str = "unknown" # "bug" | "config" | "performance" | "security" | "unknown" + trigger: str = "escalation" # "escalation" | "proactive" + raw_response: str = "" + model: str = "" + investigation_id: str = field(default_factory=lambda: str(uuid.uuid4())) + timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) + inference_duration_ms: int = 0 + gather_duration_ms: int = 0 + context_lines_gathered: int = 0 + + +@dataclass +class FlowStep: + id: str + event: str + label: str + filters: dict = field(default_factory=dict) + timeout_sec: int = 0 + optional: bool = False + next_steps: list = 
field(default_factory=list) + + +@dataclass +class FlowDefinition: + name: str + display_name: str + description: str + source_doc: str + steps: list + expected_completion_sec: int = 0 + gap_severity: str = "warn" + + +@dataclass +class FlowGap: + step: FlowStep + flow: FlowDefinition + description: str = "" + + +@dataclass +class FlowEvaluation: + flow: FlowDefinition + matched_steps: dict = field(default_factory=dict) + gaps: list = field(default_factory=list) + + @property + def complete(self) -> bool: + return len(self.gaps) == 0 diff --git a/observability/local/log-sentinel/query_cache.py b/observability/local/log-sentinel/query_cache.py new file mode 100644 index 0000000..2a750e4 --- /dev/null +++ b/observability/local/log-sentinel/query_cache.py @@ -0,0 +1,102 @@ +"""Shared Loki query cache — run common queries once per cycle, share results across detectors.""" + +import logging +import time + +from loki_client import LokiClient +from models import TimeWindow + +logger = logging.getLogger("sentinel.cache") + +# Predefined query keys. Each maps to a LogQL query and the app stream it targets. 
+QUERIES = { + # sim-steward (app detectors) + "ss_all": '{app="sim-steward"} | json', + "ss_errors": '{app="sim-steward", level="ERROR"} | json', + "ss_actions": '{app="sim-steward", event="action_result"} | json', + "ss_lifecycle": '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|plugin_stopped|iracing_connected|iracing_disconnected|bridge_starting|bridge_start_failed|deploy_marker"', + "ss_ws": '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected|bridge_start_failed"', + "ss_incidents": '{app="sim-steward", event="incident_detected"} | json', + "ss_digests": '{app="sim-steward", event="session_digest"} | json', + "ss_resources": '{app="sim-steward", event="host_resource_sample"} | json', + # claude-dev-logging (ops detectors) + "claude_all": '{app="claude-dev-logging"} | json', + "claude_lifecycle": '{app="claude-dev-logging", component="lifecycle"} | json', + "claude_tools": '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', + "claude_agents": '{app="claude-dev-logging", component="agent"} | json', + "claude_errors": '{app="claude-dev-logging", level="ERROR"} | json', + "claude_tokens": '{app="claude-token-metrics"} | json', + # sentinel self-monitoring + "sentinel_findings": '{app="sim-steward", component="log-sentinel", event="sentinel_finding"} | json', + "sentinel_cycles": '{app="sim-steward", component="log-sentinel", event="sentinel_cycle"} | json', + "sentinel_t2": '{app="sim-steward", component="log-sentinel", event="sentinel_t2_run"} | json', +} + + +class CycleQueryCache: + """Runs all predefined queries once, caches results for detector access.""" + + def __init__(self, loki: LokiClient): + self.loki = loki + self._cache: dict[str, list[dict]] = {} + self._durations: dict[str, int] = {} + + def populate(self, window: TimeWindow, keys: list[str] | None = None): + """Run queries and cache results. 
If keys=None, run all.""" + target_keys = keys or list(QUERIES.keys()) + self._cache.clear() + self._durations.clear() + + for key in target_keys: + logql = QUERIES.get(key) + if not logql: + continue + start = time.time() + try: + lines = self.loki.query_lines(logql, window.start_ns, window.end_ns, limit=1000) + self._cache[key] = lines + except Exception as e: + logger.warning("Cache query '%s' failed: %s", key, e) + self._cache[key] = [] + self._durations[key] = int((time.time() - start) * 1000) + + total = sum(len(v) for v in self._cache.values()) + logger.info( + "Cache populated: %d queries, %d total lines, %dms", + len(self._cache), total, sum(self._durations.values()), + ) + + def get(self, key: str) -> list[dict]: + """Get cached results for a query key. Returns empty list if not cached.""" + return self._cache.get(key, []) + + def get_by_severity(self, key: str) -> dict[str, list[dict]]: + """Get cached results grouped by level: errors first, then warnings, then info.""" + lines = self.get(key) + grouped = {"ERROR": [], "WARN": [], "INFO": [], "DEBUG": []} + for line in lines: + level = (line.get("level") or "INFO").upper() + grouped.setdefault(level, []).append(line) + return grouped + + def filter(self, key: str, **field_filters) -> list[dict]: + """Filter cached results by field values.""" + lines = self.get(key) + results = [] + for line in lines: + fields = line.get("fields", {}) + match = all( + fields.get(k) == v or line.get(k) == v + for k, v in field_filters.items() + ) + if match: + results.append(line) + return results + + @property + def stats(self) -> dict: + return { + "queries": len(self._cache), + "total_lines": sum(len(v) for v in self._cache.values()), + "durations": self._durations, + } diff --git a/observability/local/log-sentinel/requirements.txt b/observability/local/log-sentinel/requirements.txt new file mode 100644 index 0000000..c5a68ee --- /dev/null +++ b/observability/local/log-sentinel/requirements.txt @@ -0,0 +1,6 @@ 
+flask>=3.0.0 +requests>=2.31.0 +pyyaml>=6.0.1 +schedule>=1.2.0 +sentry-sdk>=2.0.0 +pytest>=8.0.0 diff --git a/observability/local/log-sentinel/sentinel.py b/observability/local/log-sentinel/sentinel.py new file mode 100644 index 0000000..6fe0c78 --- /dev/null +++ b/observability/local/log-sentinel/sentinel.py @@ -0,0 +1,377 @@ +"""Log Sentinel main loop — parallel detectors, async T2, dedup, circuit breakers, 100% logging.""" + +import logging +import queue +import threading +import time +import uuid +from concurrent.futures import ThreadPoolExecutor, as_completed + +import schedule + +from circuit_breaker import CircuitBreaker +from config import Config +from grafana_client import GrafanaClient +from loki_client import LokiClient +from models import Finding, TimeWindow +from query_cache import CycleQueryCache +from sentry_client import SentryClient + +from detectors.error_spike import ErrorSpikeDetector +from detectors.action_failure import ActionFailureDetector +from detectors.websocket_health import WebSocketHealthDetector +from detectors.silent_session import SilentSessionDetector +from detectors.flow_gap import FlowGapDetector +from detectors.stuck_user import StuckUserDetector +from detectors.incident_anomaly import IncidentAnomalyDetector +from detectors.plugin_lifecycle import PluginLifecycleDetector +from detectors.resource_health import ResourceHealthDetector +from detectors.session_quality import SessionQualityDetector +from detectors.claude_session import ClaudeSessionDetector +from detectors.mcp_health import McpHealthDetector +from detectors.agent_loop import AgentLoopDetector +from detectors.tool_patterns import ToolPatternsDetector +from detectors.token_usage import TokenUsageDetector +from detectors.sentinel_health import SentinelHealthDetector + +from flows.engine import FlowEngine +from investigator.chain import InvestigationChain + +logger = logging.getLogger("sentinel") + + +class Sentinel: + def __init__(self, config: Config): + self.config = 
config + self.loki = LokiClient(config.loki_url) + self.grafana = GrafanaClient(config.grafana_url, config.grafana_user, config.grafana_password) + self.sentry = SentryClient(config.sentry_dsn, config.env_label) + self.cache = CycleQueryCache(self.loki) + self.flow_engine = FlowEngine("flows/definitions") + + # Circuit breakers + self.loki_breaker = CircuitBreaker("loki", failure_threshold=3, backoff_sec=60) + self.ollama_breaker = CircuitBreaker("ollama", failure_threshold=3, backoff_sec=120) + + # In-memory stats for sentinel_health detector (avoids circular Loki query) + self._stats = { + "last_cycle_duration_ms": 0, + "consecutive_detector_errors": 0, + "last_t2_duration_ms": 0, + "t2_queue_size": 0, + "cycles_completed": 0, + } + + # Detectors — app category + self.detectors = [ + ErrorSpikeDetector(), + ActionFailureDetector(), + WebSocketHealthDetector(), + SilentSessionDetector(), + FlowGapDetector(self.flow_engine), + StuckUserDetector(), + IncidentAnomalyDetector(), + PluginLifecycleDetector(), + ResourceHealthDetector(), + SessionQualityDetector(), + # ops category + ClaudeSessionDetector(), + McpHealthDetector(), + AgentLoopDetector(), + ToolPatternsDetector(), + TokenUsageDetector(), + SentinelHealthDetector(self._stats), + ] + + # T2 investigator + self.investigator = None + self._t2_queue: queue.Queue = queue.Queue() + if config.t2_enabled: + self.investigator = InvestigationChain( + ollama_url=config.ollama_url, + model_fast=config.ollama_model_fast, + model_deep=config.ollama_model_deep, + loki=self.loki, + ) + + # Dedup: fingerprint → last_seen_timestamp + self._seen_fingerprints: dict[str, float] = {} + # T2 dedup: fingerprint → last_investigated_time + self._investigated_fingerprints: dict[str, float] = {} + self._proactive_hash: str = "" + + self._cycle_count = 0 + + # ── T1 Cycle ── + + def run_cycle(self): + """Single T1 detection cycle with parallel execution and 100% logging.""" + cycle_id = str(uuid.uuid4())[:8] + self._cycle_count += 1 + 
cycle_start = time.time() + + window = TimeWindow.from_now(self.config.lookback_sec) + + # Populate shared query cache (one Loki call per query key) + if not self.loki_breaker.allow_request(): + logger.warning("Cycle #%d skipped: Loki circuit open", self._cycle_count) + return + + try: + self.cache.populate(window) + self.loki_breaker.record_success() + except Exception as e: + self.loki_breaker.record_failure() + logger.error("Cache populate failed: %s", e) + return + + # Run all detectors in parallel + all_findings: list[Finding] = [] + detector_errors = 0 + + with ThreadPoolExecutor(max_workers=4) as pool: + futures = { + pool.submit(self._run_detector, det, cycle_id): det + for det in self.detectors + } + for future in as_completed(futures): + det = futures[future] + try: + findings = future.result() + all_findings.extend(findings) + except Exception as e: + detector_errors += 1 + logger.error("Detector %s failed: %s", det.name, e) + + # Update stats for sentinel_health + self._stats["consecutive_detector_errors"] = ( + self._stats["consecutive_detector_errors"] + detector_errors + if detector_errors > 0 else 0 + ) + + # Dedup and process findings — priority order: critical, warn, info + all_findings.sort(key=lambda f: {"critical": 0, "warn": 1, "info": 2}.get(f.severity, 3)) + + escalated = 0 + suppressed = 0 + for finding in all_findings: + if self._is_duplicate(finding): + suppressed += 1 + continue + + self.loki.push_finding(finding, self.config.env_label) + + if finding.severity in ("warn", "critical"): + self.grafana.annotate(finding) + + # Critical findings → Sentry immediately + if finding.severity == "critical": + event_id = self.sentry.create_issue(finding) + if event_id: + self.loki.push_sentry_event({ + "finding_id": finding.finding_id, + "sentry_event_id": event_id, + "title": finding.title, + "level": "error", + }, self.config.env_label) + + # Escalate to T2 (non-blocking, with dedup) + if finding.escalate_to_t2 and self.investigator: + fp = 
finding.fingerprint + last_inv = self._investigated_fingerprints.get(fp, 0) + if time.time() - last_inv < 900: # 15 min T2 dedup window + logger.debug("T2 dedup: skipping %s (investigated %ds ago)", fp[:8], int(time.time() - last_inv)) + else: + self._investigated_fingerprints[fp] = time.time() + escalated += 1 + self._t2_queue.put(("escalation", finding)) + + # Emit cycle metrics + cycle_duration_ms = int((time.time() - cycle_start) * 1000) + self._stats["last_cycle_duration_ms"] = cycle_duration_ms + self._stats["cycles_completed"] = self._cycle_count + self._stats["t2_queue_size"] = self._t2_queue.qsize() + + app_findings = sum(1 for f in all_findings if f.category == "app" and not self._is_duplicate(f)) + ops_findings = sum(1 for f in all_findings if f.category == "ops" and not self._is_duplicate(f)) + + self.loki.push_cycle({ + "cycle_id": cycle_id, + "cycle_num": self._cycle_count, + "duration_ms": cycle_duration_ms, + "finding_count": len(all_findings) - suppressed, + "suppressed_count": suppressed, + "escalated_count": escalated, + "error_count": detector_errors, + "app_findings": app_findings, + "ops_findings": ops_findings, + "cache_queries": self.cache.stats["queries"], + "cache_lines": self.cache.stats["total_lines"], + }, self.config.env_label) + + logger.info( + "Cycle #%d: %d findings (%d suppressed), %d escalated, %d errors, %dms", + self._cycle_count, len(all_findings) - suppressed, suppressed, + escalated, detector_errors, cycle_duration_ms, + ) + + def _run_detector(self, detector, cycle_id: str) -> list[Finding]: + """Run a single detector with timing and logging.""" + start = time.time() + error_msg = None + findings = [] + try: + findings = detector.detect(self.cache) + except Exception as e: + error_msg = str(e) + raise + finally: + duration_ms = int((time.time() - start) * 1000) + self.loki.push_detector_run({ + "cycle_id": cycle_id, + "detector": detector.name, + "category": detector.category, + "duration_ms": duration_ms, + 
"finding_count": len(findings), + "error": error_msg, + }, self.config.env_label) + return findings + + # ── Dedup ── + + def _is_duplicate(self, finding: Finding) -> bool: + fp = finding.fingerprint + now = time.time() + last_seen = self._seen_fingerprints.get(fp) + if last_seen and (now - last_seen) < self.config.dedup_window_sec: + return True + self._seen_fingerprints[fp] = now + # Clean old entries + cutoff = now - self.config.dedup_window_sec * 2 + self._seen_fingerprints = { + k: v for k, v in self._seen_fingerprints.items() if v > cutoff + } + return False + + # ── T2 Background Thread ── + + def _t2_worker(self): + """Background thread that processes T2 investigations from the queue.""" + logger.info("T2 worker started") + while True: + try: + trigger, payload = self._t2_queue.get(timeout=5) + except queue.Empty: + continue + + if not self.ollama_breaker.allow_request(): + logger.warning("T2 skipped: Ollama circuit open") + self._t2_queue.task_done() + continue + + try: + if trigger == "escalation": + investigation = self.investigator.investigate(payload) + elif trigger == "proactive": + investigation = self.investigator.investigate_patterns(payload) + else: + self._t2_queue.task_done() + continue + + self.ollama_breaker.record_success() + + # Push results + self.loki.push_investigation(investigation, self.config.env_label) + self.grafana.annotate_investigation(investigation) + self.loki.push_t2_run({ + "investigation_id": investigation.investigation_id, + "finding_id": investigation.finding.finding_id if trigger == "escalation" else "proactive", + "trigger": trigger, + "tier": f"t2_{'deep' if investigation.model == self.config.ollama_model_deep else 'fast'}", + "model": investigation.model, + "gather_duration_ms": investigation.gather_duration_ms, + "inference_duration_ms": investigation.inference_duration_ms, + "total_duration_ms": investigation.gather_duration_ms + investigation.inference_duration_ms, + "context_lines": 
investigation.context_lines_gathered, + "confidence": investigation.confidence, + "issue_type": investigation.issue_type, + "escalated_to_deep": investigation.model == self.config.ollama_model_deep, + }, self.config.env_label) + + # T2 investigations → Sentry + sentry_id = self.sentry.create_investigation_issue(investigation) + if sentry_id: + self.loki.push_sentry_event({ + "investigation_id": investigation.investigation_id, + "sentry_event_id": sentry_id, + "title": investigation.root_cause[:100], + "level": "error" if investigation.finding.severity == "critical" else "warning", + }, self.config.env_label) + + logger.info( + "T2 complete [%s]: %s confidence=%s model=%s type=%s", + trigger, investigation.investigation_id[:8], + investigation.confidence, investigation.model, investigation.issue_type, + ) + self._stats["last_t2_duration_ms"] = investigation.gather_duration_ms + investigation.inference_duration_ms + + except Exception as e: + self.ollama_breaker.record_failure() + logger.error("T2 investigation failed: %s", e) + finally: + self._t2_queue.task_done() + self._stats["t2_queue_size"] = self._t2_queue.qsize() + + def _t2_proactive_poll(self): + """Periodically query L1 findings and ask T2 to analyze patterns.""" + import hashlib + if not self.investigator: + return + window = TimeWindow.from_now(self.config.t2_proactive_interval_sec) + findings = self.loki.query_lines( + '{app="sim-steward", component="log-sentinel", event="sentinel_finding"} | json', + window.start_ns, window.end_ns, limit=100, + ) + if len(findings) >= 3: + # Dedup: skip if same finding set as last poll + fps = sorted(set(f.get("fingerprint", "") for f in findings)) + set_hash = hashlib.sha256("|".join(fps).encode()).hexdigest()[:16] + if set_hash == self._proactive_hash: + logger.debug("T2 proactive dedup: same finding set, skipping") + return + self._proactive_hash = set_hash + logger.info("T2 proactive: analyzing %d recent findings", len(findings)) + self._t2_queue.put(("proactive", 
findings)) + + # ── Lifecycle ── + + def start(self): + """Start all loops.""" + logger.info( + "Sentinel v2 started: %d detectors (app=%d ops=%d), poll %ds, lookback %ds, T2 %s, models: fast=%s deep=%s", + len(self.detectors), + sum(1 for d in self.detectors if d.category == "app"), + sum(1 for d in self.detectors if d.category == "ops"), + self.config.poll_interval_sec, + self.config.lookback_sec, + "enabled" if self.investigator else "disabled", + self.config.ollama_model_fast, + self.config.ollama_model_deep, + ) + + # Start T2 background worker + if self.investigator: + t2_thread = threading.Thread(target=self._t2_worker, daemon=True) + t2_thread.start() + + # Run first cycle immediately + self.run_cycle() + + # Schedule recurring + schedule.every(self.config.poll_interval_sec).seconds.do(self.run_cycle) + if self.investigator: + schedule.every(self.config.t2_proactive_interval_sec).seconds.do(self._t2_proactive_poll) + + while True: + schedule.run_pending() + time.sleep(1) diff --git a/observability/local/log-sentinel/sentry_client.py b/observability/local/log-sentinel/sentry_client.py new file mode 100644 index 0000000..607e285 --- /dev/null +++ b/observability/local/log-sentinel/sentry_client.py @@ -0,0 +1,100 @@ +"""Sentry SDK wrapper — create issues for critical findings and T2 investigations.""" + +import logging + +logger = logging.getLogger("sentinel.sentry") + +_sdk_available = False +try: + import sentry_sdk + _sdk_available = True +except ImportError: + logger.warning("sentry-sdk not installed, Sentry integration disabled") + + +class SentryClient: + def __init__(self, dsn: str, env: str = "local"): + self.enabled = bool(dsn) and _sdk_available + if self.enabled: + sentry_sdk.init( + dsn=dsn, + environment=env, + traces_sample_rate=0.0, + send_default_pii=False, + ) + logger.info("Sentry initialized (env=%s)", env) + else: + if dsn and not _sdk_available: + logger.warning("Sentry DSN set but sentry-sdk not installed") + elif not dsn: + 
logger.info("Sentry disabled (no DSN)") + + def create_issue(self, finding) -> str | None: + """Create Sentry issue for a critical finding. Returns event_id or None.""" + if not self.enabled: + return None + try: + with sentry_sdk.new_scope() as scope: + scope.set_tag("detector", finding.detector) + scope.set_tag("category", finding.category) + scope.set_tag("severity", finding.severity) + scope.set_tag("issue_type", "unknown") + scope.set_context("finding", { + "finding_id": finding.finding_id, + "fingerprint": finding.fingerprint, + "summary": finding.summary, + "logql_query": finding.logql_query, + "evidence": finding.evidence, + }) + scope.fingerprint = [finding.detector, finding.fingerprint] + event_id = sentry_sdk.capture_message( + f"[CRITICAL] {finding.title}", + level="error", + scope=scope, + ) + logger.info("Sentry issue created for finding %s: %s", finding.finding_id[:8], event_id) + return event_id + except Exception as e: + logger.warning("Sentry create_issue failed: %s", e) + return None + + def create_investigation_issue(self, investigation) -> str | None: + """Create Sentry issue for a T2 investigation report. 
Returns event_id or None.""" + if not self.enabled: + return None + try: + finding = investigation.finding + level = "error" if finding.severity == "critical" else "warning" + with sentry_sdk.new_scope() as scope: + scope.set_tag("detector", finding.detector) + scope.set_tag("category", finding.category) + scope.set_tag("severity", finding.severity) + scope.set_tag("model", investigation.model) + scope.set_tag("confidence", investigation.confidence) + scope.set_tag("issue_type", investigation.issue_type) + scope.set_tag("trigger", investigation.trigger) + scope.set_context("investigation", { + "investigation_id": investigation.investigation_id, + "finding_id": finding.finding_id, + "root_cause": investigation.root_cause, + "correlation": investigation.correlation, + "impact": investigation.impact, + "recommendation": investigation.recommendation, + "inference_duration_ms": investigation.inference_duration_ms, + }) + scope.set_context("finding", { + "title": finding.title, + "summary": finding.summary, + "evidence": finding.evidence, + }) + scope.fingerprint = [finding.detector, investigation.root_cause[:50]] + event_id = sentry_sdk.capture_message( + f"[T2] {investigation.root_cause[:120]}", + level=level, + scope=scope, + ) + logger.info("Sentry investigation issue for %s: %s", investigation.investigation_id[:8], event_id) + return event_id + except Exception as e: + logger.warning("Sentry create_investigation_issue failed: %s", e) + return None diff --git a/observability/local/log-sentinel/tests/__init__.py b/observability/local/log-sentinel/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/observability/local/logs/claude-session-metrics.jsonl b/observability/local/logs/claude-session-metrics.jsonl new file mode 100644 index 0000000..031585b --- /dev/null +++ b/observability/local/logs/claude-session-metrics.jsonl @@ -0,0 +1 @@ 
+{"event":"claude_session_metrics","session_id":"94406f61-7d81-49e0-8b78-cea2011dff2e","project":"local","machine":"WIN-PC","env":"local","timestamp":"2026-03-26T21:22:07.242Z","total_input_tokens":669,"total_output_tokens":142861,"total_cache_creation_tokens":1867125,"total_cache_read_tokens":86930799,"total_tokens":143530,"assistant_turns":422,"tool_use_count":0,"model":"claude-opus-4-6","effort":"med","thinking":true,"cost_usd":176.1294} diff --git a/token-cost-dashboard.png b/token-cost-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..742ae8288d24c9ebdc466dd8c3f7fbd5b7781677 GIT binary patch literal 117274 zcmd?PRajeH+cnx2N^vU^oD$rF28tB70!3TgU4uKNI1LV^xTR2v6)8|$6QsBmmr$U% zJIUtxp7;I!Yai~Dee$2Jxn{1pvhF$Nm}87JW8Y{f5#UneK6>|%3b^L_;gVcI+oe$W{#L$IzN0g}bh#KiOyQyuiA&C@`}IwsEc(>p!+)JY%0yeIo9qTNA2dBy_g4 z+Ej*!pU!*!XEcxLMf&XBuENY*WFWo&M2*s3$1VlUhA24IuSFS*WPYkGm!88>TT%f z97DftX_Ko@Tl+t!yJOp9klDP3t-E2-3^y?^r7qi6wEr`6+cL^lXOsnzZT?wwY^9%q z)*|fauj}nDQ3-*UL>ZIaOQu05YiH(#cCF{rn!j^Cp;wm>_{QVZO$K)2g*g~Qv%L=&2suM`LO>$ACD+w{{R^xdK{)2OSuKcORcsHg?=W54F<#uhZQm5B67B1I`0G&Wh;V z?xtu$zfaQSbzJJFUM^qco|@fOG3*7N%&|#W-9`%ixXYky+jZF@nku_)-3`B__dHqq zxOJ)7cCSvU(SB6P#xUKIq`!!&)XWQkvxNL{TBGL+>?tQ)@Lb?w4Sem|eltOo=e_2g z9;BB@*%myo%*IfA-%TTZzE|ynK2FVVI*Ll9zl10HFEvX4y=M^f$~NK+J*_!JiT!6K z($L>4*gCm3o#!n>ZZVcUrYt+2G_2n;yu6fNzQ5*r=Elm#wx#KMUf-B=1dD&W*m~Ab zHcTmfJ#E*13NzVlXvsYqsWUs?$fc-ix0xGm91{s-@SopWL4VsX_e?!yn7Cayw{es7 z2s8}YYD>*_uG{roz8QJGTi23z)h=juoz|EAnx`Z9@5yyw{?VuK#=(KB=)l0a&hDiI zaf|%ndnM_PjqpRM%N{d>nNUgJwGRUWPiZuxE-*4FNAit2GNr|S?pwI|&uMtw@^G<= zng#U_97`^oqjWuQ%4qv@m{`&NKHqq5 z;@x<%vy7XDP>2X&IHJwDgN~K$u=VDYrIh1;_>G4BUST5Z%U0ff8A~~9W5C2N-_;^{ z{?AnmyS%%i;Ks3d3ZABO3y6;fWGd|$PMZxOdYBYx`iO< zZnIJog1dJ4c9uGPCC<*3deta-?~vDge#;;PLujw(XiLCT^@FPjN1DY*cQHtQ}v(pZAsi~>wS!TxFETxoh*UOOrh#N&jRN-~#&1lBu z#-Ym9p9TB1gaysuzvtnNh@ib24#TFN=lS$T6X$53DM+={MN>vz;63NOW2sA3<+9Y_ z)ZzK2L&MVjm`u4OL#^%H1Wo&#^m*KD>rKpT&^|@hqQi<^*c-fAV|10=jhbz>bpCcN 
zpUEX#)}qTw#uULt@XWH&_|*A|SSZzh;mBIKRlm6!o_~Iv7#cFf(^$29VVZM4;u*-W zCqHF?dIb3Thifodv|)4ZlDgVK&aaJP=*q4Vnn*Pq-X?RJjeX5Ggxq?d`^_SD`At!!kN9oGm}UA;T(E6ODdJmz3y z_&K@EmUDB%K#xA?_!WWZF3mYh5T$MI*-S--HwvnRXL1ydOlX>UpROsnwp@}dO#XaH z|6_}yem!eQO1n1ef8MBmZQ^D{AZQVybjj)*kU6YxEiHB)&-RxBuW`k*bT}}T zVs@GT_g0JL-%BZ9SS5QdfS!tT+2K5A@#|R6n%e5|W%>k96}xh#g9t*2`H8JFM{By!3oqaieo(T`p<1eTh9enLpLS zHR+<(z7%+$Dt$KZT(@-4(SDIt{w%b^v}y9W@8ZTXdF3hbTi$)1!PgJP8*g9fG4CyNtDjF!Y0_pLM{yW1A=APQds&TI%qV-1XAe?r{{3In%?ID~x!?6?lPfK2 zNyWYa>$}n?0~l4UH#2&vMu*q+!MA2X?Q4Pg&EoZ0^Xo~1qrv^5roKZT=<+#%~+1wzsO9F2_eh=SUJ4Y)>wRGfW_GqHG((f&8hzKFmu81`!BzWP9jI`t3%|kjt=2=qm#Ed2HfGClE5b#)MupV+yIV?= z-_q#)d~Op+XVHFQ(3X-&$GY7mZ^(Ms%U`)gx9M1B3=2MoRx9qmPqCX|0_?&qF#q)y2%Ews+XGj4fAkOxON=AP@=oe>A4jf zKA~wpyGWJ(E+k@j@x#}oWt&5Z;j~CD&|oNzhBx?@hPPtV>Qmk=ywR+F;~?@Sy~l*Z zA$Wfq;UegLN+jhzLq&Hm$0ZdC|4WGq>f87f$Pn=BV?xM;$WZ&y`5tekTMjDg(xk!p zdZTD5&tt8!k+gw>PEzM|TrsE!uu;fOP;| z+L6ecl`PYmijHrje|u3?$t-4m6HX&Edyp3$cuEu9E|<&4g#$lK>_6sS7`H-xi%!1Q z;}be9Wtrs>&f;4PId1(jMBm8}N9}y`(|1l-l>5-H3%Pu9>`U9aoxu>;(@Qocp0t+7TN@(4B2E%P9o0`7PKjtD*B}v!lQ7st!YMTQ*M&{#C5?UE|>bmH+P74+B%@4KnHeN6Y?S z?LrDCv!3Enx;FnUu|gvMN6q|Gb}#hU-mhQz=)0|SE`5CO=`ja&zS5t!d4VORrT1Z>L{KH43=auoJFzjbNpj>cDe}Iu~0fVYAI`|XO&axXv>(axSL}e4H%6MZ4Ef?LgYi%Wt8$f*7Xn_PRy?V zveuhI^gzAoWR^6~&jZbH%51vHm#-`D2E1qCsdzrp7eDwy;Jk^3Lf*G-A!p2u2R;AI z0H5)Tt3)4q=6T^a^}m<6|xiw2#v@tHZ-2mXq+yGL?7J8X6D ztFCH>*d8PE18?TsepiR>C%U%8+aFJANdGCze#>C&-DRZVx8D(5hW4oIYuRf;N-5*(LDvp@O9A&9tvT(gr+83FzO{P%!2;~1yvU;7Y}ZXLt=sErTL zHQSmxc5ON)rGsWr&vLJ(5-(~$Rj%BAvGQp`wp0iA_}@omqc@B)G(&ddJb6*xhr_0& zAxC?q2SH=>Uen%G?Oc3*hfhC_fJx_0y4Ciq~Jb-vv*XvE|*D zFP9^70=hcro6#V|M55I79wkFF2G8xFbo+X12wzK{$MP+q)OPx4`|4D+{_New^Xj|X z(evKL>om>0;IZOMf9l|K+<-37Ggp;HlZ!+|-gDkU??p_L|Gb#t=vgOBcy#^`iO_jn zId7HkiRt;Y*-_-Z+>UQR*Ypsf zl=FX8StR}IvrfrI+W@`0o%58yk{F6FI71)8mjjkp=$ok2m%Mjt7ujv4=wBH0}SfTaf?@3)5`shSXV-kGBq}WaZEv{t+-bto6L& zy>Sucy`DTPUT8LGTi+Z*?vedR5k}Hm|5LDzpS$$)77J|+UxuD#c6Dz_25=SGh_bCD 
zNbM!vi)8;q3Gzu_Bkyri`$}_st$_=b38Aw}dG|@>BmUDy9eTk>yJp;GOG73Ut_#_v zWro}b5PimymgsH^4cuu;?yVi|Ijs*zChtCL&xqf zJbDlK@w423O_}>fu8g3+ZEn>K-;Gg!-6YX9O0-@(RL@L(y9(VcN>u;iXd7)f6@D%Z8Rt_$a*Dyr8#IVxitU!LsuC?-swyt z@9rfkb8=*P-K1J_`}YHPI-`3f)!IFl=cHYCqFvip+ZYZH)uk?`mJ+{AtMQ^{Qnu)I z+wt}OEx2S;ze|JtP$BPcpO*cDCZrE_JO%eMay+}b>6%S+XRDoi{(?kD^p-*Kw=<%R zVp6^54!?V~lze;HW|lXW=%@d@LB9V|NTZ3zm=oW2K1=yz34=9%89Ez>IpDv7W=Z<& zd$f!Hsk$hE_W;JNP6k(oiSNF^ZtqEUD8tI)j}11tO@bk}R-fx5yX@*(b_uud|0;jZ zv(;jPOUl+#{fc*z!X_8z6#vO=SH_~UXM)fb$PUf_-E%_id(@r&o8+SNwN(DML4%!M zU-F+3>%-psf9@D#cNn&)n+$F7IWOsLL-%VPZqq`}n?v2(o`+j`YcrHN+GzKE4V|Ji zFNPW)f7nU4os$-~Jzya42ak$5Sg~(XS35VTB(QlWRcdWQCFMHA%Cc?PjI@vj zNBJ>YOnPreCP)-|sEakU4UPgvEmGB$&?;HQEG`a){NF}Mg>N-Kw)-ao2T6~(73nM` z?3ZYfo3Z$c`cR#QTh_=0O9`i%TQT@T5IujuQLo>0HAS5Rtv+pSjstW4p_iL>-W++c zVx|#%?fn!sKsf#dk2cF4i=u^)EBx!QTRIwl?bM4>Gr#SmgoIvNR4NqJ{eTDSN%hg@ z-3xWA`7wpwZv)#0Dc-&O5kK&gsj7;b)wZT8{bQV`ng~2LW{y8SuO9YU$kF$4U*F?V zDtUVClr6SPl*NBDRPs~+Hfv}2T}mg^uJkF;Jgxrk`{dRWlro-p|6(4+n^M4A2CwG1 z`d+^@yKTUh);%IVrTMACuBcdkYrdt#$F$AXJgZhm0WYW;N-PhE@RL{oATP~!+!_z~ z5B`EY+i4b@ERmzo70=Nn-${OAKr&`KeaFTW86nfj)7NSKksK@yS97F%UaMQ@aM!pW8~aMucA$7PwP+ zWmi7Q;_=700)ddU@-G7f+njLEpRjHEFCOpB#cjp8$huYbrN3$N$WTIa1TyPmH*(C$ zYE?56u=7SXGR>Kjm@2iR0kg%Bh_O}|t`|9S9WL+~Qs^%L8=%ueu4!nv90ztsgXf;NprK%dvNZaa$3(-kLvN z8-JwG#U2J^!f;DfyYu;-uz;V8Uwt#ACS-?O$JFLag@L-s7Q7C^E}CB7R&qN4jwbqPwM0tH`SpyV)b?4Q7X^DAKiWlz}vTB6Wa z$bhVBj1qf*2%PGk4ZG^M)rJ>eEkkHb5M$xa5m`fTp?rP6UZeD%mO_h`z;YkE2;#4N7z41IMmRRb{ct zDbh`&iEl$QI^)dMLilBd$KWyGU1@o3&XmnrP#z;Wj5<$oL7;*Qqx5nH-;C<2;yIs2 zorwVz*qYHalC$zwysCel&VxxyAQl*x7q?cYq5bE1adT0QmkMVdHm)bVgUzG$&w@gP z^^vrTWmcAKh3~&|=Ssy57L-j;w+npddR5m-V?SUySC%Ebj`ZL?onll+7rlhB_>QZ z!wf5FK-Y77IZBuKpp&V^eqfU}58ftHAPIbjRsJX;7)Oa-Y9YzhQ-&7(t&+mP(k^6S5W8jDO*` zcS*L!3c#f1sqE?yEoAIb6 z%YS2*EKYPrM;ne^5lD&a4i0GaEVzD^XDE`a&l&gu{mLfkk|8!3+aV61@cF5B8JhXU z@MDfYjZcbIHO}&T^)2=3Kp(ogZ;5^jn(KOg0_c&d!tQJqW3-4ro-y^m0Yus}@ff{S zhh-!q-o1Ix>p0!R|Ox%c5B$p<>a10UdsZ-q1?z&Je) 
z(KI6PzL%w9cFiXs*Mn)sR(dAvt?6E8^$b7;;II7$5nHI8trIbad)IdV*9>wbe>XM^ zCYapdx%zc;>% zN(39+CW_Se#_&MF#))}y>}k@7iPt0|>)eV@-dr`!;*5bOJ(lDPh~B^h39dLNpE1Ydz<*DHCK>OIm`NV* zRTahmp5_dV*aESDG^T_I*_2t>eLg};pn=|n$+k{G9BF3J9|s&K#jCb#D|-fFf7~`?{hBNbeTeuvBuPX4d z$>Xzzyp4Ongtkd0rESz*XWWdl1OLqeWG$}P&l&WT-wm{PK3q5+F<8)kPk$a&ppSkr zvN!$8iFvx?;O({MQsD)$mR+NMkH+D&eH=v9k$W=U$kI6WF_yx}lrY>AP-rL4wzdOJ z)*U^>u+J%cp^+flKQH^~eJ1Kpn(mDYpZqt=!5R#^czD+@QjSop-q-ihWAoi|Q&rUO zFiA+Q@06}wjvW7fAPh3ij~8%4(@($J^p~wQuS5!XKG7A@F8$$?Vu&bT6Nbw#)o9?k|Ctbt zowWshYxb7WsJDBAkyT5ou2J+v8^VZJ{Wy~PNFSX%2h^&f%qQ5%rrlY2)yF$qniTNzw$`_;Uq2Jz}0tUqE<)FpFy>Tt^$>c~5 z?avf2z|*-X1beJk4Hr`)iD(~~=sD6fj9byW)t&X6t@``()Mtdv6Utg6q2GNQl2aPR zf$-_i9Bgm)>`z$t_oU6`V<2tiQKz81B|W(YJ5^|XWsqS~FL)mJ{u!8v4wCmv5g zII+i4D1qH84&pM(3*vRVa)8#wYO%t{uS^{1E@uGg1dGAO&f0d?8)T^kdFgyFxA9F= z>o~<;!E0Y)Z%g`8{6i_ zQFLg~_&faQ*XAoBRs$PL8=x>L3Yl|Vk!4?g#is&GtCUTD#OnI(Yd<#^EV^m0&aqxG z?5ST#Z@&#c?)Tq0yM^^Qz}fdC9u#%KgfIbWJZP3BZ5#C_B)GHz?MU^`? zPD=g>RQM-aDY#XIcRZv%N|@RyUPw`9lo<}>CPqFf?x)HTevXuK(l~Uorxqjhg+h8l za}q8V!8yNm%*j-9rz&&Q_R2oXZv6#w-zf=U72}=-cL+(u-z{c|50hsChlFLd>WC{y z;-rh*aZsX7j3SmAcv9PW5kl*9705wyUo^wV)PZRkKZ7XDV5c9@k7CBd-R=*}!99?2 zfACZ*DXb>KfkokY4Zr3+B?#9;8Oo$hHmuntZw@9!#E7%0J~#i=6Vp1P{f!# zUQ8P$_eh+S3 zHE1}N2TdX~Ljo+C@&x=Tlu(rKzN(N-jf#U5FVHH^hl0}wyJJ3R$_sKrW4>thGk%X9 z2SHt|Ep!(Q8UK){pyWu0G(aQcaovMqHMH>@H|p}7ZQ|@vBC&wS8$19GG>#!C1_z`8 zexSB6U7W`!r0)$sfuSj{RK;oCV;NM~KNAnFN{c9J2U-pdia}$5+7UHe{u@>~7?c#+ zn6O@@27{I{`#`4{>bFRDy4i2|UhSDx$dXJGC!5s@Z{r#NvTdoiRIuHqri^S%u5n210ZbkyqiW~smU`(oMi zvzLmKIm|c$QpkCn&-kM@YICnyQWIMe@kMqX04k4VE;EO-=|Br^h%RU!7dMlCY!e;^ zgiX9p(|v`*YfK?2&-gTKM}2ui`Y*l^tsHP6PXIoxrTk?N$MuCY>k;u#sBT9VZ9ExL zUtWHJZ5$>)@?bRi?8`xp0eMN0BrmWT8$Vh$SdrZ$jO9-ointXuc9wbI!iwIj19Kc< zB>%JFQqk@NsWDsYBH3oJk#&wAIK6ecV(JaV=mmG1+-FNH^7M4P;k~iI5AHQ87Y4#I z#j73ohD3V;kNT@iN&EHN%4o_$aKv63OVilU&4y#mxI^e>KPLGREhvitlWi=zyJ%wC zzh?MVVd;F(dNpVy!#z>*yt^WMBcF}gO|V``3713{zm<#%D(YaZwob@>}b`)WsQX{f)8Zp%-omvqPObfNbed&ojqE7%Kd_QdTUutBxn}W!Q2d#)@PCS5(XMq@`u>=yZk_gr(uoQX{ 
zFJT0nnoOtdThx(2bX&uQ2y*}EWyFQLH_hOe>kp6CybtYB(ATHY)c(5B8{#(Y=I+)- zWz2~6ik8a5X}?p3#~>x6 zYLsi6fKq07?w90!Ck-WJio=CriqI|i>v~bvu?oj}_ezj$$~revIT4lz2}+IM;r3&)jzC z_cqqYRdT`vrByn$v1sEcGACOS@TMYC7C=lKoJRV#W5#g|&8fEO5=YY02}rym%uPjT zJ$h>9IFvTB5fpc4kolqk0}99r3{-^@b9Bb&K*8X~2o@p7PUR=BR!gADDk{Y~@*QK~ zElUZ`f_QAWRV2wA0OT`TV`X0m8-#-uXat3OoA^mNV69*rMIhV{3wRBY?>S?*ATYp# z>RX3toiXTU)Yh(*o|DTAN3~J^>P=H6AI|Q(|?o$JuXv+_CB)|=?oS_TK3VPWf z%jdv2^W>BKnB)XVweZ##)w^h|ceL>;=JHR9bI_{=j0&RLt z>+sbgB#Mzht#-s6KmI5}k4G=lb}8V>oz-~U~9Na}sbU;z$Lny*jA=FRy1&xBprlb_y;^P!wrIk{IccaMP|GM+pW z7G_hApe0vKGw&s)?H(Y7wib~T+mFg~xMA=sEqHRXKVd4jC~R{INr)s@Z>O%}$1PxE z-Mgruo|+94R#r^KHVhQ`BH*tE)oC8%D8?x~p};Mg169Gkg9PrPwUx+bM$9p{a)_U6 z(sbFx_n2}XcZQT!35tJIdc_G6x3ROQwlemMx?*oc)JVdEGp@&*#NZ}dfLZ#KkB~Ra z9zHXo;y+e!acV&a)K)pKzRdu9|K24nx(gUu3aT0+PkExO&Q+!qP!~FtZ@izpn}XiW zjpeL;E_rIq0K^bg-lHXn6$3_vIK2SV6DUn-d(S_6lv6kQt*f<_;suc0ArmRiJ2W*c zT^AX>bxe~6OcjMts^RK|Pc_iQIJ zgy!y&M z?e%>s1f0&p#B#&J6#w8mB1BAso_zy==n;}6$bbB;1-E?MYZ&{ootVlJ7D~=kFJp1m z15i11`c>RqxAbWUEf!1wbfBeDqymMjgG`lD$&qyYxV4cGfEM7xjEx&X1WN#GEFDe7 zN$ZVS=Co>CY)V`lHbqXZsegsW`|+ii(QnB`Qzl|7g~#fT?f2IC0lkoBvo&&zkqS0> zQlm)Kk;3m9#s!UE0qM`o$;3r3XTbgrgg`uw?=V4rZP~AG^GDXZ1Qc8|D zk2!1}wj+I!X0twO2_4*nPUeQuXCvvHYXS?QN7}+;*iiAC#I@7!XpTK;p?iedp9n4{ zU}sF{8v7zmWaYf2@w=XHQXJFRS{g8CICi-vS+OKMa&xDFr*}JwL`WaRYb@iVFm+Lp z<%6!V%o%gHKK053u+bqsenk*IOc50lS{e4#v_L)mhG_b784gG+Qh{~q%Rji4m*!J_ zq4WG6%=jBoUHyOAsh;@m0X zoFwB&*0(t@m`Pc$F2ia6by4~Eex9ZXvT-~<(mi(p_5@0`OdB5psv1*7JhxMkwXkRq zaf=GE#kdN;CY~ZYO^k-DF%3|bkwc|C8^kmXaGw36^3DSo!oxlxC z)AeII&JTEl(NOr(%N!4=jl$sh91+aX6~k^RgQZPEqzYdP5W*s7H+}M2=+E6V`KMRe z{J1DNF?2GZrn5hVV0bEm#OXL#s0*Vt3{ji zosO%gSX1&3QChIJOTc4XliNpUc<-^%g}r&(LVNz=P9!iMI7f|N?ZW|0^gx-FO8|v7 zE6U2*oKX;NA_onOwvRu?1Zsm02-u(s`q1L&4gMWkq&`duioMvyuk9Y-(d{Y2lPc!KG=HqE>@NE_{GRl;u!g)|a^5*U7Ba(pL`DYjbt;`8cE>nY&~ z^+nkKwwsB!e@G?m_#vKSAKmN{~Of0qe78~0777uDKM14-#igU{C zW-BlY$!-61-pAYEnb}5d(WLR&zhg@9@qp;l2rw__`U=9YviXC=6v2-KF0r^d>Vw+1 
z7%uqfIh2bu-KGd~>YwLr2{aXifN40}KBa2XU^#>CZd2_J9X;^uSAEKmRhmcaYAjoU z{2!PvF7#a%wiyTXTd|8~&Ijdk7x{0^GXV*aQ~dW-tp|W^R^3w%ud^!u_yn+V54OP3% z_#qgIzdn52>LTUnOdP2s{{xbRF|BR{ z)7>eVR9PA`l_X72?gBLV)gbUCdfBIERmB zVGUaMn&^)u?GIdSVXFwbq&4ZBJVrbYQq=QAF{rX(S1pzi0Av}V>`?YO3t`c1q1@fw z%^mwri_9J$U;)bp#JW!__qB)tGR7Y>oRS4I{1>}B}x zGQd2$P4-;+>0x5o>+94#HwK8x`2=ICtIbM)Onw}BtZKhZ*rqP<@2R#>XFm%dIn_0# zF9uvULrBO|N{0{a|GFHbgC2ie%&%SIaUw?HD2t%-iO20tXN%R^m!4L!ZTZ@8$97Sk zOiE}WNO0Uug)TFMyn4NQAjU?Y0tah3Qcr;B^(Xqfu~!e*rT>bGLk=GILS=zKC`61p zm0l_mFbvWdXT}B2Ro5ryr$g#PENRn56MNSkos!>B6)d&9r6j;JhSRTzh(TC?|LP~MjBMG%IBIjRWa^1~DJosgY6z;~BBX+hPprXQU9h5n zPr1K-ieCea!dGZxp&xW}9ztc^1X8E|dVzPf4JuO^@mn~Lw+?ojx9->^eY>cZjuww!M#!+_+t6mK$spVVwKaWPmCnxm>7=$rm$x>N zgDT~TcYdlW1dq7oUi37#9-GA8{T^_A;u^;BL$VwzE6O72&V9Qgj?*s^u^77`B5hgP&3bRTpQ;p3QB_PYw5gx9 zE9ShNfFo*6`UC@_&5_f^Ubdm#LB8&n|CUuLw=W^SH&jS_tj5VN?@i3Z4UxlbC}wi z>EBskpz;(uS8}9l3>aSfY!nS7Jv6}XUSs%~@KFh!zrsoUlmFx^OZVFXbq^|`_z#1p zg$ii|_+;`nYUkfuxtd94j&>YQdq2L*l1%2LRx$5|^rfi`=!|yS9PBl-iqF1D^`~ zuWbVST;3^BWUt5O$x3WrLoIif zlRAKGoyu>qzL_x@RQuUf#KG1V9>k$WPq=NrrLGp448UY$YpDIlo)r02L_UeembT&Gi3!4j$;f_>?#l!Ui1FM?B9`@wShARH$wYF;>5#$iO;9S+ z)|6X#-`>4u@jvD3o1uQ;a4sKZV3P9?fi^{xQCg!{PFX0gyDO@Mc~ih ze4fMlru`v&#*(gavtQM_#xky!insrlt%$OWMaO|u(4vjiwFkJoBV5)g+Jo z-KKM;kwV4CK-VCBb?2Ac(D*=V4k#y!HuRk(8J1pej3cZy+FgjvQg=beQWvB#3pOCM zfV`FOjd}06E=^sG`!Z6?r%%#SC<6W7^u$1zRq@>0JJeh=x7ojp7M%a1JgT@-7wSa5 z4`3rf79_W_$WwNP@StBr3OMpyJpTQVuiYa$V^|bXDD%Wj4`&A`#3NR*4DA&U0ok6o zgGRPb<=}p$TUy6A@sRwGSC}@SI21;XLPtEnJJ9ywKt-6a_yci)iZFpLnp8;oGG%f_ z&a7V#q^wD5ShuJrHy=8-rBrgxZ!{T9CkpK>j4Y?=bDDYhX7(#a(uqtxvwi3gw~&na z8>ZdjvZ4th(lE?;!t%=7_!(f$lptjt=D_eJotB!k({q4W(Pg)34I7;zAE*7`&y|(( z_$$=FTtrkm2Ex>&e;dc^OiH(K#Da*lSysqi0sUb@ia&w|poZ%`8ib-xfRSH7&;Igz zW%D!XZ)Tw6RG@6cADSKCs+TBxTXR(0=q+VU7p_T@0(CX|k|)R0-Wa;zWn=EAJFAGA zSkjVFiNPP-a!JFS)EPseWjWff_#@yjBKc3FM?(n<{KfY^dv3CV27uClctVc@6iI;5 zoe;AJ`Q5-q&5~Ow151;PC0&sJUTCpM%0Y+6=Mw?IG4+9F7Ybs=*EZTF8Fba48uOi* zog!Wkgrjv^ZU|zZu(VtHkr-(OKb_6r_BLLbX5VdUr@3&XqO4;fG-PQ>7}~X<%NRd9 
zLQK`pXn2n%tAR;)lK~_Hlb>{{Ic)xzW9u}mViN#x(2s4NysF?$byr1`;A3i&S=fcj zC+o~Lo5Sv&slQf7J^zWV2_1*sm-<)cZbau;PIZO8DIZG>BQfRu`JJqis+c`Hl7s_l zc!$^)i6-HA3Qqq<2R!LXBtdEuDh!6gk^yiL0Z6EKun*Gi~Jc?ExF&IFep>&v6MH0^NJyQZ9>*&D(?QGJ>H4HH=oNPQxfy#iWLG6&C&Mg`g|zXOHSdSBZL)eDs6v?W}aiboQ>9H0}(?L7wfhuqtwYYm|p#0=tR68&$?};$(xgfBUIqdI7sQ zwdTWWtUKNdcqc%}zw)509*AuIoBywR%f$Qc+g6InK?!m0cl$QrU+moa2Waggs;YM zHeGSqNG{VGe4%ECuf%70R_*9zLs!qlvs&P|Anv-FAEY=)()LUB?oOPy>B+N7Bq>ko zJ4kd|hYOu(O{^h;pe)-MalW4VBLPq3v2T~z?_*-yq$g?t1D`om^rLQvNm2liHc_T6 zX?qvN9-7^zZE152LPK9u#2;SN^L(qqJAJ9gEankcxX=$<{1TeV=L-oWAM0O9K4vLF z{itNZ2-95`;{il2hvzI(6`O1J2zmlkrX!zc0~8qEhs>q%9Q)$|W63`Ab+ISoCZ&Kz z#LugTD}qgbM>xaI`YGssmZJg6KP1Xosp4fgV@0sOCqiiBUzmcC(p?q2YZAZe9I35Z z>44V@7Q{#%Kv?^gbxGF4kwND0l;V_c^r_j1HB62q^@N_Z#AD)|%#AXXGHb8xQ!j8x@5ETY)i^9N+>#1|KDeK|9$rlIr*Ur1k*f<*x+dL)$u-VCn&VNJ_IOyTX zvh+tSK4f?*;ONXpo&5t&cQgjTn-E~OHTt=Jew5;>^vDrIY})RH#EbWt@mcVN!6qtp zJ()pFpnMY9fGfCBX&A(nxB1^Jpp)Gu;_)NEgMyuKUmf!+a)J)|YIuq?PoU(|Cb92I z6M(bS$O6|ve#eqzZN*>EOhONe$GLHkSxNYhXhv;bLE8^Z0%01Kt&VWI3$RaDe{#bH zKR`i{in|O?Q6qMd1C9gh9{>CrDht!u*(OU;Js|mnJ6?Nzuc;B|2KTdci929C_*}MY zghKX$Uw6K5a!5pu{o@+$LX(Edq;VhUiep;M?OZ%7xk#Yh@;tdC#hh-WnnWKF=alwZ z`DsxAi*j4hyWo;oW*Y5*(=Y-n%@=|$4?+6fbAGYfw@igk0_a4O2jT28oVdNR404=9 ztmXqTX+q*&k&if?gsghYNNSS1V=(0uhgHmV=SU=9tMrlL*pMDXC7&=P#YNzZ$1Gdp z7MnY0OnsqMoR~3XG6)v6>K$iQfj)!O0+NIJCL$(0b)kZwEEy|#5ydAYM+_8$F&~8# z<)hHOfNIdXEfiZEh{5sVRlP966|*#k>?-lbDfZZ&Q@_^!#j1P^Wf)nVFri=_6%x1X zD@fOb z5aHK4Z=_)ytU&^sur1uo2#})B=I8Ss-03GQDKCZ>r7W<`gcr=^0dg3rzX=|l zlOS7O+pNAzmn;&%c5_DZ$HaDsrT7!Z^{)t~hZ#fO9+YsU+~5juVZ0G4PC4^Z?A4D(apm%h|9k>shYe*>Q$=16znU3zC1vhR9kXbS$bMN1XWIANC z84YlNK3VmHNT2ezQrm)T(1Zog%6s7*I;}ax3L33;bh6qg`^!2{ z(f(lU)9){PuPk5*8u3&q3el2|OBdh(FGhGSofC!Pk60l^73f>aAqc=P+|Gw4dVuN0 z8yZCvuE4#L$oR|rF{ZR$D^Fqvdp6b%%&zec+Ix1%E8oZ0y$p%@C`RfusCa`L+_Ge;H zqy*kbyy$Sl0I7<-<>7yXHS^Ub)##|`8`*w1Ku>#G@3FaVP1VbXQ=^4wMn#xpboY-n zAXa5~G%VfbC>&}^zjOZ2_GD-0?A&o(?|6Nn7a0g~ z|K2h?IAG<<^n6Qzy~=TbhxdoM=g4tb@C~dRomklC>^v3aNVIB|hf4A;)3t4K96a)I16DW@8y 
zpQ3bf?Umtc9;lI2Rp6>QQ=dSnh}l?$RG=`2%|Fon@Njv)yYOqfh*6!5MF~iMcK34p z&xKHL%|IbFCvjANdX87X_POqH!TF3d)jP(~D2t3mI`xL=H+Y5o4Wj(B(GwDbqS9_f zJ!LJEpZ^zMRe+UIr&2=0mL;u=WPI4+VJ>di^$^DUuMUi55(tXqvj$PX%>GYB;AUf{B}4aILvJ0pZ+KAF(b6x@X+!O1==&QWG`Lg zNs%86aN%6)R9Z|eh9C&-7)|hMAD6dH-yq>A+2$B1-yIkWExLPD2h$T_dvXiJwa=C8 zF2a0J6w*`=17ddcY{0(x4$M4m-PeOy3W=^qzLo)n6Hp4oth~f2>o_{GtqhPes(^rapg?}7(cbuGJ(YBc!wQow8;#FP$QF-v(jLo?S{jU=Y)*8sN1 zgh4DbH%0dBn)Hm`Aq%p_$-Jcl(JK!MqKXjlxoh@V6?#(Q;$ng`+bNHd6_asL-r z44aq%9O=|30m(9;=R9`XP2J-O-Fq7L`qQiykeFGHqJ+JF$IsR7t&I!sNH-l(vK|HMiB{#UWuld(zst- z1O7SNcDea3fWsZGTaf@^^a#Uvju7{aIW-+)M@Y#e^2D6}Pveuqu@iU6sS)|NO6T?7 zH}cIxvTX9ijhI2=TiH;O-mliiqR;c}Sa^BB^+?mGK2@P=5+&Eg?J=(b*aCZCJ&kvPzc(J=1=s8OenHLiHmmn zaE+>3s=ylB3Bn{!qQfZsN{e@pWtU?g zU}M6SEv|p2X(~XA)TW<&LGYk5e-q~LtHHHTmER(?fROw}sft~_nvb8&vald)>AO`B zVG79KbQ3SpEw3(o*KZM|;rm(~1ac@%eS?yU z6!C38o<5B>4^f>ZXZR?putup9A*M!+<37cRyto#6LuQ05lE#v$;}5>K*O+0epkgN& zrH!EUiV*!;`YkKdWPl@;XOb?Su1^ zvcTk*%uFhX5wA^r>{0>0y6lZ-V+ind85-6tqQWbOMGJ}@OEF0t1T8*@W^jN z&XngD-7`_#qVInr(Yl!WdWIZ>Nf22qDAv;v)wJPf?xXAqJgU=RBxSI9zqoREQ@=Qw zn#^DV?3fhUJ0*Es9b-}o3?~x`2I=`CkDd}`T3D+s7YhkGzO?P6a?)g)u6oIRLmzGza^A=FbY4X0Tkux3H?o@U5?ZAUxqXa9yhg=EH>KZ^Q!&6-*H#^1#)%M;D6k_I;Y$N1_tr7E>ffer$v|snPrw{XTA{PoaG==&vXqgaq(?;Q#fV zDN-dXhkd4mT8%OBA<)xHxSb@-@oqyfqkLB}Jpmsy|7 zR&SdV8GT9|B&rMET$Vt1#H%8%TTW(~=?fDe{P*cqPs7o|w`kd#Q~Hj zCgl6Ff1XqVO%$@*9n+-7`KS^D(75__5F;%2^ zWuX9o7Z@@M3?>aE;Gc{3JCY+gsaThk7P2_Zl7uwjM>{c-&md2QH4ilU| zpP#`O5O@wiq2U%tq&QPdQsBak?XbX^n&sr39PxVnuWXdM8R#H7IF=4kO^CQT1NvQsObgZVn>iXw^ALRIS-k_{(YG>fSgqTT(Qea%Zp zvNYua;r5eH>R;9f4H|fTz}!Aji?Or8N|V=Si_zPVv#c<6F)4GJU&3FHdH&eM!gi>S z9dfS;CzWU2I#&muGpSU(Ap>#$TOB4~Bhuvv5U*II#2x++&W=hMZsUh&mt&?dzS?gm zo2VsaYK5?|md*Z?&es+$QJ9P-10Ly?d=O9OS(G$Ox$koXHnb>N|G^YuDM|rh{HX>n zqh;eXq0Os6JYs4CNRFe4cb0U#jojW+U#1xDYCt%LfcC@5u zDw#=Ksc6Tst*d=u;K#b0p2as^a1*u@ut)=cR*+jo2q@t+qHrWO$egW8&KvMvfk4U6 zEVi_nS&y9b9us6SrRpJOnUyg8i`kQwhiz2@na%{l91HVc@P&ye(ZIgFCj-rGfAFKO z#r}RndN#*XX_gu1-5q?OoFffQ>v+=Ys$gm@qr){w{Vpbas)r5-YDi&@Ru$YJg6bXo 
zT|#i(rjbAPpN*yvjIa@Lm=24s$6fB9?~B)> zpBx=FzN;ho3MAbGvlQTL`lbh$petJIEs2~}_rc1n#8a$tAmR%qDyj#?e0o-bDp&)? zFlPp$2~i*w(oD;?C3oQr?!mT40yCRF`WX4zR}})}<$;%SVI83{cTprzA_$}Wqk05k zbhomhjQYnP1lDsy1{$Qsz~&ydZI>k^p1&#a{aoGc&)WWVIkt6VFxtSCWl`!57-OsQ z>B$+K$LeAzc#p?kX=D_KHWGgT_2;t>Bx!3>wyKnaKNl54-;hGWug1h69o!$F|LV!$ zOnPgSY^A?wIlm_#G!y)K1I(LP_JZ|161deCtPLNN^?}Mw$p>w@N`okucCHTGBynV$ zzCcVe*UOLoravO$S1+%85@idnY#oPAjwKGUS+jDWvE)JO0u>KpnKO@SpI^$U#T#6A zw`Kih@?{V;%(JjFH{Ta=di-S?!VhXDf~tv>SNt%82fZMNe=??E{idPtI7$7!)HW3+ zBPyTKjrSE$NT-wrg`$$Zo^4DD5|fD=<&Z?HmLpE9DQKjEE4v?bT&Lr0Sk9TG5Ny(p zU~G?<|Anykiy^Tc!shS+k{-r^C9K}Q=FMjU$M@1a49Eg2UPR14I=$G*36a}XI&>I} zp=S6({5b(F`#g=a_C9t_oQb7*6q`94xa~5oEm^}^Bh4Mf8e9J{MHA7l5Xwx?7QXOt zH>SPWmX587TAw3}UR|G*Ju6d>n48MZ3tUW~tJDj@HKv4?(ASB)AboE7=`j(t5F{oM zQ{WqPEQA#b2Yt6un!2`Sm`5waBW5X^$vnGFv&>P-6cD#?-|jL$W`Lh6aL(kuk=XLf zdHX4R@HY+w!%f6yGhPFDK1mlF=$aw|g3oJ~A^`Pdqo^mk2ra159K-%yUb$e>HisT) zy^=$l9-@lTNxHvYsOP@$9r_fXI)Wlde~nLnBmYs*vIr>ueEp(g5Ja4hAz$%KAA`-c zFM9J=j;N_%ENYq#`4{&UW#|reY>u#g@;CeqTG}6d0lP13BlO9hFd*ISitO6RNe65S zKuAp-sN%HbQT33$U((Sh{vV;Z1MV5-LXFzq-jA)Ec*8&ciFN%j#mSITZCD*@Nk zx1MC&QixfOE)ztvu91yW43u@lE@PDmTb~6%5k!|mAM!Xh`%>=bx}7;R#fO~D$!?QN zu<=3Vd}7ap4Y&p4GvJ7>3LiM8IW=_At8575e}w$zM_o+QXt$YtH}?-wxXdC(%=sW0 z%od856pLeKB0Rxm>(Jzf)~iU?aMHAhLzC{=U^~D*B%d=eAT9D#K2pTEz)3U?I(|4Z z6FsX6w8^CCmbiU7Kk)cP__I;vbV-NG=E!;940}#2%^4s+m&>Vf*hUVfuEMRKZTCsi z62+(#T;L|Qf7>r}t;5HX@=AR8slHQ`)FU1s38kb=;`@p;joClvnU(qhY>mZ>$Vvkxo{dF>6qEY~kBiA`& z>yv@|v$D-UeX+uvpupho``l0>?^`O<@t5bTs4BYdz(J?khZDS7?kyux_U1%D0wA@>I17~L~2^FO8Arc_bMD1et7c8 zUOs;OSa3$t^Do~g-eYxzsUIE{103aE53n4}Zu(RNxP{*fZ)|C|| zfPM!?OUhLu8f%fm+=Bm-x4#4PIy)o`T9+Qn!1dyL^&^HC>Va*5vcX^Es8R%0&0`j5 zi~bZS%4zY;hymx4;nKAzj7`78YN*}}?w0#lcd>O#&i-Xz*;|@qA={i!pdBTPjBZuJ zRZ^s5qlJG0zcWa(KYGH6N?%@A1ngjE&Wf#%4_E49uPT#*4*g~k^qz8fHU>gsFmUqk zN)mIb^kxkkxR7b@xoeRlOQ5HW>`*)XWD47y22?mkOi-~H>}^oPy$bd)^G)^6Ea`re z(=68Iv4F4{QrJ^((snMBsUDL=zc*s!#C!y5F_OS?Q1zvi)r)}pcu2#f?4j9}*?o&i*f&;gm=A9tw_qY{D9R?J4 
z(w@hVs6;uxA9Z|8bS=BnBZr4FS7Z14k$V~fR%bso+UjV#B20MConj`hiGS;KTnAF< z1l%vonGvAeoUV^s=Pg5eXLk5AQ3(Hd5U$ol?vO{CHF1|+A2(F33V-~K-EWz9tJ&Ig zK?{m%QzJUHR=$W53NLj{JUG{EJ$9x{sgxv%|f?G)-Q)OZIo>O6{Wjbedx$yO-BLtmx@y*={0{$(>@u zDRnWR9qFCedJv3Izy5`tsD}nW5+fe8_3P|adA(xkRnBWKSmB;j5gJ%wZDw}m>W&XS7Ion`anLONybgSxlTDBxKKJEe;P?mZ{Oh*cdeyE`V((sqYvdnIhIgwi^?}RN z$f+dI*0>|0c+*l413G&3)iEgha$ti7LgW^A~V4dfZ|>(xJT@ z9Ti(q>Ycd0zaflju};wv3tpG`l}2i!LV3_U6C6#HBh?(YSY&6KgdFnl&&Uz-)4V59 zBfMpLAkiX}R7=ZckoAr0JxEft*RF_>@v{mTliHTuSv+Q;{{dh(>FDWy@7wqg2i`99 z{;&+1ofVP!f&{mkf1A7y2|%`W28{*?c+j~W6zmXWdkUQvJs=o6W=9O?zwNBOZrEFN zT_w!>F=@6Y-tnt&r7wUNU8kI_C4GbMc@U*#b-H%c`!>jQ0*s7w8VkIf-s$+K>{%G- z;?>aE>AU~w*P&anvBGya+CYox4`_VNdrtI8OP+0xbDzlW``A;zF+LDBu}RDs^0klY zpCx4RO0M65C(=^!TjPL+r9A`Ec`31F>uNbz?1|#R-)vnyNMnGJS&gjZ+UF79<-t_A zi(=^r1AS#7X<5soq77!1jv!rT>yxQ`VPYGbI-_oGHY>XR(8raq$;Kx(Z^fgfp7(hr zeArgk1_30{>@Wr^o%E!Yw#69L5HJ8d7M1~O z_wSQI<5>;pjxXLNgCX9>J|sRq+sjuaKgtFFdXgt+`QK}7x8I8YI3>l_&qJRPAGpBQ zvd2=7z3}nv@ZJmHemz6I)Dh)+(Gw?M_>q1zc+zL!d9doMCh-oAN&C3;mAdup@5@b6 zh=77*`@~5sPJ6hD#uC~32n-Nr@u>j8usR=#1VgnT#GVVX#F#q?kQ%{?)&2DfqBhg~6Ps7NlqThpl z<2KyA#Y)WEAxY-Iw5fO=-E^0juV2a0-{_NjUxbZ*_cmnRMa0|le*O13e;x|)gZ_MF zH%}+;-EB&ckMth5WqahD=EY8sqvLJEGxdKEvCF}u<+h)0wQZ7isE!Vw+?&*!TB&@o z?&-b|zd0>-DuYDyW!%8_=$DRvbbrdHg{HBs!Ost;z{oSywgUyOTz~xU2KDg8EVPis zTg53lxl%CXtMTg-$ZMkt+Jq~Jh(b!UL$^!m%2@;na+d7p|JQ#D^oEpaxAOAB3F^jtPS8YUMk;G!F{U#K9; zfpmKo!HsE+(_S6Uuho(lx#Is5<6bkc-PDBt@hlUKedZRR4=)%s_rvp{V>@TM?ugV* zB!_Re-+#0-+vlrnSI*1KL%qA5`Za)d-5J_pgoc+Atz9)WaH4O&O6UYnEu@oBV6I&+ z!e`Z}^=Tn4UtWhItSP-KS3R%AvD$Pm`>c!{7RE{N+5>bNrXoC_*kk z(<^Qg)r)jP2nS@Di(GuCWbYeNkiOa|7Y)+H?NOT$-&dl9GW~6z2UAU$gzrDP`%Nkg_PFSfJ-R^oI4+q7P8ppmrpl8&w%6mQ{MZ&(LA}1v%qFg91zN$2IL2q|ef&}f@dVnluJ3%D1D!uRBeEXpb;r+Y(~ zlZb+}n3dk~MoN6pxQ2lY46sr@QtnZ&7d^Li?)wQ+;V#&Y*1NF5gqm8}#FtfsPLcv~ z%%?yP@^ezwJ*OXsVejN;EGRn!U}5Q!J;~};jp3;#E26P+$g~hU@PU)@%nr#qjln`%?p0jM1_)c64goOVx<=2?dg5D}`wIA5?VE)c6 zl5q5&-F^RYBG*TtI&j9%j~Qu_&sZJUJ&jX0gT3_Ch1i 
zjXL#$q75IeoG(kVoih08LpC=87B_QFj~Mf}Z$!*3AQgqpEsr2scNKO6OxRz>M#kQq z-QC~)=4!0RD|BXYO@Q?)^d-}2E5VI2mQZX#T|b>@{fPU%Ba;j8XT_`hXAQJkk7n^U zTHhEwDchfdq-I(t8h}Hv+6&Zvdii}7L1$qdw((&i1>+;YozTVeWlbbc2 z5sc z3}rJ`ywQnxj+9Fqq4&~k_M0wTG|A1ZepAt|s_#%4+nXg|UVnLO^+Z42bZt=z6@B)0 zcD7RPuDIMQST1(0H5q*?W-dSQ&oPDCslvZzl<&Tj^TT}@m4MZ8aBnOukxT&XQ($4? zRNwU1AY2ai($4DbzUvZ5L0Q|3hV5o@Se&Y0!LA&B(xl?D^LPc>N;n%9eBpa( zx5;>=0}l__xuhiEEnofN$^DA6=WAl8d@{?iXC_N`6k!2yjPzwI@7q(;P`wL*0uQ^Q zGEN??+R0g`klOL#yc3> zW7hSJYBo41kSMz&xxwjwOk|b_XQB+7$d3_KjzeKh8b|@ zh_d0T)izOub2#*JnRyKDr_uQTmHa|hPI=)k6Hk^2N)FYVZKc>?gZi1`%*K?hSJ*KY zey!8EkLse^mE(r`_NU+Zf?WH!wWDNr-KFr(BGZu ze*3w>CeN2L<(5sQUi-j*(UOUmfRu~2QKP>Q{;Q7<2i$5~97j1jA7QB9Ay`If%J_i_ z99)_gI|dnE^~Vl(6;uu3w;jH=V8MCTJHq?&Q#v<`H}c!187&gH(kG4g2n*{Zh~37) ztO#5cr+2NF)Pe4u>GqWjV(Y@d+icCVO|)I)}n zQ{I0OhPcL^?sEhftpt05p=cISev`??7yx<@-z+w~nZm^&*`E`Y>>EprXHF)e*lEZq zy9PfK!I9%VsIsYjTm(w8iT_4D_E}3WC-~y|?rP{@ex0Emwn?#l{_m!JEuts3HVxHp zeR|mOp{raTX`zo^{HCp(W#Pr#tRb+#-7guhQ`MrNjcX@!rf^3nJs)>A_8W?A{cAKA zYaAp$XBm8S$oC`Ow^!$3V?`v0B z1BjoTjf}q+^_f0lVPsHyXMFkzRF_jq8**qBFQf2IF7Cvl`xlDz&uK6-lm~&Y?HW%B zrO;XR`e&lUc1>^#{FYAe-eG^kp!=*O)DU}RV@s*>rKq0=DjC0nCpD!V+&-Z%?1`w^aqA5R92^ms}e2pE^j|Y&+fJlfYX9ir=T>{e?z)yi4 zh6TzBUFG96NW!cN$AAviRBNCSyvi_R-9=_(PF&!%?=aKL3%qCP*>H@8Pa`tbRq5}_ zcexC_EqP+N@seIYx6rLdnkJB&G!HIHh=iYUqA?Dd1L0sj{Wth-Oy45^Ukd=VBszvk zsyJKr#N?``FYa1oZ;~g}Um6!OMXH*h5T9UI(y~K}^ndP(DC$!29_=kE} zn}%kA^3EcHpI)x|T+VAap8Om*qGUmUS>> zeNNBk`J-I0o3)iwcD9Z@M&>lZ*L_1y(k5?UYZaL%=4tK*Fi;m+IjQ|fJ$|`!Mb>!m z;@JSx4=Kr|k;YErv$}C?GYJYir@nxT#bp{s*`1%`1riS@U#lMwS95J>WCBqh(;Dn_ z`K8VqlIrR5MJtr!tXz(kYee0B6y0EXy@h-hJz$k|r$;mZ6wI7#d_g#yB>a>Znd?rZ z)jzQ0)$e?m1I8E>ApuX%KW$pv z*ZECr@4buGzY7kJ41PK^j*-^_mD`+$eyOI7p2+RLqtOn2#w(#BG)r&GkWpt^VJ!Hj z&>YL2n)ANcm!C)JLTyjgEiy%}jqIEv_n%d$e?%4q2v-L-;S{;&iqPo}F1$9#kL!Ghy^DLoD&+ zd~kny(d_s$YW(E-@sLrwI4<)8Zm?xV|K!ED0xDCnfdC~SlX&=UxUU&4&gbn7;!p*k zu9=e{i68yqyV<6Nj=;;yJpRkKP10 z-pK^YK+qstbim{VdkD$!k!O{J*vl7-fHO>2n&Lru4pwKVPg`a;Rjd1g$fm=e(_PtO9>$@sP!aoYmOksVDa0EGs?8%f 
z<0Yrp!}HIpcd&wpPS6`4I>^)JHc}aI;IPi1pB*R=DVSn^6ML>RSuR7uhBW9{d*$4=D)iUirb z!zHNoF$T+WVg+ok)@Oj+^*##vJVau8V?d&YC1 zxDfN7nAM1Ao><~m4zDH(A|U&6;ZaM;mc$&1tEQ6hz=KZ4O!u%o$;5{;ax)?Gv-~6J zdEu1$tO%J$d=Od6_~&N5Rqfa-kdiRj={H3U+5{>t7~;syQN-Cf97iki{i`Q>RkjZ; zS87=va4jgirBy6`?+ zZ8#ONdh8sYMf_ou7eWYw3^^U(0~hgtw1D|ej1JZWH8N28R<1OA!<-}bwmyqq=$(m- zxbMu5uB0sqrs>U3vF3hE%}?|ntk7Ax_tYefc_S(#!x(`xGRjuyc{~ zWbUQzT`5K6q-OpV7&>M`htIMkIJK)waG3jFeo6gh z$9^A0geC)F=gk*jA2<9!!?t2-^L)A}0DEU4rkycP=Sr-6-g*pmr+wJIb1v4=e*5L* zV08JqK9ih=Vhx|wVS*^$qjeOD-r2mVmGr!gJ*h@rkzDl^jzRdV9?$OF&$qjdS^Z)n z&&&LRx}~aIyJMkxZY{*xr+S-6q;e!@m-w!4^Nhrwet?ttN=!CnV`H(n(+D*!ZI*&) zF86j8IvM5El6F~BAGOqO*luUayPgWJ4uZ@!*|fX-GFXFK8D z{H4UzwPE6El@2VP_PvmlVgn|1`|6J71nyV?i%Z`kofl+U8I79{sD6 z0w`)qvT}COvzhODdaQW*?I=jO+RZH@6Tjq5ZKSAuRPxIc_u2^70!>ru2xh8%3~tB| zrAJa@!<)GYNw!KK$T&dCXq#{Lb0Lq|&I3b%>1%rJPX5Hp{TJU($8}i@rwm{*$IX)1 z@Bl1NP1(ClWb0Ae{>aribwjl3$?!i=zCjv~rpHXf%67IEWAJrOIPoE$a`nxElO&?K zuMFynasi*nFNVAV--4?Ax-N^U~L%#dOrZOON5CQQsn|0&#Yp2O|t&5A9CzhhV zr}IFGxtG#hv_4N|*19dQR_gu6pVAo#Eicx6VXNHRLnC&p`VJFJ>dI{8@$=)H7}#X= zpz8Ht7jOX_{Q&J5iy1>0?Fm7T^;7$!@39N43V`Qox)N`J{d$QF7)Uasxuh2lR<{Bb z`f#AuPwc>s_o^3!s$^S&LiZ{vwvs7g9ixi#tAX<)GQ1N{|Hxs?^o1A{K8yEidtGJp z>EdNNm#)>)d%BtNXq><2mKXX(xuAxJwZfTr+zsOTDtTn?(}wSUd{1-$a0%j$_oG?1 z=jV`D@fCDmuIFRM@fs}jD6acf1tsmP_|m$+jaQUovwk`g=@2%M{prDZrpTpJMKS6$ zyF05=3m8P9)jjmr0~2Lrg@Vk1DdPF8%I!WNpxLYeR}Bb_7BG?W>%)`AeVsh3PY>0X zX{>~@fs_UN^j@kZ2br4ahiq>r7@h>mU7(_=?+<8GRPIGZr>^~ed!CPbnO-EG?DB1i z$LsmM%X+vGF#2+_iu$fAcs@sxebzUScybHMS>F{~b)OEb4Q>n?L{zR_v9MwstFQEh zwCU|UjQ--Z@=hIec`Ga`cJFubdrjr4I`-*b>+iQZ1y2Iu`_5VK=BBr$HAaX=SKl4X z;L4TzyLUwtI_`}ZL29u7x{k}-Q28VTdloyQ5A0~(!$$99$v1TU4(K5Iy)Vt+g}UNI z=>b>9zRUJqL%h?mF=K(6nl=6r-AU>5(dE{HtNG2B`c`*#o^})Q zb2eV_b?tE}kqNf3L?%0bFsSQE=+yil)4+@0pzx`mVak|;c>`qF$i4rHM=7x?lT@=R z)0{V85cxx^1Z*5xz0)7&fW+uXpV~Xc?3E%Ko6V<@xYfQC<;f?6)3${$L>$N?7mX=vwhK*G*&H_i0o+9!W`fbE)g6x 
z4Jt+a$9Ppd;X%+sFf6`TgWV?v2Y`(*_gn*#J`Vs-SYscvMSdD#6qddxbm+Of4XgfVT@nU0Q)w~&bEEt6mCezPg1c#B~)~2FMVwzW_zzys;NyXD^-gE+}s)qR16TG<3Tre z5yPFd1Z;ov@*(QU-n(*SY=NPD(Uj2L{IBsE_8QK9eiyj1Z%=#@%uYKkf17#j3s`Fa z+eJp;KqtE?(}Ri~&|j#NSo2@oZ&+=(Jw*QrI#x!7W0&_bPj#E)KpQ>J5~SX)Rwfuu z%l-G>zL0AVI=ZUyj`^2MwL~&WK)kdm=VNuTak}!yDfhEe&CT9Sl_Rvx(*A~o#a4+AqFoo{y}&W_r(0JKg=wL( z#bVF~XGJI$iP=QB8l#ZFaSJVkPsot`>RYOj3cS#<*i+M{Xv4!%V4W{|;g$3xNvWJ& zpLnJFv3}X~hyLr5?7!#d)>aw@I3ogRRkmX7H_WB)bS&L+)2fBEA`6nI=*S(g`i-i* z`4ETV2EW6%np${b1BK?9<~gxns>YZU)v{PI=A5ed!kRxoup^``sPVxjuclt}(Us$T zW!lkVPmSpO8a+5)Pq;7UPBGy2CyA`pl*EZ z`p{IBC{VRR>YU>}KS~g1eJ_ z(3e#0v%L0Gzg%8DoB;+?P;m~=i*nYGITS7F=P4yJoZD*}>#lEt zti3bQspth~sgB-_pCSJD3+ZMC)$^#`rCBejQnWcZp+wjDv6Yp`>j8y-p^k0^d6ZtLnsY1CbOIjT4I{d|MBtYy9vr+Bt znSAyfN*VD&i|fN$1R2COnYCY}X_uv;@>OL`A5*bP`o2?S;MnfsD+k=5T- z=R!*xu>dN$xZ0MZ*%$AHw0s7KI5z-nLk@d*_*1Pr7oLuvzUKY0x}vdZj}^-wn{eWG zf9!$Ra~w2Zf5Bt*VAOpf&BZ(fYws7jB*OUVdC>eQgTn2o$SlIwyo*c3Z%-E5-0aB% z@ur2}bHqTI)vLe$Wpby4sQ)k~K*IpUOp06xRI_d9Ek=KXFE$3>{T#Pd<|j)?G7q;T zY4_2)B?`93n>oF?WN-gSrPn^J>kN7a?;vmz%L47I|Llvpb)G5iKY;`2#)v$Cn_C5( zL}B|Ud3J0br6(MLmhNs;`g;Iyf#p$o6JvT>!VB6JArGatL zC}H;aPjlyU+x-kp=*VzmpoS&?XHcNyjW>;|@3}x_V@82_ZuR-PdRor%73 zW^WS9tK~wPSyo}Sk6Xl42?G_s_s>kRRIPboeF`E2Y^q!Tc#q-Zb$HL$iS#E&6xK#0 zJt@h6@d`k#A%-lJ;BH5U{_f1cAa(cyh$ItH|J%cx6dVOfv{yp^w;nQl3fhwR+bWn= zEfQp7or^kQ8Vg1xAFrSKe_QGK==p2Z#E$Y@A8smWdN+OG6oSUpbX(rsT$m#{8=H!l z+&s&bIq|Y8-*G{`8_&85ao_)y5t@0CD-G#r1Q2=FR(2vleg)wl{Jl9)6MjWbnOFFx z__WT2}52+)hLp-j^$ls~DI88dyoW#pk*dCkorNZ3_m^t#uF))guKj4F<$T zZLxAb+7F>Px#^WS{;Sj0ht81-K;b^kkE85r*K;fwx3)*^=5E4a<^Q3SIb9Xh-&+PJ zElqEWWBhl9!v5Mm6Kj2gH13^?Vc^KVBSlTTX#5TwoX@*sN5@0|F_Z#rD_dheF)zZL zj$evN@mer~+Jwj^v{Y{~A~8QEL!Ao~VRM%)Wtqg)k}(>}k*QD7O}b47@DPfF!_;~t zMzBDj6rue!g$yBtSa@ERf+r*V^M&F2R zFQ*?!0r}Z?KAW34N&&#j-Pa$y7ho|9kgAQBdrW2xM}+0D-vm zFPL?!D18#|(Xh4L8N3(FnY;bxJ=a$@h`?_S#FvCq(!EoFxoxfpR-~~4w zWEi+4&0qUjQcfvx$>XX`R!S6*za7PARHup-XZ6)zbG?qn(IS6XRYzaAOj(?cb63B6 z>bf1+fOP+!nNq71;8#hN>DT>hvtvKfjY1^*_PivhAT;=MZ19xoh8PG{A7$=Gf_hmY 
zt}Sb|xTg(HmA#q(o)B{1bDPZv^fJ zo)6#jO`nI(jI|uM`xOM8@J!{Nfr-#E<64M-5Wv$|C(7^tISKVfuYO4KWMft(8J za`JgV?K?*SdnHdWvtIZcF2PlKB#N8Qe)jw{0v3Cm-GoF)TM71#(nDD~W5&Q7(nOmu zI51M_;k?uqp#A0Y4C#!aL1I;1ZjBSohqmgTHpO=xv6OKae3aUr4~Kz5euE-rR9zzWh6aoH3024sV-D|Pc*gEP>b z^yeQnw}ge2{{KX}jgB7$k;I3`te6Xtw9FV4M&@<0aq23OXeOG6C9!%WJU}h74sm~c z3;X>r31?mjSw#K;e!`b| zS(%60N_ub3?Ls!Awg3I^j3PC0gmG<2kTDZXNN_wy!GlyJF7(lS@G5a)#yxI3K7Jr6 zH0V&|Jmz(hw6^vsV}{B+@&1jmmdsV`DQ4visO=5ReE!A>Gq!r{Hf9hlDNvK&hl6!x z&>_S}cWSF^kxE>P36UP4>Lk?hmV zz^}zZXX&yK)+SmLG?H>jzcO-#oDLEliU%=@V^AwQbnF+Vw&Q(KgdyKWfD9YCsxSt`7I{th&$8mX=|_*D0^!>+9Gniy)+0beX`p~G0wWB3pk z3&WiD^FNl$ML}zW+0D4+An(l`cpte_?f7rgy`u>4q^JhmTl-qS1=W6Y-~9S( z!Te%OM@ZZuc)mL5Jkd1+6L3DfAwLI9N0nbO4*~IjO3a9<;2pyVPCw#vV4@(A3LRcG6(%>4$PU|fLV9^2r;XX6 z!&zWP58r#gv>r#YFC-e{Te_x=N)rztBEFN$NLN}-U_)|R7iBR;Cd84w1%n6GXbD*x z*QS;Qob zLIlH$49lFwV}ZRTO=9xrF=?3|?8Enb8b!5mCxW_kK_mGzSbAIJFINmG`&W*9m~M5p z=+8NxP^shQzSb7#7TEGO2Hzdim+l^PG3X;Z?OPO+JbzM1cQ@`t8MM#-_^&42?LPHq z&ve`u+Lp(6ixpEnoBZX1G9p_^r6b=r&L=U)*7cR_nXlzemyWW1qp%>6YfbenScwP{UjJ8%)iJ)xz z-LRx;=V2L}EXW@>dcf2l1r(FrF~H)eJRCW_C=b?Th3A%eA%6^rAV2zQ>BSANc@$*` zsmDJasQH(as4AXX3XZ;U()4s;`%zX5Z0)_*Kv1N+Mxe-TT@;eN|{{Iy7MgIUx|hi2dPN5-|8>wH!n21{Or`4`zH5$q!0POJxE^D=yyX zSOcm=tLrikpZ)61!J9VYv#Uo0n?On@KlmPWswMt3SV%xEi2^K7ltQGaZ1q^+jxa}| zvdP$7@+|B5G{IlHq-dGftK!i>q|=LHZ7kG-x3e-fMfFipoe;Acnm0=91e$=Vw^flq zn|{G+q@`BjP%In*JJ76IsA_SzK^LOmBRueHMIXe1Sg6RTp{wkG6-Mu#GC2hs(t8ob z!v9W3tjlo_8h&lduxEUY8lDJ##_?RhhLr-Q992AO?*gkL>uK84p<(SN*BxvS*QYV~ zYoeh7ZscM;Q44Fyas{LWnhMs^TU~%=McAcku>y7c`Z+JIlJK%8vein7)ueJ(Y*NX( zIQv&hYVKyP(Tn;+!2-DaOEO(zy9&0~QQn2A&Ap+jmyhCDXg3NtfDKSW%#N=6Q@f$k z4t~s8C@M7EX)V$#Q9AIDG=Dyaff!yiZ*v+Qd>{yaW)nviyq? 
zX1w*s5c}Nx<#FDLJ&xtrZ$Fj^IxfIS-Q2u#aPNq`PIM2roPm7t%yFxrD3LiU39IRF z8zPj|h2z}|)~`Xf3hw8Q8NTQnH;a310&6Vovb!3Mez%B8CwI%B4PkD)EqX-ET=>G$ zx0AtA4RcSL=XW6#Ylfsd;_=W!JzaVlnZIKI{4>9(a#3%hl*5ot?tIN1I zrGYdvFY+aC8ZCzxn>9}G7Gp8ZN+QuI90{NPg#~C<-H$5^HBYhS7A`s48KawDk0?E( z(Ij7^TKfWaNZCBU(TQ0_Mm=r;`a}9?>-BeCY39@5jBQTG$sK#-I(Jbwz(6OCWGIXl zvNAZhqb@N}iL_cR(=6uQ6eu86Rz;T_{Vl7ek5>6OI+iW}5Wi6o0+;{;myZeg$XpKbO8F(AknETLT`L@?CFn+L9Xyw@p!%FdSV^(9<&>dvyW< z|A*>gfg%7X{y5|P=N&Oh=t&Ze9&1t@uEQPLG z;_q#$L}cH{vK%Mz!bVdb`YHEwm{fHfodSDy&L`VH5L^`{vB&kV*Ao8JYceY2$V6z^ z=G#8qzP5h+{H4$JcUDANKfyij8y+})U;7#GXa;qTg|3PXx+b74 zDy&4JbzPBYnQTtvk0EFcYGA30$J?aTt$CVI%!}JlfL1>S@#LcE3%HgQxAc(BSTg0b`4LQJpGW(U|;iMdz46~%|_+pWruiSuF*<(Q-sMr(VepM@AAURO+V$MVoS@Sls3zhunXuO%_< zCrzcsZO&Mzn2G(APZ#Z1gH5s>A`A)hu`PZpA?{MBIjUFT`gdzR^Jj_@Xu=QqdBQI} zF~r^AonwU*)mU&T9%PFbYCt~#9#P$2$-g@o8ZWq}SZXW^edFr7`tfe-=?#5c4Mn#o z-P-RYCNx|K1jMvcV-TT>1CHJK^e}$_Oo2y6CWuz+X-2ZgCpYqYromDLi_|I(m4uQh z!G%895YXCi972T!sB|M>c#Y-Y>3hmQDpu>NQM%vogx|072#Uq}k;_|8R>sSDkSgc~V|$f2Cx_ayT2Fs%Eyqybm7LPC8k-O&jSuG;eObTKu(JTs`XgLnH(pg z$C?k83Vk>Y)dB#%j(TWKo7_)C*zp8N6Xyi5ZcK(5Qr|Yk1X58kC}@i$MV5gecq#p9 z+~FYN;+S=^9N9T2-)jZ`#Ilhy2N(NsQaL#-3vZ53#(_q@_Hl&|G)W+U?0WIlIHq6^ zE?d32KCf+&d^BbX#Odq=lPT3~H|MoE33_j3Pjeu3I?5;W+CKV8C2!XDk}BZL=}9ea z46VhTG}pa})l2l&R+~jMV54vU8DS-l4L=|eo%F7JfN-c^wMm)8EoZ zFaBjb!MUxbBx`nd-mTz%x0mcpff_*E}p-XPgAVYP7^_Db7e*Ub$q}Gb3}Y~d5*U|7Z^_E z296MxC!p+sw40eSG?!mScJBKWc=TvpG{sCh!*FyVHoJJ=WNbls_z_&r)4Dx&4u9r1ObyTFDFu zmpNX2v=BV5DMS|kVTyOstjbx^=T3y$%-u=dcN^I#JaMo&hahqR1RWV3b#?wC5oXJ> z$id(BzwS3x1z~F6r?%&PtLtKKCpu$#sgWir>(Tg9RkGiX5#su0aoe{6`^JupIv-TJ0z9%PMI~p>a$5L;@a=hOOu5ML z?0a+BYSTu0Bbljhnw$2t0_dR^r!oaL|9YP1in_iOoLi(l|5_ZF1W1anQ6_znk^0LN zAGliXS-&RyiXg3S0E7&J$MpJuUgy__R3W8zDmZ+N26ie`yQY`Moy~1VlCn9CP^&}Y z6p|lT)ioLG*RG+PeACn|pJi7*(bIWml@|XX_5qT!`>BX>U2g{!3Xf|?HXf{;6Y&Ct zvE|2^5EqVUh`ML`NntyoT7Oh9LA%HfD)kWG21ukU#E;yQA|rKQBPsp@DD}*|;(lZ+p+U^#4nFSG@cBZ!bhX{-RR%>3Omq>cz+b}1^}{-0J%jyFEJQ>M-#Hu-`vh+gDQ 
zrRnIm3AhkVJkJNds?V%)#7RwgryG{`xfl%`2Ih9D0>lNb|Bz3=7(eZE!PLKSzDbIs zgN|`e4plO0|1M(FdmwLKGnCsz&mtd&aHRR~U|RErmifz;3A4eK9q%u*)&x=&!SYJ| ziDu&;8(v4&Q;2XN15AC5y76K#{)#^5e^_nx1K6ZyavIRF^qh)usk{}`RLm3_YE_UMK| z*v`o=n?dK!9|3&uD{y4ZoXtXK+3tXR=uWpAJ6c*X4%`bizDsHUy@Ld?b59_XX^T&w zixJ6RV~+|F*7(gdA3|C1;wdl#m*2WMo3q=~6`mK(Dn58a04hEgV~4sSe@J}j72Egz zu0Z}y*G&A0iZ#1B6|*Ed&&6c>Gg_lpJTT@zVqWw}H6rbp^p_@?bH|ivc-1(y4{fv} zrKFSdC*6Wq{7ued_;|9f7i<+~%a zRXUG|^sI?{)xP6RfwyxoL&=@*Y$J=#&U}}Jm}55a;^+8DZo{!#ZPuB6beR)Db0q#O zFskQ-yRg|^SlW-W6iRn-Dj2bQAF=49U^&=Ll+i1008}YcLGsYf3AvX5;BO1h; zR1b4JXeT|ONhWy$f7W=5 HHC-Xd?Bkh>ceGFDif#k-DNs zQ!#M}z}ouR6?a$yowY6(gH3Mt#3>H6CjUi(zW(~d{W$rW#bf4Itw+Ec1ia7rjf~Ow zutmjYz|K=-Dc=H4gdmX!on~|q6fKP^Ui#oLsf0NOBlyq*oiWjXsfG{jf@B$dFhW+z zrWn%XkUbEm<|{-4!TMqPmn1I!wlm>4&;c z84b$TFXqDAs7{o9Z2D`tAaEfr8EhU_E+@hkg;qVUx#O5hsxkqYbbD#>h~trOXlOh> ziz|uiD=ux9n;YBNj1ab~;`@Al`o3`1Q|9DcLt}9LNg@)tma3ZlVn~ z2F?78j2R@N6#fkaNu#onQVQfih5<$x;6wg5{1|}WDZuEIsXlo~&nLtW(gS2>!hcdYg)W;hU)HPA~6D8(HPuQkbg^Cy6+^&iIcG_v;J zy~hIDakVG^o=P>{CSvjn)Ry#ujY!WKVE%#N_Q`!Q3~a|?s_HNUD@5?S zc-ViA>9Kvj3vWD12nUtUytxANYcgwMwc?kRqCDTp^^_AxAbMamj1yXw;m^?s<&W&1L9PDWh#qj0=}*XKJes(+S|RgcB-VD|Br%7(go ztS%f0*%Cr?#UeO(IfYV`h5Bj+f!+lYG!0IUPxhv%E7s}udW5zK0u|qg(TnTS|HlFS z#A+jz)f8qz$v!&#Y)NSZn?p}dk76y5-Yo{9pIH@WJ zaJG#N-gU<;rYP0|TC$cUdQhWMO#s7J=kAr&hu`m5bv8Rq&w2LJ)z5@*fDvwxFuzpK zEuP9ZqW@@|v1xxtZ?pwrRDN*Sv-TlYFn%=^EY~XHgYC_Sw}O;F4ws+BPCG1xUaF^x zH}ws0%L^R@muetg3aC*khklU)P`+dq22BYxG`rKtY6PIeqm_`HEF0cUnrcnYfJ3;X zcaH<)&26+NxWDs|-u9u1C~;_hxhmOnnF*2y_2kBhz5CAx8RPA89wP|V zIxa?Y{(BF3H6R&I~9{%j5R!$cidp8(l7jC2zr?PA2q7aG45Hm%|g|e`qSM|!w9Z$ z0(@12d7N@4PFlXiWcqkYq*&f_SX1GWJyJl2*iPbaDb$qvohO*)+yJ8VAB=Dd0r;oEfF{LYv`|C(%t}aQ^hBd0=|6$^JS`@2T}`$_;I_wR@Y+JQ2}dFz z@R1@nf#MTmk8eU|EM)&NcXK#Cod}=qb5Sb(&4x?1R}8RP?kpzi$tt?%9)FotEvaVAH!1leV?f+{cN*< zBULa;H)=ot|C6%@OL3U|gxFv#G?rh>6X13$MO;%p3I`z~;yh9HyK*|N99S_Vz~iQ$ zi&p0*QK*p3@b)*qI>j3ISUCR`m^#VkeVNbY?~E^Lrv+{k!POExBWr5m@kn;X4PD2y 
zSz1FHooCnv?(uugGF^5B36wQWqKBVr_uc`7_uOi-Wli&kyhGpL-40b*58Xcj3Nz1b6H?+-U(tTW6 z8ptA>0b5R=AN}Rq8@4}3yURqdlcBVz!2skx^N)LCy!;dwAMsFD>_WIsMoi>uhp_hF zYPt+5s>()3>WM$nnG!VGBevmh_8Sqdkm<`p5Ni*O;7#J)-X_=*5K2|6Ig3?94Aibg znUI~JMQq~9{qkKt?)m|ehLCH~<`0y!EL)xV115cExHN~jF@UVAza21K@5o@;ClPb# z^0?H2>EJIv)*jR)(Cxpp?mU(EUPBu<;7ebti+P<#pqpK#<2zfc1yWYp*SY$%P~tO#X}Gf`>PDWmy0mSk|V8m@h)r ztk*Qdt}yAiAP(e_jNSqL9(AHl6p8mxb0KyqF zfNp+Nn)JZV*Us>Nz@@#%YjSWnJ=!mWwlzIhn6~XhW7f<7!k2Yxm@ORSXbx1BC%g}z z@Ab+Rg!h_*_9MgL&0%}mx8eyLcL5>w?KQ+?bpjO9(8Kto10{27x^bP9Fs4Cu|Jexr zSrc_->)!TVW_raB&nMLXJrp42A>6(jOon- zs~!^F&eS?yN*2=8U$S+8p(r^bjEV^dC})yylRhy6!@75m3CkZ3i@+j^IKcSjWXPCK z1yDIqpuk$EV)ct7I+SoA4R?elv#LLe&HM?0$YdYe&ca0F5XhYtaK{VA?k>PNeO5_{BEvxFb%EBUbZyA&FyUU z#-p|SV(;1&Hk*Di(8~UeUGJ)dPn7*u>cPwTbtRj`hJKX5u#yeh;r%^jx;4>9J+X1VhVA9)r4^wloH?Aqp z1i>COOXQjNRmZ?nuQP$Oq5$>KORyHu$gR}^kN!e+Gh3VnF@i0+A?WY zX=-Ko{sp$xUr&gll-~NjFk}Dtim9;7s81(FaZ$`;j&lQMO%UV*c=kIfC2srXkEW@o z68qEK!(&8NvAQ7`zuf=@Y_9K>>}3Zmur5ImQao=pE&f&B*VsDPXki+t@>^cFGmmg7Bbd7TL(LdfXYv@hEPS;voavke}R)h^qW zavri8nDHXR>WpE^_ApjrGYlzTxS@cO=po$t2p+7+rb`W~SD>YxZ5H%V&|h1`_2*Z& zj`Y&{vX5YWVjsWd!|BCH2mb5r2J>zS|Eor;GiKbK0ndf^qkH{=L(%>AJUhWcX9%)n zf%A{+2z?(30Ii=Qt)6$)q#x38$_jd!t|Fy2>gUnRz(*`)>J|0Iw2i6$`lQWCfV{|w z!MK@8xP=K=_WYp8mhQ6ZUSm`zpnezCO*iX)H%6yCI8`^e6wKqoknz|dRx6hpL7c$! 
zS&8KO>}al0m<*QO08oq<698mVT8-@t2>?XW)zDWJV&M^fRH3I-0fB`Hf|A8wt!XaS zO&!KVaO6g&RQ(x}Nu<&Q@C?7U)2=co#f~@lCLpX$60lHRSmkcehmClAfSsF#6L_yE z+e6CRX!@MEi6_?5!W^4eOz1T>-Qe&>Zck5Weoi&Bj-k!E0C;vfrL*4-P+Szn3Vi7X zQ`hXO_ISHJ?SDYMR;G(W5nkzG>XE5+LSx(h^%n(c(Uc^Wv)PVsR9apHsZhV^n9ouW zNl~a!wxlYHr!n{=lvnNa{WBeqRdqf7Io%p7?q*|iRvUV}^+HCGwSuQ0C~y?@(L}yg zEDM=sKG@Tk&c@Bh*J}nAuv^b%Y?#d%6LMlR)Y0>$_j-LcuN`afUx#eS{uKVbf(*yO zrQe<_2lM%wY04w_#vFV(l$_0~7vZBPB%3`6ei-4T`s0@eQWZ2qvdc_~a`Xl;KYc>$ z;LqrvDOjktY2+{8NRH4q?2QkD8>nDW7@|iA&wOnwJVjq2U%elQF?ZkogxpcUxyPK~ zsO_*nb_h^qoAnhI+%wcNlvj$yUQr9fu$F~q?F-G+)Z7iRqb zx~DoY_*_iOXbTGZEGOyGbP~eO{)Pic9BP{7uk&2?ZBbX^5WOfBI<~=`m0r!eE@Gs6C&eOXD9zqO4{btzy}y`P~jnQSsYX&Uc@yab9@P z<_49uxnyZF1E&XeI!(YaW154ycNilPGPevhHj8ckzretPV$9xp{Yfl|N z?Oy1EpC=={cDLfwXx%-s_!IK0D%#8@TG&q`pae9%nlE^C?~}u~;Oe@jX@B^V&y!a; zNjX4VL~RLWbZ$_Kb~A5JmOu!YZh zgi%Ce(-ak~ZuLLGc9d`!#(%g9N^%wms|XA(|GhG3AQ&z!X~S9#s`w#-lq!zhTK@Rx z8tpuA;c{1_1NmA8IoMvT>H~5pzLKSj4lQi9%VRl9PwB)^WV&!?awTfXb_D z%kX;@Dt7jK@CgR{l{|7DRlk}WkBAYTFjk_pE8?PFbL2rHmKUVar@#(U1tqLWn)$^@ zk60=6Y2c$Ue$^SNha4iwU_0~*G&?V{+40k3NS%qqLtA@MFM51E=omiJUL-8&XI+7a zS);Q}>X7~YH#0KI^~q^Aif*+XiJI|vr2^X_3!+Tr-q_y4Z=e@Y9PP&j>;aN zMF$zhzZcPRKIH6ZB4Jsv--z5hkz!sqdxd+~7ux~2LruEri@%yUAY;<6^8VSrR@*>7 zjDk=?c4WhvCdRUbnX$;%H4b9!oEoMMy_;jkokdiMilGWQME@%(%UirgDxg9v2U+I% zhKdP#-{bY>e~E6(QdAOPP=r$dvGGf4NzY&@>wgbU4(1!{#05Ee`US*sgFMkMQTE~gdVi_Ie(ZDnO! z!t#o<6I*ij`}%Do0g}Jqiu?AT>~@Asy<`#@qCf4i|*G%%Xy6)Gx! 
z$*;%p821a5gvVJfpMiN0&#{5rF zmUtE<5F={!kaSGl2W}7peuD;G^%&sR`$`N;yVkZ!VX?DerFTgKtK@(ge$2oqASC$#FWV7N1}KerRBDeA>6%8C+D1vEd;Ma?K@`>2ZoA*RU&!FRSyir2=1 z)~SLMuZL?EbK4VmXg0WU!xmnh52F6(+0d$5Gg8TjBNMI?boi$99ZjYw6qj76Uq*Iv zQ_wB*wu^Y`74I!ozrb)n=1oDEt7aqDumz&;-h?M*mNcdL%S?Up44&8v>J`92k^7Z7 zNJQDg_fI}Y1gD};Ph3V%Os|Fmg-o|*HCW{pG26jxT1ARmEaFj94B{BXWom~+xjqB- z;VLgih|g&6IP%`X)`$zHkGE}+`V?#X6A{#bG0+@}o1u7gxitO*nl)m&Jrr&JW7Nl( zf4Q4rmD4rR31IF`+z?eq18Irlg#JwYQT+LYeKd%^9q&Bk!DB2W+Kip*S8_wy`w#f5qr(N0860oX}@j4J=YAIjpQ=?7rwlWit__%1|4sOpBjW?;h&n=2AT z2-8B2n7Isj_Q5744Rr7X;NNk$z;vsNL}ZDa%;D1E9s5n2{$%$kD6l?B*T&MBLIK&d zHIAPmy5;!3`t&s$wyL|x%O&f4L(2d3R^N7a>I5Idu1z7JT09h{Z!rf;UGK3|XL9Y5 z@HAZw4Y>mrHa@|I$)hL)hO4O*d`?{swuJ zu9=P%!DH`t@@^i(7uO67p?-!>LCML6lhThccUL?C6SjVKFj(-hvahX?kFY9Z;Yk&( z#wBM0;=?hEIK?8nP43V$q^x{pXFypGBl=amtge(%ECTPz8jGo+QT{XWcZGj2+$F|;v&?`mYEXsL#JHX9!dXWg%TxDt^G1|{YN8qWXvOUt+SpQZ zb%F~QvC)Mz^FHP4FQmdAS|yU$<(dz#^Zk$wDG=Nz)$3(v*xU^$cH?%u5xh_w4d2OM z!NmxRqdraRUAOqiZ`-EAo+PBBN=-H)$hTG5i3@QSN2R2USKtd=gNtPAw+KeV`vZ>e zlyd39N;f!+*gb+8I|;l^FJ6723A|SjKNrjzBpyJPgOic2>w*ny;8Jw zYOMDo%{%sw!TCKP3R;_xj-55(BL(v7>cKsiij&39u~-=Kw~XqpIoFu}G#%t3Aku{Hqos3?yPE8Z16rSx zP3_iS5mjEzNii#U)N;dP$vPG;4k*x{?W8Uld)Coe%@=;ECz+ttBqw0y_T_wByGOs?a!r`UZuv1kGi@7(fEH@ zmQ6uV5mUtXylJ&$N?+c;u}fOeRI4G=u(6YC&}z9J0YDga+Y)?oNCofeJQ0D|bos8` zp$HeUYag>X!{0Ao(SYL~o;M4`_?E8k7F?-5@eR8qQcd6G`noBmWKeVjQj#fp_~Zq_ z3S@;GewiO_1F}@|1FExn+uDX}dQz;P`bnfRTr|an;E+;S0%>sdi>tLuqPbnmQ(c{3 zp!lBV-8|V~ysul>fiu{u_hnGNlA`SzT5-3x7D4mS zh$TbFmu?}E=m#}`0abPDlwS$&kPkh{p#NiYB@8SY3y$u$xZ;vCdlrmmrXu;!7qq6! 
zD!@q9*Np81qhbV@cn>ZlGw_vD$=Cy?AF=#OWQ05u}Z*vKF0l0 zSz@RNa810ls+A_hHp(OpgI6;t{D>J5s&$ZS7*u(x-G{daIpkUn5ci7t*FZOuxR&#= znm~ao-^PR>7Vkk9cpa zC_@*UDyV?pnsIx%;Js3#ut?NU{V>9F;on(K;Y!DfwV)Kr-7zX@nO5}GkZx|Kqa}`z zqM++qmo>=d04*jmqZM69XKJDu`ms*iiZ)V{+@a9)V(zS_h1@=;QWC@HW_!kVEqvNRuv+_i!R=pO2Rea zt3aNnrDflrtl@(=Ih1hn-fvgjv0eYV@fq4o9n>WK$b)^Uk=8$YLg!h_{)=KNaDnNT zgG1jNnLpuc3=9%ab6q8Sy>kU7baPg6HMuAm_UMNhO2u%a$9~7Rq2o2)$;UNvIJtPz z&sEWVjhD3S-e1n$krHEFZD&w_op*{}X3f#?T^4hGvU4zEC$a#d(*(* zK(D=I45^qSxyyfP@_Vda)45hO#zqmlxWEu1W0%ID{)JM`fO;OkA?I%5l+N_i2#R8i zQ)xeyl`(Fg0zr++ zAL}}@oezhH{|ZrB6v+#6xmN!xsCFImb=1OHN#k6&TZO81PS8AK(a%NVHb0xT4Rc5n z^i52=TLSa-c_=i z$V=MHkCj%PF=WLj1h*qf+uavHF0^^Ityvww2v6O<2IjcX;1p1bC{c6MOUlh!aJCcC z8)%th*B=oc?l4lJ^J@(fmKdRM+=!R-d=1~<{@_#Z-hk#4Q(ZGtof0!v6cWk1Dfzui zR?YQ`j<8{R_VA~DydZ;soad^Kt;jxkOXHezHYghVN zFX7C#o6ep6*dDr9kL)i+t5S*(ZQf}^oY|u1I<^Z~-Ye&<(w#c>jmd+y z94vrhEfsm}Q!1I4N5=DFk$)R4v%Nh`IDb}596A2%+62EOLR$VS)l3I%@6X*$yD?JH zd)4V{--4*+*hNQ7J?$0eC60}@=_Jxaa-J=v{818*IKE7r6#j&Y&X9_cgQAFDW~m9z zv*6-Im?kUby^>6vH|g&_Cz|8rwM5aV-!zv5)RQW6r0E^R>DxIC8Tq|E^?JxXm6ymR z`NIa+-|fN&)+)w}R0Y~{`fomL+P@U(E8*{KD6DW{i_tT*#~&E~J%PD_|>M<7%KyKCgBk9s*(ZqO?Tv?X^3jY@Ihb&No{EPZ%HX?k8PVSp$F& zFD9xd_=iCvUgV#LwzphJ#1}mWg?F#t*P*}}+-ZvWzgT5f^HVNHxFjt)sYH|t0Ribh zu&Bt3qbYSp5MQ~BK(~s4s9izSRX=Mmt5ol28zz22qM`oglDUvF10Kpj#oxgOJ2^V+ z$9i^VpI(1x06(83G`u%>PUqpSIVSZ_MZokja79=_BF0f%wX^2<*#1ebvzn z!E1lSq{>H5>wqfD<9jyIVX)+36qFFj*Gv4utWPtmJ7G{|G>-e}(6&8vgCaXFRs(;; zH~xW0F>w^!rYVmv?d#KKoZzx-CFX9%El5%N>x`@9!C9wyuaNHb3C&%je!fV#pSO5r z-c3{9PtAnw2nhP`$mLc!sn%NjE<>nNh_?A%Ko5<;_#WBT&}tB_B~&+`dM!uf8Mnr{E9%6VQe>dANB` zGri~mWQ2SLQY_P5F;bBX9ma_g?+mcKCTdCi=P1RkkYP(!|*bV=Hd?z<7lL zhDnBn_hmKf>Nli3q}Q~IxplN>!Av@gmy9t!eXVN8r$+rsuG)uFV)v}#riibwEKinur5YVy z7bMq8BG+89FnhNknc8cdkgrkB_*Iq1s&OGqid!F2{$zJwG5bsvZ~9KfjQmJ0O)=kS zei?o;HtyxpX>s>e<)TFLIt_ex^X+RHt4eGFILdhP}CJHnc^5HYshi_ zGQUix;$QDkN0b1Q?8`H+n3J3Ex9(KiDoJN)MpWKLeR78tR*HIL8K&D1ho(w+eSLNi zjiTOVaT=)}gz?=OUW~B0m;&~nB+=TkM@LbHVvH@H9r+zhTz>w$K&^9R@O}!=Px|q& 
z)0~G3_lex+VuDu0uS%1TjW{9xbnliJnLn=l&HCoEK7hApM<5Ut5jxQLC%J9hj` z{2cbvSDtP7bTNrmqXRuK(D9Brm)IjX4hSP|<@7 z@0e|*;&@P_hbt?yTS77y4(&bWO+Zi{N53Z#cmdCK(^H&vm!&CUE|VGCv`+dG7Hql+ zNsT)WHnXWdgxFF_M1F8JplSzY`uWd-Z%#tWTmoyAcDB6f^bgU4%J1O>{#fA&qA@$LVQJ{ z(o)1Q*Vd0=Nhg5xjAuP(Z0HMfCmbKPGPvE|x+vG6$82v&GDcR?beSRNaoalLc zTa%6TXq4MRu5Canu3Umgxyu$_vNm<@{rAz$ex+mF zz{-^4@e7T;NTygzPD~m?PhqV>cxe39-iSng?(Eper-@onAqmaB>(O_8>ML{N5W6;O zE6qy1wsLU|PQ45hRRlGcrZtk}bH(;TEzF7?MgRJkms~7nFN;=Wxv&@& z*rDoZ(*GESucPsJwedAqym$rVPjqohQr=kQH?%oZI1qEP2e`+-lIH)YS7<$6X>Uggoo?fGqf2)h`Dj0s>{I@U^=vBPR+JR?0X zT!9jz2yaH!Ea{$(s^!Eljrd#&b@b8F*pV7z#Bty^?wa+95*$f?8p6oo_-{tg(^To2 z>EOS^DhaY6!PElHu>jD;Pq-A*hdJX^lj9I!6^E+;653FWe*EUrl#8iM3z?7}H3Pbr zjyj3Q-8HSvc#5RIqW-*EyYGCO{K<7Cgvrogm=%VdjJpYG10?l^%^DT{;|%X>CkrJ% zg7rEgOzcKKSLz{QtF94E!t~pt_bNJS*%UWK??2;k%H})`e~0Y-f4qHVTU^c3E&+nO zOK^wa?(Q1g2|Bn0cXxM(AOpdJy9JjF?(XjHXZHJ^{p|e*&iOWL)-~O$dTLeQU2+#B zM`l*(AGG0lHEady&F|K{+7>h^PjF|fPFl#;c~VElr&1&ll?pl7GGg&L7Xl~%$Aorl z-``R~kl|heN+MJ}`{v(Us$f*2ouCJB#|>xCko#CbTsIUD)#%4gDn^>WCPQlEkg^ixlZhcB<3!$lUgQco`w@I;S&=L>v zXtf_5QVKmv45!h|(pT&YcCf^3U6E@b>L$)81SsyO6go3IaXG2tijygEX|@6Vt)TXIzY@aCh-gZ3TAjz0bAeJz7h9`BY=&6QJ+rQ3JFl&IP=W!9 zPKP!0h4#Bfd1}IVmdu`qQ5z|J&qPJ=)$xWSi1MPmY?I8~JV8uWi*<(a0o@ZS#bzBM zbi?DigmtrR3idWx0lv1=D#{GYF>koPtjxB#glBM$;hbZT;jBbv&N8MVM;ST>FBkP> z{vcs7dQXP>ygYFQ($FA|Mq`H3%~Hh0xy>mck`h*-<2ijttTq0TNr!s^vo4B{2LfY5 zWrc<0XhUTuhuJevWm2VghQTbw`^yrMuqU1WJi2+r(XkHaC-s0^ZibOikZm(HaYwW= z$rj9AaRu&>hZ9}qLmQ*unjRF|Hx|km+oEZ%-eG~h;wwI3uU?^Kt~xz++oUXmjEeip zET1r@{sd3>v&7tL?O(o5MxIU2_15}faYym2f3fx{c7?8M)N^?aSe+E^=Bo20r^$os zOGcf)Rzj`0>Q{58ywq}Xad(_3-wFC3qerkC9x`5Dekgk?Zo5z4Z(;iEE7^Pu*dM@KPGoLGdXF+ z(sgTzQy!M#4LLTPZBJkd4lW7U&Xu~)iHOF~_-nyIn)r1CC#6N}z*1~`^2n6z;Jm;k z3qfv`?E*0MxCm2kj=g9Yugq!(u}Ir7ZdM=MQhm1Tww5^gc&fq}E$D3xF(GCg z!y(1P1HMwWD+H=5rzE};SBAFyK@;rv&ox!D$bIort#O32Dz02@+q7AwFH&+6y5$VK zeV;@1I?vP*M%t}8Tq&wVv=~*{_lmYR&THQmL5gAwBihLH$%hr`N2>)cnEn;@dQc|G 
z^l8KAa|s=Dv=k4L{VfomAFS~#nDGky-pL^1=*Fe7^EHZC*?BLf{E_{Vx%r>fVb_^aPrsP*2DGFsDBgDyNDfW#y z2L(GmDYHSOBCRqffT=T(=r0XC2hpx8DJ=VO3pg@UppIKg%-CG{Bw6jvZW=VoB8$?e zPdYiPx8sX=o&U1G083E`6kG$?t(~>Zix$Sf)XB z7zL-uu-1ltf|z8!MBK)5)~ie|VY_$=!lNnHq4=i~S7dFSaVPJP@uV{822rU+a*smh z3(J|Gw^2tM=x1O@eNr|B)L9K(E(bCSdQC{s)l>+PTT83nP(H&_EYbgD1)WWue#W|Q zYi9KRd1#2tH=`09lx;O*os%wO5+w{5u+2-$G`O@<+{}&CVF79H@+ejipKx*awv!$HzrByHLfGSp4)O?#_^SHhM&Ez-y(=IP{Xd7fu zX@=LI&R(_2g$O@PIUXUqnrF9FN{>RGK^@d9*ret6QP7jtN= zaS}hc|HXeItnO`rhd?4)5U(Gjb%zd6cKJwNh%00YF0v~y7AVOn}7QPRIn#~ zp;f9=>Dc1N59N$Z`!C_&q8{)T^`_*M3gFphT%wCBusLHp_KA=)yBLDb5Q@1;j`S@@ zZ_&#Gehj`8%AE%AWsJ(&aNghDAtUv-bKaV-uV$<7@zTn^qoG^^=3LNs!hDG3loR>6 zTH*!rmgn^YWLNr7a^mkzRy`?azR>u!Xa%34@l4H9-E!i6R~CtM%fR~Hewmo4j$P*V_$0Y8&DX>wJ|C^kNU0_vkWkgMs8NWN5~{Z1A?s^n5Z`vGB=$$; zDF47%#w`d7`+A3!*(?1s1`})AWOYqA2#z)lgmyVsyLArEl?$jVbT)&IQtgwJc$Mt%#Banix&xm%m7OS| zPSI|{|9W)T>{3`{Ytj5A@k%p38>nD46hOsPNLi9bGoZzT5UXZjUDjjr!;A{lv#&X% zgeCuk?hvHTkjieJ z{wm##(ic~3bTta0F!e1|Lr((>FC;P~y9>CJnKandx~azla;asLK^5O%OOTSi--|xn zy+HWvvGc-(RYkvAq9uNt)RLFv;EPn%0)n9xe4 z`z9TB$XY$l;rh;_8M*3*x$?@`y8UP>iFcf@-U^;6lE{85JW=XNmJX(F} z^q}PIf;0vIa~YTM{D+nEK4yPYgrZCYh|{-3dGb9pV_k^f!UJa-USv29RFBTT zeFtDMjY@KuWUN(v+X=BY!q{=L(E|G&e(^EgB*s%4U9{aM&G&4g#c|KQSk!sFcQu03 zpnJOMjs?ue*t<`29}$cSDZwS8`@g+yB6f>aj;R`?-3m<#GzWdZsYiobu&_j&v=~#< zaSRYAej%G?1pA?2=&Tc)#a@BNravN<%=9T0lOI0yM^T24?3LqExKu{id(N5pj1EzC zG@7=c!cjJpAEvvNI!m}#R_~0nwhJ$zFc7B2fjFqbexCkaM=b`e<*Rz_HB0M=(bH8*eh!Ui@L)HD5ImbMmOB;?`lZKQkpU(h_OU z+}CgTMRIqltK0+`9SAU^3@uOSh&4O(LxoOzI?E-$BW;PRqoG{jGprr)$n}R9RX&Nh zGPjl+z>}2Vq|^B9j*QN`sQW&{Rx&mGv3?f$Gujd<%{J8$awBW8yfdr55o4@K4Y@>5 z3V@_1A3S3fHYLK*|Eo2oaViVLc)|8 z9~|IIPsww$*#<@^OYqAbE*XguhYKy8@9S`y4Gf-_JK6mi714VmH277CtD~-2eydou z&XH;rzY3j~bKtaI@;(LYNAPM?TNkX43uQGsV$7qa*fJhmka^)r=j!M~UDnFR~ z>-N5!vL}=oO$?m{h7f`XMf*oMy$SeJ5n&MJ2DHo*?Xo=B)0JB$LIeRxenIg=ShF@h zCILyE522D?KzW$sIYwv#m!d2ex=`6Fh9;U1b!jXU65C>0b>hFRH$MOUK=ByZxlfa48ioIdLhq zvue9p?!_9j{AYjEjnq88MUIG)1sHZ(M6=4rIvyaT@la~c2J}ntMs=lRmH}cHOKlQ( 
z`8jp^bw{i^e^%HhLW$|z-}OK7U{E}4?_!M;{NAc~i4azX!%ULjrpk5(2OhwA+fFeD zuzWYa@-Q?p6I6zh$Gyq3Y)SV&u!LYk!Tc*}m{g0M1bzVlowiBu24pVzBq-p9d;G1X z0uD=mHi~)&&GQhRghJY_`vK74{JTqZ6XO_uYv~G6$rsmNUKMprv z2_e{5gLCrGG$}gsVdTMibc3ORpZ+jQox{n-1FnlJhJ+CEj{S&1obnRMp^Ycg^1AE@ z7A13o#}da7Wd_L-#^gp7qhzp0gP*WZ@=zmSb7E}*N|O&HIEcu?Dxqs%!nE7 zzL2vhO{W`Z+fbqM7V$EuL~Y*^f%WyOBY|eAnNil{7jUG(%bICcMxa6})@iHFWi&(j z+k6boBVV2^RA`KSK$kgW9>Igzk2LUEZR+M16fK~e4o`vH&&c=rs_;~Fr zF=*aSJz89HYG}z?eU_!Z*%7%WW_0P4DQiot=~Mi-?Tgw1Z%W?p z76Jw{5WI4@Fw*4Dmu9mxxjU|8hj&aD}m%o-YN;F0* zA1!mCCHXmIt=82U&uA(NXAK?{(3evq&Ml-V*)+D|H>)nc{lWSfbLVYgz*?D*33jkZ zM3(0$L&U084d(C?cJJXGvh%P;Vq&te2JT?;@J^K}H!feeU*`pW!hC=3y=|OW9@MF+ zyu!M@Yn;dm6g~W6_X48lw}DB{+;w{yfw#QC;H(>b)5-1)teoCx&HpZd$nwvZ#5+1 zkX41oNvg&f3>4-@`wEv_?WE^1#&sT(CwoqENO(BZB?L~rQuvV@Opse7Uc}yzOfox+ ze(2yraPZp<9LdjB8@8gXfw zKZ>PcXAcJ9aG&&6YK0TF_Q0=1ALjI~{7mX4PlLjk{f%6yM$j7AaU}sTt((F7st?t= ztp7vr=YS(Z>GCQ|!ikmOFr|BgYnoW}&nZ?G{-zVI%4-WX1edp1q}VVUfjt6cuDseK z_4e9W4K*Qe^=yUwZO=ucvtCyo*SY?kvlOVduNFIsTo>hil}q*9%a%o5moocpvp^=- zGI@hz)pRSSvz&bg|Cg1vMw|BIIagh^^ndPN``6#zEB^ZDS~dN{8K`6f{?-3}H8xW` zYo++FQU2%JGb@F!>VHT5*R^u8YANUcouzGd&r0>Uq2%AQwh1W8<<9@-DN1F_X0ra7 z=bD4=zn>$I`v29WM|@S8oZk0Rgm$wFmLmTMQZ}ZhrmLm~UX+dbDuWe`n(CP>H>G;( zyW_}6r(gfGZ2$ADk)xB7ljE7}FA5~Ly1I*585y>vUz9y{XVy#0oc|+E5Mr7snjUuS{a-8Sl7>8P#U&y0|7S4>*e%A-%llSixpbTD4UChQ5c8Widc!Y1a+cIi zh`lZEpcQO%TxP?Cy@o9w4E+y^RQ^$h_lwj_b_<-GTz0_jPbkzS<8VoT-^;+OD+|%F zuCA`R3QbXN^EP)IH=g~<_ztU~#7e(m^dTidryH$xx8ptK9+%WL!`6vt9J{Wk_v{bL zr@sFhTUc}1>Mwh3#??$Q-&yhI5=bgs?c*DL~@ZC86 zK*_-EiD;$~a5H(gu;H`f|M588t#m`J^3Ok*nWx&JH=|@8!|ML-H1P3$A*iDLZnROg zWV+>}?S2wJ(f-_ZAMM)Rei=hz?=-kx4-h8YoCh80OnA>kI;{Ga+5u|bz}2%Iw=vNU zqyF92_toKF9@?PXHvDdSY2JN?I5z}N_?!1G9O@>rH@t@t^E=OmAN%|b?{jZJ?fX~v z+2JHF_vsT|XRm7AUh<_QB`-woe-fEj-J*Nv+-n}^Li1nV89o2pMJ0Fn^b9j@v|3zZ z*KF97>ey(lo@C2U|JxUE8(RE|Su<)@xTH)aN%h`V350LZGE1u8Za9o30ZD-rGlLTe>o+HiN zz0bUZxbga=+-(&ftGVXXcafj*uuA3K>9XMC4#-FKd+TIgX{)_XvpMHnxoBGO`)JvW 
z7w&wo`F(K`zj2~tWB>ks=O0KE_p%Cz*XuZ%O-2>s5IWGAFz~s2*Au)fB7C1FKB4-! zJ*Qf+e?yJN?b%VT%)Y5S5OA--wRibD;b8b#kHmfcXsE-nNW=ZzVkw#MqHahY#e>+; zYmexT*?s>1nkqIsphq(`^BIH60QY>)N8nmk#m5_Bd;Z;du#w}e&W7jCV>0Uc)yf!i z>n7mIZCF#f(zO#U? zhiBaG^F?35 z7^jcRy@_@BAsj!nHg2wJ^xt1x z>)pM!(1*GX#&C^1jz87&+0{JFu7IAW>cYQA8{G5mhs-dwJ)Z8knH3?Sa6t^tuU~9< z*#3n^LcuapZGU)0_1*4ZaM0uT9uhQkJq;InNWJj2>^`sVo%?5pL#8_;%98hg4lXsR zfGP28-Vab?7IKRZXBOV>H2Qn-WbpEmsvOgYQ_pR+@Nv6qy?E|0w#?7{R5CW0ZGXMH z^Mu(sK-fKX(Hx;*mnfPzlM>)IFX+&1%iL8;3??&*4)y9qT zTPlg|(p!8VVqWcA1%zKpr(f?)z93?r$0G=X2v+){<2- zE{{h>t%uH~zK0qs4s15P=k2!Z>uNgf_p*G8%MQ#2LP8xc70Ub;`D<<)cN(Q#Z=u;e z!DD{f9yQ~Cc9z?P@`dkOI&nbDX6*+KolYb%@zsI<5y1T`|LN~vZ?MXune%H|uIWCf zr-~Ci=a&XXck2d5sol;SoF7kli05bO4ni-th*Cm+r+ycLzTKd)E7){{*L++dB*%Bp zbTH#2H_>%(G|I@j-y=8f`@+LYs{BOD_k)}DwvAeV|Ha~cMOov+>**`=+Nksk2zFxm z^c3^Kd&XP%y+nWW^FfYAhmP~d)5m*mmGIqju=3k@Bh$%V%LIKJnR5Oc8Y zgvD}=cxEj3Qbz>}V)OcQqO=_G<;uFzk{^4((EII6s!frz-r4i1;=;Orbc+`EN!@;z z-i2Qh_lC#IT65Ob<684tx?kHk^~zb(ovmwSv0od?KNFevjg@5E>aK2rtBO0l?vC?c zZ(#@i61=@VY%?zpK6a*2t(hRJA2rX7#uSackX)M%}rZRG(G8bUnv7c&$43R3)#+pnj6qonSOgE3C)4&7kjH?ROTlxgFt{oe7Me!|R2PRyB-% z(#&Ni1ohEwUwb&^u|h=3M8c?c%oBJ=IVoNJE(b-nMah1yYTGl`O5=NK%h9}%bxn)g z9=@97Yw`MW)MSe%B=u(d*E&v3<&dt>pA}>UatY;apj*@TViNGfbkXs8x(b2aH;Q>9 zbjO5n%56nx5C6$m*`_JURLL^+BC1d}CyT7sVDFD4;ISp|+%V+c#U@YU{bxwEu|Lh4 z?Jox~ENXmLJ)0ZmiWNz+0B;A$iB^dQ#D*oG3?&Ib^Fq*5FjvHQ&L*DuN&ZBwWK0&v z3{dy>^16gXyz6(aIUOgi*7^JvZF>|It{sEg2;7#aF}YSR6+JRs^eK0I_r95|laBP& z;+8~2T_EBjB?>18(bh{ZJchWerZ5-6Va<$X9!0i5<{t64hOADjFi#i9-po#{U%EeC zK_|`N!BIjTjtVlV#MI+@Ime?<|5Q~G-AYMMftNxq-;S?MK$%?ngh68J%gzn+;r3GB zr28m&NM#pE^eyiht=L6@GFZM71}7OZ2QhdQ`t9>R0jScR@eKLE1;KDs9I7l==5i)S zW+A3Tcu%>6%uGHwNH*NcoL)jYbyhlYqzZ+Nke%z3QQ=ZB#ILL0S|B_-vc!m#Q2|PV zERF}6+{UuJ=i4ynjj&AxJujdFZY}&?*~INNOJHOzL`z2b<&s&3UoV=n2$WdGpQR!O z9~T9d!;2VMkE)1pdLeAbeUoH%!kX-)37ae4ZJ_*HCeSXY^ykUH;-)fjx&G*DHmVP? 
zmTMmyR^j@F)=2d1$)Qmgy8h%wF2?>cO6X5ktvZ;-oV2<+yb*1hK3`H4$p)o~Ag4@p z;7RB)BN@f0ePvl|4aF77S)nK~u`)m97vloSR#k-|2L%&4{_ZjSRv*Hr-%$u<&m)~n zx*b?0gie+@l4*d(M8_O0ZGa>TbPf=y;+DtxY8m5X3ik*{^k+qh)Hqd$O-n;U`V-r5 z=FfV1TFj0x1LGjsoU-dLH*A?mosLSg(T-um3fuyoKx45u4leHuAlZm`S<_`o%CO;5 z%l5UaH}D~n%cs0|HURtb6Kym_@iIH(t?sTkVEhdW+2l zZQ$Iy#>V+tA<>q+8ed=#SR>hNy2HK;G4>6q_hp{cj6(mN7Ryv-1*LjwN68LS(6KvU zRwqKKO?dLnhH8sn%&;woIfj@Wwf$}J6uHXn%W4dfOHZzRTTfd=f6c4kUTd5_Oz2|R zH8xi$Ut@;rx1r|wIv$v?S)R$1Lx)O{qX^73Am!s1B+n@eOW}n_7Nkw+(YedI?V>Y< zJcvmyGWdxk^Wq-#KGTm+yB^BeAMSG=kej{-sJL~YQc%x{wy$Q{LkC3dub<0bY=lf} zg7fE;P9W@SG5Dr6IFrV+=GKf3as{XwnR`0J1t^Lm$-*RA+L~1RSOms=I)Nh2dfzoG z(#R)Q@e$dK0LG8F1LQ>cn@i-&9=IC^y0VsJ)@j(UWyJSP{kRVdWDIeUUc+nRgJ^{C0dmQ{;cMy`h z$|AN-)T+&9I8@zPh|Haths`6_bNXl3gu<0=AJ{ZSrA?&gRohjaNW}@O|SL438 zp0Uu9bfBOTTEl_T@1 zWFAMI`coTT{-5#Ol|PL1bwC;FhpcZ}TvQ7K@~lqN^sA-VnjzGd78BHt79CkQIc1EW zd2E{cE2wsc~;_@_aNO?^aj_0l7UA7K8e@Mj_akO*z6pD5()P1Ze z6?03MRb(7`;YjIE30RT-1z>L3x}FdK)UdMuxbN|Jk6^RNo(Un`1j{TBHjVG?qNjgv zT>9<(qRlFJm2m2SXa6|_p44jW_D9%vFXI@R|LK4UBu! 
z6gQ5g3p*R}ZlZr{(y7p9pB#0G?%@&;Bx{M*PstMBK6!^@d8!(ftem?=5htpsbqpu- zbsY@^_ZCQ@51&6QA&DIDdw!l(6daP9&kUHJrUdmkNl9ke43Y88M03!77b9e7^lZaaaRx%JH^ltxL7>VYURbMnhqW$-KYL~M9T=>SWr>oZvVHi18AyGMWe<;OuhQbdg?8r3n_BmI+CMVqI zYH~w6bH*ve=PyY3|G|d>7b`D6sCX?^iDnjs+WKy2{e9iabgq8BT}osM__r^ha)M|Q zcDGGvg{YTwpbNx)lWU>x1*$~-c>z#fS@j$XRLhg#)4w zFF^w`u%pA61>Nso5bX+}5r0hQTusWJe|smr%5p{)xy1E&;Z!DrrrY{D$MOsPbk8p( zO^jC5VLKNEC@f1$R9P$_;gHy{mw_qmch~fe^&msU15i0o_>)F!2pOa)zR#JkD!$Db z&fz;!yT`*XABdzCRe!O?31OZlMUKX^j{>-$;g^SfLu7@;Kk7{e6>jX+!)H= zphb>vJ0>vxD8ou};3X zVVWPl_xO#VsqKTnf%zTw&Vl}PM?XX>20#xF(<>`Mu?@#(08LfN)biOXf^YNKPjGv| zo_g5ae|%b{xm0m-DiTpM*n$&kXv9TTU{)l_sixM-=el@{N;xLq19TWq11t2@WITtn z9vbU6fL%n=Z9?7@noZU*XiMfMouN_#&lqC_o6rU?|EX_H&Ufe%zW67CNR~mEsbK`W z;C{+`R?)396gwVB#1Y11Dk{>M3BMgWi6@&aM}wFk)|(zR(}y%8WRIvh{oeWVbEU#% z#of>Pl>RSw1i#NwY(m*flvVj?Rl}6s_D4~GG_54~sJ-k78A#;|5TuYSfxP0emnm(^ z+6YO|DL76p&?il2 zkmogi(SpveDq`x7jrktx2x`j6YV#6fw-869nn^{B*4#n!l))9k-6bIjYQhS!skwBsGLE8ut)~vpx_nQo|a;> z?`2-iIk(n>p~JL41Ngk8j>giI8I5Y+u^0Adrg~ZdVE)f!5Ng`jw~;nerf^f`tUD#? 
zBPiyOcy>8dvlPST*WWrh)9lRr?Z$Mz6N|#WT~_z`u2X?=)A@BN)IG&4r9eI6!3@%b zLl#T!%wRJN0z;Urj>i6kk@%EY9`*#a=eY2dl)OhduJdRk_LtRQ0t&nDB(`}j$J+2f z)J&)Tor;>L<4u4dklE;A{$tQ|UGQL$H*VUY7iU&e7GlU7 zbu#Ko;9ZvNl@`b#F8@b0!@$zSwAu`xe7pf-p9a9n+C)GL_5_+5fo-@Ie*1q{toC*8SS3ngXdv-go z!S7q&cq7y&Ki(rW5on{NSATJ2y3t-!b<>k~Q`+Tmy-bB0WAIWuVHUp8sl3r>q}w?b zX~S%VoZ}*K?+2Bh+icOCoW;;g z;KP}U*GA0rOU`^jLtxDGg+cvp(0-$`HWjv9^Tut8-~6W2V2ACupSPxHRZ)=s8Skth z9FmRDkHCmHa$+`K9RK4+6?i4cZT`#3M&o`9REnKp)#vIPu;yKjGA`4EvCGx0AKBS3 z*&%f?l-@%99k7!#I;ZP&1B=QJu=2+EEqDG@6Ee+gr^cuDn7AMPL+*lqHqZGhuzS73 z9-Akt|G9pt4_X}Z!DFo+1$qt)g$+n6h@B&yzc^8+>A(You*<7>u*VOfoEt* z(@UgBI?uKA0=qfzj&%;Pc_){jD9uVA5h2Bd?BTYNs`) z?S>i6i|c((8p92*^DTDfRYWk!rBoA%?)_7uxim)@fd&7Uml{H+iTLr(oj!5zrWs{h zKX=a!{w|j5b_!Vcyset;%*3wwHHPK8GG~(h&xkPm7tNQ6()>`d=9!0*-B})|y!H0H zHuHq}YuxeHsUKs<+XRfhus<)o7)eLwm*zBMY9Jw77)oVosF$x^IhCNb9t=cU2kE9@ z(4IHk=i1)lUsh1^@!OrgZ(`CQF^6;|X8MNvo0%;g0Eikg@t5JY06X&B1Z_L7iOQWV zo$H~9)j1w6Lpsg=#`lT*8Ba>Dg{}KjLs>+b-TC^wv&}q&fVo2nz4vJgDdaCN@43E1 zxtw*Uu29hyJhL5qXa@54O=x!VP9=s=O=5ETHLl_>MMD|JN>4{jlu&giqx7I2sT`g+ z7qO#s5vijs^ZfkeZv;)j7D`8ZBqOxQ_`q(S2X|?dQvpn9AUY$E|GE?mkl=xTEyi(% za9lD{3~)M#Gx(Mth|<@ zMv!GE*WIajw19fb5qyotsizX{K6!3I>i9jviG+DoZR;@eq*LH+1#@y@EeP9~G%f zoWLn!IQ!E>ZVi;m1{w^{+n9ybFyLsmk{*N+p%OKEWw zvB&oyu)j_tL`j|lTv+8fw6OZ9dzoJ8$pzhY|j_)nn;6^zR1nrwQ z(@RiL-A^+<`dQT+z$mrtO7=KUZ0(B8wj-UN{kHD*{E;46Uz2BTKl=z9{MX{7y|iY1 zCxnyaasCic!;P44e%@kx2XZia!NlV^-urFI-mzWCIU(rxwuT53zcztjREu)}B07z! 
z{Z;#Ou8n5%lx_Fv&G}nuf@PMmZ2v=)&tMBX$aS<&q?7btqdQqAjCcji##SwjAZf)S`i^qvh`4`5$X zZ8P{G$1Y{whbf2JM3=iu7Aqv{+$I9sJQ(_W(3M%cdA4odI|VH@MuA+(OYt#kxeP@7 zq(>x|-x10RHgJ1%f_G~(^9_Lb2Y=_&`h`ACd>}3nfUeUydm6Xe@BFHWNUnQ!yQM?% zD6uzgGM7zG{AUA^uVInmk)V>{8-euEIgcscd@*bBwxCDsXGuq~2ozjO5TnRK3+Y1R z=yDSz$RrKMJS_|bxItub8Rg{yM8$+Iq+Lw6GwZXFMT!_uUa4g8r{>1m+jY)gR}L7E?Fj?{*`bMNoDCZsxGA=m(-Urz;ibCLPZ)?U!dSYKv3(W)SbmTRkrK zUxAF*>7^=#jso6i3q+W8zsVk(g-lOhwF9Hlqp&5{t4e$QWovlc;wK9Ej+?eVj49bo zO+p8ceb)ZbOSP&NABwN8*LKA6yYYs87sKsBygw!}dHkK7UnUj|5Y#Fh@|@4|kq|-!V2x_*B#0Jzmd~!yR~B z;iSgjgBPOC#uC^HtKw`wy;F&z43 z+XnF4TA7G9%4U#3yggLT=dQCQMTXW2QYY!5SY85$6WO z|AsAx`M2D0&=kZPsGfLJ+|TPyaNNw{F~@n)e)qdJ9;yFF&W;WNGh-$Y8OX6;&v9JO z!G|qpVf6>c`yt7f#`K5FM^1kT#nHOLW*-1aW^DymLiWP+T8L{Ydgy3z?*ZhwU}csz zh@1=5a$}dLVLe)$*~+eEr2qa$2uMpks*kl3>> z=Kf1=%Mm(15%Xd#>qB+fo*2OLt6PfZ?Zj&HBX7&8^15$H$chGQdPabrs+fi{(45+j zfd*v};ttV4Y3xF;uD;dj32UHVjn8Ig(OFvCOKWTZOhV!~WAGzQ`?vVVFbSh6>-`n-y{8miR@qEZpC6FHI%i4%J40N;+@w}bP%_(_xuHXj8ol3li0 zAcewlDMKwOkhXU<#ZMsJROxF~3V~+d>qW4$hC&OoDqn@`h`1>i4-vNjayfw${8N}8 z49i;}`iKs==!faHdB-iAz@*iuzTn7%w_P2SvOop;HdfA~HLcTEOaPjDwL}ogt1uz& zRN1PBci9N!9$q{=G`?RX^^xf)wq-2`Q3-Hk&-|?RShVA-HqBd_d)*gZE8*;X3u>mz zUS0EInT1a|4`%zJ%-Z>dg_z`sYsUB3^wmEFvs^(=S_7q7*JCWjFDCFe?kfMDIs*Yw zTS-=2B)k{Fug8kD0wzul6>PvMXu6{&A@j1UC-a&`jQI~`q_a8keFXg62G3q6Uu*s! 
z57kOKMO@1ro+z~p@ld)IQM;eh7(q+hRS#Ou(Ic6JE{yN6x#|~WvG%Nx#mq7 zQIW39JZi)QF;phE+9CoDZHfiiD z;Fc+e#OGitSkp|i5c83wm`o2SnT?!&d-vQuXgHU5=!Tc9(XV@%JECh;Hr#pOZr5wx z%TJ}d_&bHx9 zl8{4T*!NE6GScpk_5me`0284KiO<{M8z87-NGPTvtIQ;6x_}(y#9iPY1HY%x106i5 z`o&w^Qp3z)655=I!@KnrQXVts(_b?&@|xs7_>^q$4+6OI(Ge)jjPuBm2yk>u)vQ7N z(AZ`P4-(~?zsAlZTeXfMuG{HV1l-Ab)BUtUY1Q#D)yHQ%BD8xxF(sQ;yF_*sK!H^t za+qdg1zr>OvX!JvM`kgjUwR>cd;gtLOzN0~H(3YWEGQ1%6KMJ;ZVMG#9#fOr?3$JC z57m#^=^F>Jx9*hKQQJ7c{IS%RZ^$BMCH9R|@)o*nX~-D#mGY2bVY{Uqt!bH!>b_wXnr;a4YHH-}Pp4K2BnrK=NFG&RiAk34?%Rbt)u`JBlogqVv zI153bACH6tA*^y|9Pgh0@|6h@ez3O`EEN4@hppYGW1@Y#!V<+p=^P!K(O_b|q@e6&3k) z#r>OE;dtc~ah1uh+1z_K4jOpzFe2Zvjd%w;q|7pX+_0}|yGc;Y4yBbv?Z~I2kY+zjAhsu6%-Kcs{ZzY-WH#p(@F1kc_=V|k-(T_b@7M= zOaxF5Ptm3VqnHWe4Da9<)h3zqR43|4XBLEC3jz2_W`)0?RAqn{7>1_f4a1&@i3#vJ zU(kIJ>;kO?iv6{BId4Bi9ies%M{UO4MLcL55vENy1_UVeReWe1VWQX@IYQZ`cubSn z(daZ2!%2O@|Ao2_;3qSY)oYx1SQZJ6d!ID}cZka{^?s(XkJFu^hL-9Jn|VMxB=R-SdQB`7ID)yPr!eTqXWa=vDO%u!c6Xb? zIl~;_GPn4sxknm;S!0SJvdGkEn0ZNwEv%&%7!ZEn%=DO_Y1-0c;H|-xtn+AVa0vpw zckVz9RG`(01r6JHcFM$c)zJ;&Bh*9_N>UfIIqWB1{9_h?f2X^aM`{yw<089#I$RM-I&P0NLN6URxQCC7aKbt z4|6%~Bqxc`d?_Ps#Az^tY8p%qG+H;ZT(w;IGRfW~C~ea%!54iaZdF6-|s zXGz7EXVh@{w(BgzIt)E$xU|B27g9|RSj4NCE^gJFK|<#C(BtXmEO#R3WQ}sOra(VW*z}?;>q@Dmz2u_>jEF1pP6?%L#UibqTtyX&B&H(C544fm6YZglSbL z87k&JxDi^C{iM@C1?s+zFMx$#qj9;B=H)#6Os(_SjyO4B&7f2c!p7!R`#Qs$?1Nc) zYlUg(B(C{d2%KeUR7=Xiq@C8dOyt4!!-UQBM!urJ8l;|nDnS64oNM&-!`7VilS_K^ zW34*ht8Z4C6b`m0E3^lIXzim}4OZoi%eTH9F@L3hv!zdzbz)an=0h7;?ssbF&G|GU zUKta=^-k_ZP{RIc@*e0)gqe_0>{OzgI!bvN>h49`S%3pXXO&dLSC~%M}A`Q;jFCG06>4O zxl3x(A{w5Naz6V7tR`Om{u@h{8SRdg_~wGD9dHfaI)w;83BL#6q6?=&8Hc|@-dSa< z2a$DA_$hcIci^Z}FOUqQvv&?-s-JOn*JBABfA(U_ZM{&>xmkabOgpuUicHSjjM-|t zYBNC$<0-H9$49+6geWD7#671|ouJx<3C<3Xke3vfSer(3>OJmp`?_C1g9}GYZF?mE z{erTUN08liA#zOO9QF(von;A>=vYyve`(U`IQ} zvMYVi-j}`i@FWL|F-}`paFw=Z%J;&OzAY6$GLd~s7`^I3)NWs*@K1G~j?ZZNe2S0s ztai>y+=idx-&FE{0Qo=$zh@6b;xDK$OZ!hz8Snb%DPVEDJFBc#_H&>FXYWq{ZI-TF zdflxtR*^8-m#>xr-wU!-15aHRpQCJw@@ujR(ZzL-$Xcrsd` 
zbupz(sE#ukw8|Qndq0h{AWJIV3pi7ZbD91dyc0vMT0u#5Abp4}M z2)bc`bAXIQ)(|SMy2BY@(LW9!zt_N?+w_eLxVI)?_2ZM*-aLNpPbXG}h7zBg0og3k zc*8#rzi|N>;jxLsE^G=+Y~8Jp9E-{Nl-0|4$S~(xQ7P#n)`djAv=@o*P=FmFgJK`f z>))pI%ZFa~JdM&n8NK9Qh*s>1e-}J|_#^xFFBk|4-1^IPAH7*chS+Juo=O27avQ>q z9gF$|&eT&$nM!m(>D)_fupe^CS=x@#IO;7$v$Q|zvDuCOacn}GhY9KKv%}5IVFyUD zSLtb(hE^B|ulcNZ5C6kVVknfwSay2#pPT>mjq%(T$(ecmNAExQ z+C_45e&B)0Z~bKSTPs!0(9i3^j~zF->x@E8$cZ)gk00~XiGVrz%GHhkPsO>*=+Nf`0X3+zM{rWGVxJ>sRi$*a!gE^n z9rC&vrz~mAVPB2FM`s&%w?~3AP>F{W9~tjV6lI-J2TJYiq8>-R;tbI&z0J^n9k0&P zQmkQo<8frb+o5Apfip1=O^-a(<5x2zlF1vZz5{KRPH?7g;ex*HXF|7S^;#olq+H%h zUODq4d+U?)w%ZSU_@w?f+|oS$n-dR2CWm+j^UTfrww(bL%bN9GlbDC4(cuBCazCY= z2MTbmUZ&%lP^@mt-mjar^n+2frMvxMX$SP z2u_|o?d#6EOb)96!Dd&N<+&)u&?&bH4Ty7)cA|NC|#VgLXT07*naRA^CGY_am(I7H8+ z*EGxpyF9{vfCy*ZYskJqkjB}&CXF-T)`J&+WQLN{&@$3E=IB{peVO4r2@1Cp$7@fF zavGw92Lyut&Z1&l0y=^Aq;3l^Dhf*jPr0YO9Vj^ov<`w@KztXhjADuz&^ghj^qlIH z_Bb(H<_&)4gR?&Kk0Pf5U`}D20nZuG=3r`;2F}6|aS_faLy7q=HG4pWPf8~DRPX_@RCtX* z?R+A<1HBf)Nyv|T9Xb1R`xi%q+4lLv-#uaIA2sfwHK=d1$~leG!#lVLXHw7m`J;w2 z@fy$oEWx>I=_#!-+0w|^y##&)*nHrFA0D~j;Qo07iO!?|sd$-rmhS7u4hV*%&xS31 zj9ZFI(f%B`Owh>>O9A7SdWNOOR4XI2a)NCZoG&rKo#J&_QZh+Bf-6`@Y;i*`dGoAK zFX)k^O6FBejhMAvyVI*f=>(76~<|LQ1#lWYJ8(P3^ z@yJ*`Nsdp}NyK3U9&jEYb<)@g@uC7s1m_rPv*9>iEIxGB>AOT$%pW}Yq&esB-@jR6ZXz%Wo)q%Xga8rTOY$&jsf!K*I2kjFK0^a7 z*lSE!;~FE4R5-)D(>0|Rm@o_Ewk+Cb=6`>1_>gC$5>R7!q%%G``#cXt#F0M*XW+I8 z)P<$oYe3z~aMt+Al&*$z5MDk@Lu^HYht`C^79-;wSRgRDK!TqPomV~myr<|a?IpyR z6H7^qaLk$Sfx8O`uZ7ow$qX)|#8Gcs4MAeSdEx#u&sxkQqKSWgu+{#1U>)l?XY0XF z96P{06>KrjHbdR%5xTN?C$ltw!b4AnC^{I zhjypiZmXuQc(U)nZ4BvbGcfm-$tCh5AI;0})JY=9j!v4rzU{8ndth|&`5PgrXY&0Q zE%-edRDWA%XMpM!>0e?m#M&j4?r6OE@CWDzz(i~5HDe$8?!*I5zeO*d`5&(t@Hzg- zbB0dYv-!50GJNRW6!V7P_d+IT*^SMQ{@CtkwjtcE#_`w%6-}$>> zMYa0|!h+0S7tcpFiIh8{8B){!ugwzTh>$~a@;{Ut}7Fhp~j)elUZbJ6-s?sZb< z4}9RwGmhEbbsy+E^w6R6Z)>jfnCv+Gtz!pwqf4dLQ~$lH?_~=OSHts=7M(K+4~ZOc zM1l!axPK^;A4Nz0p5gat+2{*eC1@Zy6UzMI4{DTN>1Irg(&KMi8iqRhUuU1VEBg#s 
z`G-lfOaC*e0(KGuAj|3`XUMB^Or)K1o-8gxCLfb=fZV605jQY8ip@&1wAZ9Oj*@X& z%0qh(g_{pdY(l^U;#r!_115aem12%$c2Ts(NPdWp;?E=cUIL%yk?&}FIBYTi;--tN z;jS}(dHlcv*X_~ATiGOO?4`+hL!UXge-0V$)@#Pz|Kku5JH2G)xny$QY|-!+UbyD? z--5!KEzF1)vu5(cpB}y3>qhnDb$)-0G8dsX3%N;(&d=!3k~m|vd;2PuUUq9jW*ssx zcU;iejzr6&e;WVS_pH5TjQ5tw8fbZO21!VbjW}xAsLPmnV=5=q!FtRIGenD!lZU*U zWu{;y8J~SRt!#6e&qlf%(5wy`((o(5B(J~a=)s-CU8}9{oxkQ^zB~DVmD1_9;#Z^l zeP!aFu*Nle?>mDim%j%`|I@%uM9vStw)QQTPCnq_H0I75Mi0Jt;?H5*#_aWv!(M-G zB>RKXY#j&=KrjdIDMB#s0|3SQ+#9|zVJt&W;B3-3o9E7W%kx5q)pw4)^3>5Rr?KOh zM<>rbYu%Z5g&BTm!Qe;tfZ*XR`?e!F&0~Kaf5%7H|DJF2n20mZBCkKKdj^70qC>$) zvOJ8Cuv#`Y3g%uW5cy5hbHhact{=@_XOqyvm%@$*D%l_F;51u0%zKi;GCSL zn;JeuB|f@tORRs7JOsjI8px~%@g2vy4f8*T; zhnxmRCx3A9BvtFRuNmAe5E_?Vy6#Qw1t$b?cm8_xps$O`dBy^jGh3LD9*yDU#JRxg ziDGAFR||FTH6UbTz6%C13b1CyI9D${{U#K@w#u%D`7(h^FJHIkr$>KYkumrWF$)pR}XnQx$;M@9ipgHbAtw8l@)h01PReKPHHu{ zE6M&}vGgglW&VuAo*mqN%P-b_==UJ5Tz2YzSbydpT0!W%#)(1yOwwko?uN;mp3y(gHpQXs+tCvQEjXZmC!gOwG5MdrXc;1h+H(D} z$=}TH-^|N-7MTAn#3TS{ik>dxtu|m{oFSy`D$ZE>8#+mxss5ZZ*>UJFlJ?!Qw0VuQ z$aZxn&120gZ|fh~(!L&_?DMOYeB9;}lV84Q{g>}2D-xDaQ2lwwpw`#5%Tgl=y=>O}>r|x`S-;Q=B@7r~ceslK} zGJMPqvx4fY*H1A^OJ`|n3&jL{yeDza{;7mBF#Mpm($l>)or3x^23mN>|BQa>KgO4b zw`IrZP2+ouc0k@GEyx1FJ+s7qyil#M__hJCeb8R>vtncqs~bYa=ii_|<_T<-=W0+< z7;9;QEy%kZxmDO~TY1m;HDq*WpR27Hee-FOUS5YcEFg0Jclf_?&ZC1M z2g;L>^n$@VJmRG0;r_FvOl|srm?v_UR+QqAhtZJ@sACe?f)E%5km6wq7mp8pCQzhX zwR`)-03tdnKaWk-)oRRc6Y{fvh%@B(nk<=l#Iu5+du|=Sh}_ZHe&>zbkt?dJr>%^yF50!?4f}jx3q1axy8ViF!cb(XWKaemGSW@hz&vcjuFp` zctv~mPX^w)D;YU$CygldeN%Lorp3=*DOo`tWFbC z6P*Q?+xm3D$Tft4lARtyo>9SoPU18C{LOtky!-aau)^#7+t_KS9M zwJhNwl@QBGAQ{PILAh&sj7%zUZ`BQ?k|4)(0aR#b_f7?`@r45^K^CHrN^`e#V`KkZ|2%yeuq2yy66h|V#z?Y-^$5tR5ELT(8h$8hG*_#y<#P> z%#nPXAgcS{%P9Zd6L1$W?QR58Ov0$@Z)X_4LYE(Q`mR{#xO3k;pT;*^%a>-FY(03! zYljZpmOTBo1*=z0-L$m%nV(IrRDZ)|hsI_^&YjuQS8=PsPaHb)=Vw? 
z?f7T^yYn?GyNd+I=kF@c5$4pVwC+4bpX*9n3YOS)zO_h74G@V#5aF5cFDc;J3)3`qjN@-{0g_#-{F&UCI`jv z)e?)N5OEfB5`ER8Bd?(E!TkuJc@R{YGLA4Z(Rs(#v-h9NDScz+y{!jM*C{>znV*fX z%xOqG%JmHT3!uF+GTu24-8IfY8ye&Ju1_8Z-WM2RT*SMW2N1#xS_x<7`^a|+qugGv z^-dmB2t?;9vowjLeB9^>uxh+V`csR#;UV2W&}ZqqkC@rQuhE5w%e69Sf_4za5t}g} ze6z|Cz{HG_wwM$Tg)J{0K6GJ_vizpe3o~Iv|NeR8+n0|%I&Ni%cz60g@YmLw=l9v0 zf@heWHU)Sb$tP;x?C`AxT=Q_>K?e`LWw-uk(eK1f;$Qzd`MqC_f8*X3=rgvT(*pDu z_Tltd+7p+=kzw-y!8x#8VP>V$+r2elnOQM&=e=n!$K`0H9yI{+TRD(?YATTi1-Y{~ zV?(ctdwXL!kQlcp^CgWP)DmZp6D%NK)tkUa4ShU?DhPC zuUs8#m$}FNPv&=jzJ%IGOyO2I>)NDF|V*rQy4eNcWynI18?d zaj1!+mzDWUL20wkYiFe?J!9`Z1~N)hixf;C2HKdYrL?6OXTj$JK{|`>vDObSHpc)6dq%yy2kq4u{Tq z>82n{zu9uDffN6w|FA!dzx&dOzbc%mgfmv!iHvY|QYq#=l-|AxReToxI@8rfZ@C-< zd&joxuS%6N?Ro;qDcKp7QOVn=9ZT-UaIr~OA4VANT|2vPK`ENTVb*94e zB`pBEo55lbIwvqmNagHG;!KXx`wVkkXL^2ULCE{?k%x!AezKO+_|j)~+H1x|#||!l zwEVym^*mBDV;&~(ahM=*d;n0vMAM4Ok2+c{P>YU2kLi|*23c7kC_TF(rJ4TH^MQPP zz%DogaxQ_0zyT9>3gxK2=6xJqhmL4vy7&q8LLdds#h~*zN}T7m(AeSnD;?1aMkKaG zN7<9aX#^(sT9>o5lu18FKpBW?5-bh>YDlB#DH~AXh)aeKqgyrqGxq&5kByC%@$fNd zM!iCp?F{)MlzC>H(~z^z+GXhCqXxF*_|RFpNKs*_tyjRA)T_BvFFo(pbhCmHy^Wck zY~6T)K-cmGYY25!L`3Fz9R;_^RD z{yBT}*gqLXlscsy*8elbRT>q3$~(%89iI8~^J|1nU%MNQo$Y zPjo!gI_NkaBF){w$wE~>!oWOB5)ngMRLh&qZbUjNk zp>bq&B6T4)F-v>F%r7N-213K$zloz;iVSHZoLg!OzpJLRRnu~qvvDK0l-#{U8_#o1#v0FU9^Q{o-E`4x1H^GTem zm)@T;Os|KrYqa(Ziu+@HXL4;AoPCAYBW2yTA=mRfhVr^NkZY?(zds}7maGo$8aY_o zNIkPOZ)^B9a_iPiS6a{I(ffxW{#FZ1harIA=pyu|?dJC7djl(P8GYCJM0r*(3lR^Bp!v(Cd@Pf`}-ckr#9y71fU zKmFSlng03?dBe<)zNFzM4ztIB!*9NN{a3-?f95bT36c9Xa%#)(3juFDE+=SO#vcmuVo>!Oj*C-=9IpB{upP0`9wrhkhOqzLS@emDAk1ZE4U={SO)C2SqQBsi13!@Ra;^6Fc0+h_D`I;U^5zSbZ9 zXZ(}Du$P(K=SAN^ubA<^7li%$ZFU}f+tT%4e$wA(mcGZ-#e1mRIn3!MF(+OFn>QF! 
zgmd-MQ(D7wt|%jOD;srRAzKrLJToj@0A%*($AV~WQIc^RzsiBKjGOAhS}K+Tk%^Gb zE{V)eFGB_=eS)QGor47_4M}v?|9W!rb(b#p^ODCkuA( zddC?irY<>e-I@3KUUs(r`FGZ>8J_*+7rC8;8U2SI(f8Hww+ea4ijjd3VPbqsX$x5w z=I^+=AH-06f*!(mkchQJbU@@aeNG@o23w5#e2P7imq#^9n|mdtw_P;&8Z-IBBF=z< zbi}|2bJgmAPmr&$iz2MXyOBGQ)L+wxv&6);WZ%kBW@*|uvWVNFVp4%KCc@ct&BG$E z5qzwiY2Aq=U=>-K&7=$;^HI4~gRw0NZNW+4J3v%7TGp=eQ%d&lZR4M~opZ(robeZz zty|ka>&)Hy?B%M##!Fw^_|KoWpmKDWE@pn~?*OrkBLk+F%%-5+ z6yG+$WcG4hybSZIIJcKpzh3|Gdjn_axYvEHdEM&Ja;I$v6Y?oOu_9?qP;H+7OEDq! zfJXp=;R9;)NZ(N=0MQUqHcwB-U7XGkc%tx1@-Wh0HuhaO4-t5b2xYid^vx6x zLF1ubh|Ku2!~G$11Om`InDkkxLR|8*oHI*i9H`$_gF+_8l7c)R+8JjsZr-^UCx#P= z5lBzC;|)I@|Lxi!Wt%Pfu?%NAD90HX)b14XBSw~@QtPVp{F_iPEP|=LsIu$AQ%!Xb#>Oj=rV-_6D&1A zb7)|7LsM?UA+q{wiTujD$G>l=4Q%w4-=A6$o~&&aY){nSq8|A3_>Y7w!M*UnPp0l9 za-O-3U#_q{QY z$Hdf<;uTWGm^dZ?k;Zx$V0994;Of+5;yYqB^P12d$O%YjXt*H^>U11QN`tdDh_T~8 z5K$VO--HJhKc+bYf-r=8q^=N=#5lnm2qr*4<2-cz3>cLu&H&@*0a`cEJe10K&b%!(OsH`$CQsM`7+=qM=;Iv-K8}wNJT-aA{bUlG+jvt2 zoS`S2MYzR`f}ToG--JS$S<7tH*69ilNRK$zma74l@gd8sLuN`dYgTs>tz5tB;Oh1p zRCy1du={ra$x_CzZIP~foit=tBe&X(tz@a*Qq6#yrL9ZUl!jlFS_gMs|OWt_8X8 zBFtH9B-(X{bI>0;Kf^+RhE=WagB#s36P;P@`cKy)#b?loCHi~3;-z4WdCC@Htj;|q z!5hisgc##ivjCBPL=ti6z2dzleaDu+6nAb#Olc8E+;C25Lu_##cxPuT*5HhEh_ zV3EQ+g@l%Xz-N!xYr>tNMh6P7L9kjNDZECJ@<@=GgxKO#$wu6Bm~GYE+_|d;83W0{ zfX(oet}t(-0fjTvqKzSZt+mzq%Pop6K!)S-61;ZvW36#7rw`^>C%BF%arX7tU$!mk zI72r$S1-Nq*6>{0XM8;Svxyw<9p!6UmfA(;P7`x4$ApX$SoY`YCM2gJZS`wGa7!fe z$}cH^F0zQ#hUUC18+M?8p7Vm}L!lO!gpvM$t2wa1Z85|a>nYJZmwyM7V}Rw{9Op<7 z$C=c1uTinV`wVj;4XCHcYRM2wnX#;~b0Cum)o=#3j4=1CLKy&HxF*CF2{X_$#&ZUw z4iSq8XChln(1PL&+-rs!YbfR#Ce&-RXDk!=(B!?aU^U!pF(CFmVhadL8&()++=vlz zxqm4SRS#0`vAXvfNQSv)#8U}OAcZr-+(c%oIFow#tBy0N8^<(<4@3Yl^@=zX9fkfy zJTwJ7WU zS1-NaTN5(V>=nta{1JJk7&_W6SS+8(^jRt>1H~C19S9O#Xtt|lk!-O;7TPPkk7=2arQ$7wMAS$ z=0aidJOPdg{ex-ZAwc{QgGs%R;T=aDYFZ)2D$sYNRz(G6OQ**Yu~ieERl{`-1N`7& z^1IxdHQu@z_^=1?8^*c#ffvmWlh2w*r!MCz?Xcs(LA&OE{V(1UBD4bz&zNt>t`Csy z{Mbxi*K&Zcg~M>h0ztsK6yuyKJ$@6)x^Q*2s&a`Q_`Hua%l^ZxMxQC{+k*ec 
zCEY+WzYZQG3!MIM;$Zi8`^$G66vo?=dYVpvOR**EI&srOGxprxHe9gP;GSmcCeZO& z@Y+WBjc#Z?u)>(|B$$ccJTkn;v+TKNK6BtjX6mzm;RuZ{n!YL2*uLXaFMg*qlIYj9OY-CYC-lW3fW% zcp)e$y>bNk$Q&g*BsY~Y0<8G*n2d7MLSUU%T|E8XyG{$gJG^@#)~eMinG`yFdq z-%`Bh)O4Tz6>%>4hH^2^l}nGiHK}Y=y>c0KCONXW9trAo@mv_Py1y>YuKqiz{JK8P zZQY^^**i>U*V@f6m5IOV8D&;-f2 z^9Dcp{6>)YMC+S(F|U1j+2mvX>}CTWdR;@gF5a$eI{49D2w|RVedlff#dqlZ;}CZ4 z<6a1skb1B*V1Y$ABc)5gJi-|m&M0uUS`(auY{cbb*W4f4?zZpDqvz?i?DeLhJ;)QU zR@^lfEYdj>mxwde7W;3o0=@Pw21Feoga|lfCHD|ztfA(|BCmmDX<#$6lv+Xbw=H8z zOCIL>ZYC-Htvfx%g6~MYM<|p3f)xo3iHQ|>=;8&09EN%Dl=PjSlOV)R5ze8WVie^b zr1XzFm2f5i=yV3onGpB1?r?EqCv9;a40XW@Sz94GPaEd@^uz%Fox7-#CKVw@|Np6;yyw+J@AZqs{S zH%7fU<}RCA3C2n@{Ey1X-;#AOh71~H+O8oJ^!Z|F>&I~Yy)aN3TY+T3L>C`^09HU~ z6~?5-8gX>SQ?$T@V^FN~)(y>T9||qETQc)IuO4uyxyD-$o%imCUWwteA@iw_y#m;)>vI{WCM7tIRI?p)ElnOS6FCNLrZ!JMqUc%4A)*MY`x z%!6^puone5$CxvmDGvdt*sN*mlQ&I<+dgLZpY*O-Z<UP z+pivAJZ%57_27B$NKl%}q>%zK(gYJq2TnqQM6kyYCJ~^@afVWysij0i!=r=Ga(?#V zIMewm#yO%&1dw8j(dMoc4}oGsO$LGqx5W^z`J>0kG1WG+bvI40se{kV#BUt&pq8-I zaIZ0YO>hGxlb{XZo*ic3^u78=$mHRDIywL4kK>n8%rh&#`-iE=eT%`yQAf^r-KNBY+2NVbpYg@L8)4IDeCiiJM^ApB zHkvS537mn)jp1YTj&tSG%SEU>eF{&>_3vT+48@Rs=1*yaYQ@X5@?FNN&HZ^>RG;9f z3C@)R{shweoXIa6!i^nTQu!DkJ3ZqkCxS6WCL^`f*Hbn`Ehj(xgNem&A6Q_s zQ?uL4W-fc)6=64JUa2k%Ua>dp#_*Q{e$lyLD+zE1}Fwhke|;^ zlV>+GgN%@gqHfZ(M9m|3)##UYnf?A9tlXh_{il3n&TIZS@!vO3t!!T|*=FE?UHf<1 zh>2PDtFa$4Wi%ctW4jInCls3Lm{9#ibX4LYC_EJYqWZCjKrta$=j87S{IgyNVy}&^Gh)CC)mT*CMOLT!E3wIE=>kloOoA8%}aPT=D} zOmP4iOS0$)&wDRY4dv$%rkX*plS-QJy< z)p*ytX20SuQ@{D+#P25>FWRB8*RvX%ky{?&@rlp=a7y9KEzKXlu>V;1V|9a@^uO=; z*+<+r`Kvpp{?dez%^FL#?SJ7j=?-&qIvXZFL$7X62`|3EA2CO`j` zvFoWV{%)A%V`p5~_vw8?o<3(@YLshPeOW#$;*;IIW{W+S~;AR?lpnd*S<8i?fYlE+W*3CpV@Z1 z{`TKLK~%NS5%L>vaMAuwMBJ&U}?Ek)s-SpRFSL|N>lD}9tO@p z1U2Phc+=|-p#hPDqRtsp7Vn52KpyW5^WX{MdPwP6gfo!QLBScwQ9z{5T>|2Ov&I=< z`csRXq9|-p0))hAkOaX2Z1B`45|Dvx? 
zeC&eJZ~s+zDJpBGF8=PiPp@z?G4R`J{P_6WFKqtui4rmGN$ijRGxo3F9)EyoMgJ|r z8RZm5aE3CRL%c|wtCyaC6H3XB6PbIw4q&U*!c~Y|q3ff$qLEJq?x;9LRFki zG0p&`yN{jvqm!RJbItennQ_Je?MqHZ0@T8l|2z5BAB}$NelXD*F14~@YB*a=($a2u zXwvv3xaV8Sn3BE|P=nZ2iev_?9oj~IyO2=&^7Z@wE=}pRt?O=Ro^{#81HzVSzm)^d z2rz*NrdqtyHl;WNv9v&rE|YGU6yQwkP2n8zP#ZnQr+OSo{R??0#Tve!s_jI2!kL2I zN5yOLH*^dXevW9qgH-$mthBi+)ceogN4cve*iw-*k58U)&iY#q9D3`X{oBq0S;q0s zJH|i$z45yY6z@cuC&u6Z`Kg2U9XxWk{vAdlIW2hn;mM1x82kQ%0v_%Wi+Ls!{B9P& zQ?b+5eK8wvvUyh>7H+-uE)0nxdYP0QhefD6tPQw}uNBe-+RINF zs-;kL@uC;2?>>FE+=Ry7H&eTk^dhz_dfwpf+xG9C-RJSZeUrER@5J@@73)tm2{`Yx zXmHo98oT9kR!;r?zKLJ|wMgf3nQ^+{fQ?|)D8EyZd)<=pfL)&G`o^KoK~Dyf91gY@4Xkg zDLvg=Q%E&pUO_hM1yu-XC)$u>0cDF*9}~;;;i!b1VIRrbP6-gz99JP=qPWeJP9Fhk z1662Y$h{PI-agU(`<5ZT@V2B^crng*-Z|0!`z{|Pg>&RnT_&lgFiA|WERYProDWKQ zC_mbZc1mKLP0~9~M(rQ*4U~mXt?_iEuU=&J^LwA7jSBJ7jrMv^9T`7%;M^i@CLUgF2PyHB!P2aS7n)8 ze}XfZf?aA8TodLlNW^CMsShQ5IS;Y=j-&U9kW=xVGip`foF2cWAISk~166nkA&&CT z9edu@6R8hK-*sL-Fm?G~v#0Hah|oWnp!gosCtKJ2ZTyr#`=_JbZh|o}u3nu&zSuIB?%$0sCD8R)^3smyfwWG@1Em zSr1xAhYS*wc)imqhd37>?%U6;%Vl~?t<}~>^p@uDVb!(H^mt@ z1ZM#7`kTlK(vouq98Q!ctghSQEDg6LNDYm>L~UH?5$g;vujO^-HHdxZQHZ>Tpz}H5 zwQxDy#SwuT75}0Z1qHSkQ-L!?N$NXD*kW8n0F92=Yq>PbfPkZq7p4`#+@#~(YUO7` z@N4*ToCCB1XHe!V4~LttXkD`48PRR6#qI}7n|gBI8VZyOW(o#B0V;D{6Uj8~y zL|h!vS&qzX0rP;B#W}=F66flr_isXpjN4nLW~l&l9ZAoEo(UQ<7U=>7nIIKwJWa7U z6Es{mfP(d0_!KIXsYp}hq;NAvKyBo`Ce?6eGGnIm1^@sM07*naR4b28&@GYaoSE-N zAkk9}8pM!m-Rx{NoXzy&3~8Jbwitqrj)}y)9A|1vTBc%)0Vo)~3w)}4laWQ7Ye4I2 z6bn&Mxh1Jv9tevhyG&{)(l9-zgEJLy=K7b&WL*W$bWr1MMU%cKN#I;zzS>|O8b6}f zxx^OsPA(_6Ow-C#!zXPW~#}g7*d>xD2#2X9-Ccp#%NX)SxRCAO71r$I+1uc zA&v?$N#jhdHkENk=%w`jO(>pahdZLhDTow9QbD~jx*NDLuOooJUp*W_6`w0av)jcw zjs(efgr(kVac`w%0WzrSFHMy6u%jb+liEZ&37j!fK;uja54p7|nLyD7MB)8d5D@Ep z$6}lz;So(G))Te5?jWQ*3AV5vk!C?YM$v%`AC-p*snk;NE-Q`>)l!UPQr`tj#pozG zQ~WK$R4L_5WlV@f5Z{TDGM!X}7!#!Og{7e4jD?m~$GH?MweN%VC%h z{TXNM3wn(ld?#cpJZGtfv&SmdtrTa&I9U}Frhm3^PwLT1h}MbyL_Snm!SFch1idN1 zyhnV(svX11P0GnQsYQosb-=oSGZ5)-)bK38ryLWhq0zsIqgb7mnZV~M+1b^m+)|u5 
zCPg?$Vofz32F?^?0;wHGLlk!|_9Brf3)FW25f39#)x4I*9JD!sfz2`D3K%Zk)y14J zU7P`cqUfkD6JtzD?lEZ)1iqHy45c_T%!$*BaSn0Ax)6(WTf*>A&Y3jk zkg|msOE5uE=P;t;yU1%^JAx1MBg9qHAO?QbHG&gr{~3qjN3H`_cR#R zl4gH}8D-_`;-+l9Vyb=%Kwr%pP*)*rHILdFmtYP0$|f3JMZ0vh#9A6Mpdp}h083U# z65-@(AhotA#hFS=V?tz38_rOGbN4(fUn_fQM*X&kj3b8;vH_}gAwL_tD zx|`Tb9Ur=)jqyQ5DAWR}$tAHF>8zdLUHG~dig`%;docY{)gJjDaueCt3CtmBOVIx*xR(OdI4Y9K@iEnS7_ct%8aQ0& zJzE|JMJCW#3%q~=d`#&*J@Gej_zKT(`=w8-Vxn;-fpZicm3B%3;aTixIEQ$dHk_x4 zhlX+|H7Brmr?vpGnqtjyHV%(StLgbBj_G0dh(nzao4w$+5Sd*(6Ub;*^uj~&IpS@T z&NWc*IK*l=M=f>VRm8b^>FJhX?0ml*t+4>docmUee~yl^UWzp|1)oQ#%pPcj{+O`Z=^wF_ zP9c8w07KBEepK-Z?!;0Nx9|l;I4<@ay8mOrYL3f_r~YoVle{F~REi zI9no%sHa%ngl7Ul>^oN%8g{X(q(%o+iVml0(4+WX6EbBOoe8E3%p=Z7Jxe z)p=NUuSv1j5;IaUXUcKjkXZP~m*7$Wv_D8Pnv-`bWTP%_QvxaSHgtc zJ_7MoOia29!@m&S+Md7!B1|ID6lXFH>dFh5NZz&9frc{%oGFUT#4fIjlU|_lL|B2; zOsK*`CogD)fJMkKzhO*0+6W_j$hoMGs$JX|qd!Pk>V3qNVVuE8e-t99{z3-;#UhT0 z;0!+^(EA1$Tw*jK5HTqeXuPAKO0r-{bf8dM_NFp8+M3P@ZHR;10k!B*V~XL7aTgXK zF%!DL2lzaxxD_F2RV1EvA#;KcK}|Q#8PwV+JQVmiNyH-dc}hK)EQr+FikvACiZVK& z2xm~N(_qtO7UNv8^nP#6^k(ac3_87r0FbMqqAao`8%?eJ5_va7(z=kzvtTEOudo@^ zQ7$~4D;TJht&^2s`novPzyM3Neal=^A#!=AI==*Sro}vHr zaHc#uJb;BV!x}uo85B#)c1Jb{XAKi@dL?ELhj)l0l{0eCzuz-~OqtiJN1Qao5Kx&c z_qHzIBAkQsuU!l*FT>HcVa4U6^1Clz+ZIP~JqunF1h7;=*k(p|i0$*)&ncPwe zb+Aa$->sXd;t6Ekb97|U*C+g_<8<7y%}!FWZQHhOcWm29#pn(@wrzK8+sV{#=6Po3 zo%ior>lV(cyYAU%@9zgRdZG$m)NLOv%4=BbJsB^eflbI(4~fb|MeqvW5%q0~AcJ9w^!i64=!9IcPKuCLU&O)WoQM7nH~CjeaxZT< z?hS0(N*_J#0b&cpc*oE_DEhk^hKhuQl60pq+Jo2<>m~snDIJHU1du*KmrhVN#Du=^JPx-nd36+Q9cXABSkDHjFN{yP&?hc{p3J$!c=gO>Cz{`YZ0+Q zF^iQ#UZ%2_G!|P+sBv`TM0&;!Kptx5?toYlm$qKD)j29Y`79rig)&cdT@XHbMidnI z9JeoLjgCs{C@zMSW|?e0+6N;w>Iyy6R6rS20S#r5BcBucS-$8-XgnYi<1P6@&V9Vg z|0NO^KO~|@dm@}cTtqf%Cf79uWlfVfT}T*!Mtlwc3$Y__avjdJXD;-C6I^OChP}TP z3HVy;8vT(j)S{eY(o_I31%}#=A+1xqn}!{2VSSu4t4ezamwu{kr0`7Kix)IKZO_rC z17FT0ln~iLM*5w}&f#-91-fZ1Z|7H{9JYtiY_FLjgRad>b0u_t=E8%pQv{P!K+379 zMK~SVe%KJ-wr(c~^_KlO;mvr?hRd|ft7*$wti_jXvmV$T9gxT#$EhcTcOEo2h8vPW 
zJX!$niQAr&Rn}-EQSNYUCyY90i!@7ER%}|eha>)Fe z$~@K#0ptRx*@Y=j&_LZIQ-%K!rWT`DJ(DNfPk%d}C2^~b{dX;(hRAE*=>LKQy=ceZ zU$VCXt~&MHPEK0?kJ$-i{CfI-n9dW58Y?^JRv(#BHYN9^HSsPtv)^owEovNj?f!#6xboO{F?AoDc_d5P zHPV^#+nPq<%*RSb7NkwFx$TNZSy^Uq_8iLz9m? z4zo=E9*0mmuuLbc#TV<*A!Yes^JJ&L9d69LO_{70CXq^x>_AqO%kU{z#Zx3 zk;7N@fJP<2?OhC-eXT zgf+hpb+c=1eDOChOMRDmy%@zyiVjd?1t`-9e2;v9mt6lBn0iU}A)B0nthmJ63Wv$7 z(+~!7;s^$(&qp$E$V%D>=@8!$8`Sp#P|z3#wwkI;qE?jThM1|d_+SH!c|&8FN@75b z*~ov=etShkzsQlfsDLl{L_Y}R6>77Ax!KGe%)H{{$o&Q93d5O;Zd*pIGN5r^y^zx_ z%@T(}I`Z`-*2#9YDdp$}UfN03Y3kT562&ES{)PuPViB%P!yeh`d6J<0y;z)V&yvZW zAxyi9uy_M6#i|Da;zbV{9Yoc!jrRWG8rW!$t8%D%2sJ13jA72~lYSg{4Idhvqz#oJ zHieVBe0VcZ)~|Y7&k+=SQ$C+8IHkYiJO!wS1v5XEq?qg&d4PxV;hhQ+9@#AucS`U> z_n0RLlBHBLQ*RHEG_L%;GKa=z&B0bi2%qNpIWl5G50UMXX0&sR$4)%fcy8`m%iF|} zA`9rIQ+dloAaQC@#f)hhd|gMgT^JQfGgOj&B=9Z##Txgh3kOd@Xc^eD6_ONM3ms^h zLVgCcs=gX*Xws~-0h$-TQ@4Xpe-;&E!S*cIXGv1>npUV%N^e@SXEm#yimS`)Kvk63 zf+OWOev0}esB>mwn=OU@M)?pS*e7!I4GGb-l%^c$fQSR=1=xr@gf+C?&LE_Ys!OU~ zMCB-TME$3%(P)8t2SHdr6BZmCqc<>APFDYG*MNRI#vVbmSu%4DKGzA>8oizqC|lqF zos7=Xs-B{8nbO0n^%s5qJMCcRr;%xCOS5EjMmU&RP9e)T{+rn=9K#u!Q<4Mpnvw4) zU&V?cKTb%CMDnODw+Q&DucaJ^PUt-RXff#bNywRI6QS#Zw_uvVNlhW$suk1VKC3#L-dDg(%0Nnlg2YtdVr_gbDc7O%xo8G|RS&tGyVZA?~qGvg8GK-e5 zg`~aNxt5_@G>ZGDU6~ER1e3&f1{STZbOO6gsh$*B1YwDsZZ$eQVj0*50P(4?aDszd zLy+}Z5wK|%Wj*o}W$_m~I#0c-wylE|3fyP9C5HRjmf8OvCGazYdJURRxe;$S0 zT@^{CX)U{+(f)23!8SNc!Z7P8dBi~-<8g#Aj8ll0zuNI>I88k3)$jP-O@z|Q2g)st zvN@!0@7t_VpWO|3M6gdQB_kT~0BRHMZlMW03ZbzehhZ<-pf~M}7ZkBTe7W!FFMQNJ z53^z|?g0!xssYr7ZyBH5$dLS8%WRhw&BU=-eB8tnILyeHH3`Jv30Xi?k?+JNym`RH zo2KzO6YafZgS7Ub>0J(Lnb6=Eor?Js9`5WBs26AOzzFcl0^LwSA%6S0HX-5u1?xPp zO+5ZFMua~Lqu|At^%#4yc1mt^{ZvVN^xxf%*baFn=QK-aGf^7X1asuzEi4fFwf6L} zqX$tjw(0+q1cpl_Ih+_4pKVOCg2`$amJn3%%wZq6Am=tExNneL4`ksFp?k$$wq21o zg+r#PQ{^hqI)ya>)!tbO5Lo`vIUa)M!`FrR=$I@1qD!&CzNc|!*Kfud=QMF9Ia4oL z6}HJ7<+G1h z4tn5-3$YVO!k~^ERZ~uoEa>}jRBKkc=YDYCNjx8u691+fQTF<#CJ=wD)pM`vwp;mqYx{om!^|`~*L>npk4vveH2midv9i 
zIm5Q7RO8mE3yiFT!2%$W(#3Qzem6QQpv(EgZ9yt4yBb%liZCRnW@&|*3Z6_2_@)CntMI6AQ zX&`f`{f7jxn&YVzXZqwEXkEcn$W=1Y5mkN7W0D8m08<8Vl;lqC@F}k?IUxQq{*xKT zp0oeH;$RUa2n)`c%f28?+39*-Nm6-y(E>mjx zc@M3SuN%_z_(HrbJ(T7d<2_;kMm~!r7+<>A4G47@@=QZdGq?GM5zl3q`8PU}7O0{Y zJX&u;$Y2fR-m*obk63pmN2+qexin4bi5n(powY>dqlMxMJpLj5^SKaSt8Mt)B$KA&rIwf1#oBb1u?9i{q79%&EY) z4(r2sd?1`14ru=$1)#_6y5Cs6QdsZXhXoyCK@oCWSSI#?!OJ2#i7=Oq8CtnA@w8r* zgXAY?Phu$8nT$2#>tv!SYCq@jxxD1wZpQt;43C4%RB`X8b|04nL!G;u!x+dS1mB)4 z&qeTQ`gWG+6EE;(=*sr#>^yoR8HlQrHadi4zDQh+`+8Cb9w~roa?nj?U0UT=*NwYAnrwpgw(*kEglXp_ehH6iy_hbUJ zsDhD|rT7PAiV_7Up47=v@b5t|NYQOXI?Ab5({r>r`4o&PG;4bnMV%Pyauw!7l4s1+ zxfo50V0A%o@;30n951;Q_>eicFY^?C(&Bql@e^#Kf(I~9|NjY2QCL`}pcSDJ(&m7} zl^1rQ^Ty!1WX=|Z4YXs4lBJ8oFwaceiYhRT(W?eGfYQKKg2Ih1of}xO?Jgk>Y0zkO z1Js|uoJ6n>3#_m|S!Es~0Doi?So}&jnT^x#K33030>L6Itd59HG-W|K0H^eJB12MZk!M5ZR-h>;Xg1XUKvNieLDSAdBgD#}G&uHsjzUF3tgOUtIsL8x zK{q?QwRIJdqdYF(8A@pF8lZrl$AsB+TCzDoH$gST{QJl2FUAlR0q(ostti z`2oz&LB+3K9(Mz`{pNWe^`EGLha!7d9}CA#OnXNbPaG1H?queZnx6M6s3n8L9{j>~sFJ!Wg4em?mFd6&(!kn!4Awybya zuC>Kk@@qxXWtWc#Z}V~nM}n8#>0%jr>T}>491kzUdhtJiQr@Whk7s6Med8dPrL$dGCmg6E=|ln9tVBeSPaU% zhhE)xU>Re#$?12%Yt-2-D$LZ!1ftN1hq83OFty*HVEA0O&i`Xd;qh;Ud|2qY z78Ku?iP!CM1DOlgUSP=L*wubk4^cZFBbQ_?Zc2bYnG`;hGifp{`>5pq6XV{632xSy4e+iA+2 zaoW(~FLVOwD{=D^0B9YMferVsE&5!TlXD+Qb8BT8QOHRSA>avDXu}2A)@5&pX)IVT z3WDqi6gzQ_hoGI01})#k(%*MA%7ZQJx7Vy7hvQkx@c*s___^)3SWNNX^@zX4Qe0tc z->$64JLojcbLj9~0XhZ{&+qX69yYSQ<^6mA=Vw<1^OOT>iA-K*Y&ASzqw;I!e82R2 zt^Jrg)sxrtk+Rt?A*+AF`+MHO)}Fhd+j_CRj@V&)1ijIG8K|=Ccgdj=kHd6gn{qH9 zq?8n!Rm2vez|jjfooLoYkEWRz3!$LBSE4?Gq1pbv%JJXy)KTsKrlkJmY_p|gI$vp4hpA(7?t zzJ9u=-UI%I*i6vq`ETK+tlDpMaMDI&bOIM`m0L?>CM-b(njX8_zGE3yK1r`Avj>NK zs^lNVRHG~P=2uPKX9w7(Z>ot!I}1gTTs8dyEiaQS5bI@p4Oj^7`cWMIOA25``3QH>Zu<`iUObOPMkdGCTp zgR8t1vrHXQivDUI5Fq_)r-Aji)|#@nW;PH8-rTC+)6!UH*KyGc7MC(oYmJ9K23jh| ztK$!;6DYv9qeZ;i84kTHx7j5QBwS6Eo#BcOYc!)D**%10wuO>ddji!qDMK?TY#L72f z>aIcF^W|tiX6?y*&5_ zZOTqM-@mQ+9z=Bc!kMNJ>Oa14;Q)jPwB5nop 
zfC3y;)@+CDz{^-M)^=wF>oL$dC1%0<43svd?u>}{4d-mYO|itlE$kluUUe^DJh4lh zcp&{8U0JZJVYUSb9i_o0i?M=3oDQ}bsrwy zsD(=nyrA!7$_AEsX8?aO2$CfIK?Zk$4U+yVVZ0TUm?&%#vfQ~G2qKCYA8`|c3!Ym% z)9h8+05T2>c|aFwvV0(k;_IK7Ip;KJ9vR47U@n?+0N)(l?Tk+i@jtC|bSe_>t<~PZ zDfo$+2?N?GOc{r#C2TiL=W8B`TK^+KA2R!s>1+;uqeBNXLaV-0%wnIM-`?@_5&{qN zhfOoxjvoWvxxcNCqD$TX*<o;<;$B=Lfu>_NVuPzkP3SMAZ1D#n}z$SwG5_$j>0p zG-l`SDU!PxlUX|VzXhFof5-1^@msbC);6v+ttcqj=UPb1z-BCF{B8Pq_Q<^30|7E1lk@Gg;!r#`fN90Hxy$GEluussF32I)|IxeQ zTI(=wT>a|(u}L5(s967dGz{jRU8qR z!bxa?lD8k;(V#^m4WGZAPgSej2|5EXzA;7L?b%^1h0ys>N4iBu-U@{lJ6YwGh&zSQ zRwYX*Q9Q>SVXp|#!@MX4+)I4dOW}RbcpkN^j@JYzAxIN2YOs8dOx_t6J0320+*%^?4;un-9s)@lyD({7i}*7 z|D{f4eFuhzF#P!gfo_c#!=RBm~Enx&xN7g5U z$c-`*_qg_f@i6diZ-E|UuKU<+i)FcMl%v4g@iAn`{AV*E~)xGXvY9(PMKVUi8F z+fxmkE=(PU;#yXNgIb7Kuh-nn^|uyY?%AP|fk6bey6SLU_Hg)Xp~Z_^C{?n3bG)7V z3~rgs`nBovw19EKNPFMWZ)<*-#pzo-x3vm!`1ep#TN@jncW&u@3#ZvfIX+`KeoI*| zfU?iMm>VBC1u~?wQ8?zmA`vsfpCPLVdc_z%~%)U1GYz_-JALTx7bF!5FNrt19uP7b*NUm%G zqg5sa=e$17|7NHFj6pIj*eWz1k{}KeR=Qu1ePad7A=kyd;9j-*8H7*)dGDJ?s62;y zP@dgPgL;Qu$f=zt;ua{#W)1TMFL>B>TnvYO+ zAcsA^+wq7I%+7nv+#NO$G=K9?l-loW?g0xN|01SI;Z{ueWIZd9V&W~QjDF}FWx^q8 z?9(-rnjB>Wm5_+1cC*y>65rGqy`L3F1FJA-&&VosmoaBy)$cZ*InKm^ZEvn@jNiUf zSQU)}iExNSN)(&3Wr>eNozLeiovelX<=@aqmOM|iYJp^78{ulGK=vdEn^)$E?O5w7 zU^au8^pt@c=JH_7{?GKTmYaUV`wXV#lM5`QF=v72%YMDv^kaWcqHne4$HTN1bL&RS zMrpwGYy~iWCDQZ1$f*{o?5o8U7nqKdcm}a9zp_8W4Nj|@49@Jo<|uc*tIh$-FZpaE z@Tt8Ik&?V)L{xvKOPxLCw(HbRFCh45+po5mCbl{aV*COAF-2w6n{~drzi2htnro*7 zh_3BDUbTvxl?oKi1nhkOaQWnHC9V1T59`$G<17~ALc$+4m3P=|UodWf8D`CC@g++( z1vY(Zz{2gd*T3VZU{!U*4hbT%Htr*u%zEQuTRr3$o?dmLaia-<=&u|<1YpuXM%(Ln zrc!qLOK`l?67zQ)lAKK8^YZZDYW2FW@L~QVPPvpij7}X69jIWSE1XoznM@NHo#-(f zZL#<7l(z5F|1k{zOcc-_Dr$&!o>s|PL-VJDk*y9en{caBGpXZCY8@Pv;*A~@y-`Lw zn$ueAVUK;J_nRkMin2|kboqNH&nx2+eN11oLi(Tp>Z@UXDdpsrWt*s2j+jW&SF<>Y znM3hhSy7pqGe6N&ao0WGavAnbGk4PFmig)F{uPl6{6?vkSB$>swYTee1?>_BP>+uL zf4r$yDNr;}scN@7 zNXkUtQAQB~NMfttY(8OptN5czb&d@WHCw5(Y1@;-pqm3us`*J_KJ@WQMknZED%sR~ 
z4O`|^r`q52DJ(*A!)ywfo`fR|NEvj@i*lmbV-2oP&-<^*ZQFU*SWf@W8$Rv5Pn|?pgzGYs7nLeU@6%k1WtEp` zQGm1r@W1`P#1LrBEn}@;FX&Go(uizSA*SeIEh4p2Fy=4ACk9H6Jr2NBoMHBK=jK}q zM~rY)btU>>fU(pih&2RlqE9#xIjz!AR{_(JmtK6}uic1D1u6^nb&~SlQvh#0V<}PF4YMz0kG1sYHmcOk zzH+LdKu7@wuVc!iwKZoI3(8sfPCF;f&<==dK*H(_&LEIMJfp+^M(QAoP~J zQF7#M3R*Cbi~_gJ7|p+Hp>iTMAsD8O+Q1=Zjb}g+EA5v*Ovq@A%1de%Njb_31=0JT zoBgc!?|G-*KF*kY+^3U0ajiRWD3Mh5(_G3>;8lJ(oOCK-+vh_AbgNf9zbmYULEpC$ zA&wK49raYCkwkhn?@#H*w&_KnNuF-Fb`>JBxZ&-<~=?zX5qdlAMbKv3_F6CoBajg)Ey_WOcz{N!dSRYi+p`QG1 z2^9e4ZyYxAkrgm$3r>1Ct{@|wWt8@=Y&Br+GJ=leZ8ueAm;SDlHi6Kqahvb`VK{WHy;)~ zLF1)EYa@Osx8)~9M64*X9a6H#J{@_)+>7b-z?4{^kX?^7oOgqJz^On;LKpr>5E;E1 zgEKgJ;dcCf-%tF;g6Pevmy$!TiDk}KJB}Rv&R1`JdJQ%19p8uhb*u;#0(BZ+-Uej? z(MI(q?IR*!#fCWaAynrFXz&fbyLnSNALQ^6jH&4 zJQa%|ugLuZQ7-Ykk4i~4^HHqLF!rJtW-b)wnMBjo$oqWV?M7&SH+*#fM=BJ>ZHTAf z`G8s+KTX*LcLaNOPs}p~(?SD()zyeGN00g7tNsNR`V1n*BtXT?0Evlgyrt5a)h1X#WULpsGDSe({^F)L<4|>! z{5?;S7U%7A1t<>Sm99G8Ms zEGH2KHX!p?@t&!VP#=Yz(Ow2|ez;HKRNzMeei~CGk;Jn*LQCMIqJmY7>9t{m0^-jB zl#~-;aD?dZM4msmrO`8g_dlEIRa(*|1_mMZQ1x^@$1Du;1dVf|NGb^XTR|1cluui( z{M|=9lJb8uJ=TuVnP4mBo|>%~u(1)mxN$M@!6Op2%LUhvSXA5sMt2ZJ^6U>u_=P^N zm-NL1@hi;^AypagjLI2BTALzg+_LWPQ1W-c`dVUPyWcZO!}f@>H{I;Jo1XKwj$#mS z&82u_a11Ewa<<$IyzL6BeQt%iP*c`3rxGE;?4}54oR7{HMV<%g4D^g;WkH|V-0qhs zOcto}lti1L5O)ZUNhG+uARgIIu)zg?B#gx`xc2uaQbmmE{=)tJTA!8O*vkZF0ak@Y zoFj#g<6mxMj5C+me_d{Zb#Qen!odPGU?@xU|TAyrlts zuZ0X!VbAwUUO`oUe8!}ucU;uojvA6cp%%EkTGy-yjwU7 z$l2awSTEN~G(z~{>KFFZ9p{yh6S$j>!`Z-8#av28@QrAwqZ}`W@RPj17J!RBR~IrVS;BZ31$4BvRp~TcY#= zEVn*~>apnTujX6MV4j3jl8H1-9m#p-l|I+vG= zv8oY+GYcV!MLafHE22kgS!{ny*Vw`Ij8QM)eI;(hTGBrR{ZDQ!0*u>M771Qk0Q^Y1 zfG5gSdCYb%XL9`<#&wh`Moy{<4UEgq#Oy4kz-@n#FSHg?#Gk)WoN=DCiGOO2ql&ju zq{U1^(t@nzFqUjB?ihhILfrL!K-72jEXGf+e&p6D`0D7H>1&ay>2lu4l~~ii?oOO!JXT#6y*OaQ{F=jzaR&)IkP8;S85fZMgmT{6 z;~kPy_gwSI*?=)!gC`JD%xd&$+75@Z!LBHyGF}JGWb~ok%y9PoBi?U$DU5)L*bc|X zi5P>f=ZnOU2RquC6*4PsR>^XX!ymk-wrwm|owJVR(otScKi0C=%O4w`mW(@BU-}cf zJ5!&%>>w+^|C4RqQpezzp0r1)lT_9REy~wN^fkZIN 
zci8Gp72vGgJ!Uks13uzBpGKdb(ak+|rFhqrfUR?kO{hoSq$}Y3hl+EZqOizZ< zvzc6Fc@-tawk~zL!F~T}bX)O~jJm8bA zX{<0h%q2Z2wHsT17AV}tYVbOCiMJ)vc6__ed^A``d*UmsPUN;8$t)xoD}ii=5I*hd z+;+dGz%i|LHJ5dL&i(fkcN6Gg@DykEZDc3hRF-)N{OuhtY8Z#KmK=_$lcg62pA#~Q zEDC^L^G0Cc_R>ui$JdDHb5eVesqs&4>&3P0Uo2n_`2HSUIaMd3Y%7z`+)g2hg{3eI zpHLo1v}oC_BKwJ^=>#3Vh#lBox;wPc)D8AH+s}8nFMh(DdPHbN0L&)TyV59*M<3`q zik(wK6}Kv5EcB=D5?sy*(?>4bFL5#*UakLUz8iy$DdBS~UmhG0THH1-?w|g4^m{+x zv*0v_@y8$9_1}4&$r?}c`qx6^vE6q5gDub!yr^!n|6cpY{=lq1t2PjJqN#XObhT5m z*_q!}eOrA`AvZu-_QV?VO8hP>-_FE!NNnCW^VoB%BA$}(th$>R05VeeEu0~W$!}%w zb39O16qV6YoGw~iwcESaQiLB52Fe_*_gmSzx#4))g4?z@l9!)k^#3+mjeS(4`uosy zcVN*k3$3oWRMiov|4XCaxv60j^(U5)i%w$NIUe)HQ%L9v?TnWCB(1Q({X%ns4(0-i z&{^o|v)u}M*LTQz5-fisQv8@{9W8_jl!voiFZ6xBzTFw7#Oj)HU45rV9lO&F^;)g4 zXv*X+k!=`n#x0U@ogmvSZqo30eTGJTe=q%UFX+5aDMP7v)CaSOMHtIlb_m@fW3c?w zE6CBRWg0{Gm2)2W=Y7}GkhCK>D%6aMC408;PSng%$gUGo%tO~ao|8ua3B3-PF>@=#g#XZ&zMsqadX;D-H3Qrg$JcrvTj=HX*0f_GJ*BSsY9`I6!Sjh*1VZ|Sodz>w z&DtgP418Uryc1*?#`p0?<{Rg5RpNZ&X<($fjyf7;^&I1me<_sc*(yG~S1c~&K&$9o zjkP;i0?^ES{iTopjA3$vp6{hn*~)qsfEu>~b#`|jaT8<;u<_Zv!g{w)>1x`}xUx+L zSb2+wT}B7N@zbz%KbTmN6kj2p!A6p30W?2~l}WgQENc?PZ*^`)1IC6veqVH&(9af? 
zBYJ#iSuA=%zb5N%yx$#J!$iX(kVy+O^2CR>VXSll6x*(zi&B` zZS1+4t83VlGX=QEN zH|pul+x?QFC=5OE{sctDEd5i&&pMrSuog_a{bK;yDM2UkwteJqHU8W2-$8A9FCjA4 zUz}~aG*W)%&zW{V^v(8JiFu4?-%-G$Z*Kk;I4+5MAfYFqfliJ?)3~G&s)8b0OKGC{ zy?Y2_=dxhneYv6kvaWz`%7}wGs1rB@eAE(QAM$sM2SE$sBks6UP%dg*U`NM|M;^l9 z1)FRE0@W%9{pd-2e?GL0mYbKFvf=Y{$yaO6bN+r*ni4#>`{Q5KJL&tcmsFB8g-kb3 zHy~_Df#O{ntPvcn#C#vCp1ZNs2!)^C5^YD{e%A7~At@D847NDfe9(%e(wRu)E4oc` z4EU2<`Xrmqqh7Pqtu2?B#e=8gjtQieVhA9zZ3c<6eLEc1*L4~HT`x^c-Nd@@WG{uT zpBheX|K&tzZQyFu&QDf(^wpQk15a;vtD?8i%vjKA+9+XjUQ)DO{XK*%l&5{zT_Qz((x+3l@_H{@e1RD$EBg8>p%^B5FHq=Ljbg zl%(k`5MGGYLNf)7boct<7uEvjY?aLzCT{pj66I>sfl#-{svLt|_$R))R8TO<(ZW@< zFLNZ`Sl~6dX^P7xm1X(q_~d;RdRO~uAId7~LHpZIRXpw1wi1J72Mzab4OxeF8othS z`A_=FUhn??p<^OR-7o6nPd^GGqYd2;pOS_P#NO)o6}H}54!iBq*pMZ4-OLX{H$#S1 zQ(|$n9=0VI@>wcs=df&~r%hx#1f+et5PG$b>deVsOv)-NIJM_$9J)4G9_lJO%tJem z{HavGE_vX%#4Mj;$85rZlnlzs`nUaSbTA2?RR(EB66&r>q{nTBvYfr)-?qu*#k)l$ zuE8^3ej&MpG9|hE+{vNyZ#cafr-RuEc^*F(a8Jwir8e(vKj-+@%*l211=2a>zI3tq?lr=vdHggP2IMatZwtuP;rSIO_wJRTGsTgd%3~8U7}_r zrxRZfUBd2C3kWV=c9!Xc7We2p=^>o$4Z2amP8*fi7G$=`7g0-B#oN;>4Y^~A49YQi zfllwk+e5kfI{w9YIoa=`SUi2T=g|gfxeIEeBq6FEc(X!Dr@>GIZoy7VDCYJ#Y+xjF zP4R{?k_EAZF*#T=1j?Bk%e6{tXIPru=6IR_{d0HjCOa zt22pky1hJJR{740C$$rkE$ox);?F%pXe@wF?PSgNKLBdZd$5X04 zzipt-?Q){7>nUBevvNqItI=oSO2uX|Bruk<5bbQP0T9`Ob;zE>o81x;QCROr$FF`% z=YmJMQoL^!6^Z9U(UZ4b{T5TLrtuG*g1Qy|14h8=S))M@TB)2DM^B~Wtp8AkP_kw~ zTllPl3h$>UNtL6$j{hla55J2hm1(-{SuhozywBgJ!DBXTbgzX&9d!d1FG)NducFVB z4u%l~Sh`7zjyYs{#@v4q=k2>d@qS>{Z$F<5Mm`xAtLSORNk}!eP73aEBq%4>gjILZ zJX&r}Ze$m|)AE)7K6=P#+Jb|n)W36sB?KHzHEctpSV_70=6v3Yx==sn^I(=yQZ@qwY@ zbS{h^fy~lmGZRYQ_qKT~0|mn$Kj1 z5FrAIbPu&v$uoh^1?8WVPS$h`5>?p-nsuIpD;2`$D{E>)Dp; znvqnTSydW3Ny~7yhq6dSKa>S?xk%(5p~XE#=+^zK_A*Ayd6Lroz=O#aMu~Md0=in^ zHp$o1hI_ddcKrwMszgG73RyuL@jCo!31dCPHcK$1R@5z2K6*bWT5k%#mQDBFg2{1+ z1vQ5P_>vv4#dppzS;sUa>nmuP{pJD6_GN=@>blmnzd@`Z-ru_S^}f-FlG-f9#E>*f zpFGj=U+45<=BO6-6OWHO_J?J!3Nj}*urI1`-~cm~LHU|FRx}=eAu!=AeJ6BFx6f&l 
zJt^8Z_~(n~baGqun{fpgFn_(5BXh=IWgZM6TvtKi9>xNmqT+nMh)8ZD{4hu+0Ec=_WphLd(M_*za9(o!}*V4Np(>G%b;Q|R5K=CgnHmw3bEvjYe*j;K3 z%_ZM67H(-9{|Mbz+GrV_kC}Vt%3xYWSxW?K<^(pBLs;uWLLfsy_%>o#mQs0%hcM>n zxJ++nboxoVGn(&S=@M<->E%Rr{;z;7sH0=;hmOB4EsgR?C*nagUdzanNoO3Us)N2Z zsf?daF!>wFQlZ~NAfAF96Rv8UWjR78>z}ms@BZxm-Q%4mVEB6fUDTXx07zxrO8^hJ zKu452@KNYF>H+Vz5tmd-XmZ4OB4#>k2ZXqDFT@;^j=y}h=jqQPBgsDTr&2tFu36x9 zuy|iI1^1_Ln9c{HTI?#{`k?BcW!G(;m4nyn->^KD7d4#gcAautWh`$q{;7@La+=@$ zS7SGxpa7_UY{jMIc~DRmYU=L2*3f%7#c6h#K9_bK9-VU$AbE$;^+dT8?N+ZQx>@?O z>rnV}q4Gkle~?kI9e3BE!!Ug@Iy>OD<(^W3)*6dw;J3#CQ%z)&%Dif~NI2PgkbW8_ zd1M6G*AzD&cf`s_S560AnLWMJiBF#O1H~hoa@SM+2IsT;3bUw22wSD=YPR;XpRd-w^||y1Dcqe+qwO^R^Hb@` zm-RRC{Q=V>ns0CmVryq#!|l!)?+S*sZuI{Ggc6+3Usdc4$rBF4fHL7O01F6k7cE6V-qvEODR_HWXlX% zc1QJD;A~JU>20pBm{D*i97l8A_CoSi*894zh20#RpFeM8>1XuXnB-#40oy$|q`I90 zrYY0~P6blo;iUgk8?YSBht=+rbgCFu)AOy7B8p)wIQS*au5YYDQ^&I$@(&DHK|5+X zI*pc;hvS~k=zzyhK#=ANn?>723OI@3Js3t6Vz9VXDW3$bV@euh>;bu?P20#$u+hUs z8`EqqrbR!u5GUmJR`m86!HwsLYrLD6DirNSD;tT4>pP>ZMSdJ?$b zVT!jyb{kTq#M2n#Og)?_L#9%i-_=uh#{KexrQOHD|s@``xfqDYp zdiuMd-E-7Elm{zxNi!f#0vY7?v!n(wgyx<)tH+_*mwQ5G^Wg`gbyB$5^Gto^Pwk_C z1SJ2gJ~;rM&|j=vENkPy9^Em8d^l5!I+NYW`xt9~la5N%V{LeyF@s^8MvH%cnT7$# zr!en>n^=q}seYret62gZKYl%DPpbT=pbyewx}lzh*+g%5(}52N7(d@6F6Dx_qn^iV ze1$b0jFZ9XyB0DlJ&R_hA{^|`A1un>`=RPw(&~yE;WsXU$WXPBnfql#w-mbfP}sMc zBLi^cE*Vaic)~$K9~BuEo$UUj+SR|hf%HPsa>trCs0=Y>*uT93Zgy%O!;tAC7Eeg& z)`)3ftmNhya6Lq9>DZy7-=F% zQqt?R+X*dfw_>1C+^kqu+Gw;o^RA-Q%;aY+;jI3xjo!U9Zj97K3a`>h8wP*0i0=5=qqbdL8hln)->$Tp;z{b}f$qR^NZDd|&L>n9uk@Q3Dvu z``+us-p}TVP5PJcYMhArt1Hy2f!>X3U)C#DfN}RC$2iXz0;jgN1|zL1H>=m(>S(f0 zfkbF?U+vF^=zp@C{6F=O!POy4ZD9(ewr40h7;NYUuHFSIhcLt>gi-6KnB+c>dMF$UFv z?-&ILOm~b-hLnehPeylMKX#r*`q07Roqyy>N*&&Z7)zX3%s;|lV0dOl1M_fsc7#m+X~2;Tvx>JR?? 
zv=+KL0KbT~MxTppIRU+Ifpgpjse_Zq{^U)@BJvJ*3lyUc9t@6<|9~31uV3XK)5Vzi zZda^dyJX+*ao->-Q&uVt5gqb7E-KkMV#|*=<6gzS@tn^LC{Qi%`brOy=te4B7Ha4& zZfNNP72J+QZMKUNrA_>FLuz$%782Fn{#K{`bOUz-m&kHBVA?&;S`JWhlXt*iNN90v ze+0!6)c8LRv8TI>sP9Q)g|_n&!@0gJ!i*m-iA7AJ5_PxH(}DS|nhxta-`hcet?*}g z6teu0R^oKo4cE09jBQr_SzsSOR?3L!5m&1?{kWczc>~IHw!cu99%O?{{Zrixcu!zb z_vK`yt7^3^8-fZVcI@dT334C6IMTy*vm!j7yYtU=v8wnsJjCS2oVcgtnpkhNb@xV| z7W~_jgXxIv;eX199Srwh$5RW8dz`u(zWjb})`JZGpF>g*+Uph>{) z&0r5*FXDY8u0cqdmH1B`O5-U6e!{wnOk@Vi1m}$BQD7sqG24FZfII)xn2VaGqn^rd zE9)kQoh#5{8?1|4#hJ)hn{H1;e*N{Gu0&XVpCKf;^~t{Qh2>&AGR z0)v_qHl@gHSL1h-S)A!}S2hC<#nky#na$-?Pwe7kCN_TFkLzj1f2MYi z`PnMlY5%(|9&WH4+A|AC)msS3a9B_5_zF*zSPo7e)4b|cds_NEo-Y}2`aha`tEjl5 zt!oe`1cwBNB9Pz?g}b}EySux)6ReQn8r_mdsu%O`UXAW?{u*Om>foOgB@^DN`nqA2QZ`$6sr7H`4kpRA z6I7@r&BxcPZ~H78jod%ESPvjYKlyypWk1OcSmokP=FCVYX!9mBpzTGkF@HJ8+=WSD zBMPDkVD}Wig2&3_6uIOp=->1Al)PkO$j?=o0r?jfzourctkOZy%ck9}Y8Nrd;s`0Q z06i@5Y{iUrE7`$pQx_mFDpb3t-<7Za^h&HpxKD##>4M1$PxlSDO*5&sct%crU75G%zNifUiF)@9*7gdY))C!qPneM=#Ept}O_2Az@<|%YHNyfpcby-E% z^{$RiOY6NSme%l8vHVJ{6Y8o$uN<^`E!C;hVeTFyAV^>}6) zTZwX2CXj6NjDTh{#^UB^O@s|smK_Ya@EP$YdR`XTQzP$)2!X}aV&lNOT0aKdDSl_| z`1hKayjD_bk7W`@BTZEP=pJm`=LRO09Vcl>-&J_;R35LO-?t+%9W33R{VvjSD~QEQ zk>-hj8v($OvuDZ&l}MQ-1S-=h9_3Z%}7X*Am&Jpqfq6eq~woPXf^4k}=-@!~)2V!k+zm zSB}dGdf%2re^o688~3Cjt>?6Mj#!~qu)pb*;sc~AT(3?+nM}M->$(ECw*Mw0M)<@t z9~_QT`0Cr~(xE5SVO>_bE+5WMNvGI0?|8A94p2Tgfk;LVQD0JJK~rr!K1sQJ0~ zwk*yR0*c8h$5<8$s?No3fWMqz_zqg!L9ZZJ;IF)wv381tQPMaYobJ}kth6Q?CWJ)1GOHm2HumDAK%!Jx;h zE@7{uU_Tf%uxnk8|MPZGq_t@w0vsF6QuA5AbgbFv4$abb83|nXWASVn{06l;Es{)0 zLj;&|{bD9o5cJ&);>Av>|GU_JyR1}BF`f6Ic#wg46M9S_*W~vkgIL$TY2UiX^wZ)-+t+%RipTh-80A!C z?#knuld`Fc!=~N8n3Jf|ldXBjhM9Bq*D@J(Rh=2Wwi>3u&ww7T^HEy+4!KG(0ss9g z4aivY+TFuw|N)VZtO2Hp71yE`l`}ofuGe1_v|A@ zl&jLPH6~xf(TgfB*nI3v$0ThI9DQy+o@{RN$W$uCx^~^<6=Qc(O=1eL67H=8diGMw zy>`V#o%9ICeH>sy8nI&;)f7W^q*)5WhgF4rl^3rd3;&>YZN?NyrDFcIZJtQB!p( zXhphbtcQ~K{4x8{r6py{7T7CA&%(27?rI(7a|Vs$da6YbsZtY2Os^XXTa|3R>FuNG&$?qpM6iSyWXndQBE?O13jV 
zwXt}C2_cFB$IA+kpXe>(#|t&J>SkTqg;meW{w0b`g|GPluKu2tOqXtz+C8V{V{uA| z&)=}9?-`vu)VNlQ&OfeD;HH(aP4#=NiJ~rRd=Z({JML>~80|lZj;JxevSKljjM!P{ zhb_>-ARo=9q%HycA15`yFt}5&0{37e92_cx|Hy-k1w(E$ za*jav2ZN|eO18GmXF|UmOmx7hLIRAVGl!#dD5@RhURDp3s14;}!gzfimtc$AD9{ps%kEnx}W zS!wR$v5y(Ot({Tq@~TLHqJ;)Ndgz=!BnHkPE4r zk-mLBu(VD62_&0^SM6(wUER~p78ySw84F*cg?y*;f(!$B0|6;Ytb48()tCJ#j*-xk zP>C507LmdISJir~IqWh^4Z_P8PCWsYA~#xIRgX6_x=CJ|PS9LGs^`##_!xUVfk| zC+t`XBUQHdHFyfQfVib{J}G^_u1!ArUh}hCpMmYaDazVwx2o>oj_v51t9uR5rq~tZ<7!wYFR@G~T<*HZGL*O_v^ZzNpehC~_x} zvbax~O$5uImqga)IBA<5dx!kShCyuW)YWP>A-D7a+feAfSa;)nnKtTOf>Xx|iuT?+O!_j*TG0#X0oJ+<+Qk66XD z2$0@6K(aGVvSr;@P)G!ctk=jZ>teuCv=4NQcKU^g*9L+%D@V+q*}ldN{TfcP4Nb<2 zQkHu>>nbSToe)T+#FJ*{oiVj6YEcjoin^qoJ{m^?%aT)i0a-VKn*J(1z*T1;Yj4V$ z+0&h-!0QhjV5dF_P!>*#P#71W|C;=J(Z2Gtn%L@O;o9rXinWbKtxAo}ze;Umua;Ym z6>J`VmwyLo1CC8`_+GE^o>oy`+ecCMjGBM%>{Qtgs>M$)SBW?($|$6aj0ie}P}agj0cs*D%BuUVcfDg{S&Kx!dNMbk)I(JkntI|aub zd2wR!?-gGO;{JO>iw7LfKyH1OBm)L=P;e0Xh6}KQ2LhPsq0CAdh+`IgO9RB5x4*~} znq|<9>aY}qjM;B*;kfgI0`q~6tTKXjKZDRh4}&l(Mf zC(w)G6}FIdt#lVf>7s96%)^Gi)<*x_CU7rsA5x79WL6C@qyqx&2kEqtl0C`434JC$ zm4S7!+)lS5V-c#5N_&lRO)&W_@>APvY{14Cktf8f4O~G?sug63<35tr(CFD>jW+-Q zS!X3JK0E_+Hhg5;NXe)Evp{rhwa%|$MJ2SfmNy*^?OUr+$_BW!(BJ95QI~Sy`jxIW z(<|)6=bzM|_BKC|Xrf-eGk88GorRtMva%3tb$|23^XRa894kdw2tkj@Y^}~A_lkV_ zy3BBuCiV~8ZAY5H0X*Jq3q%9usE)%7M@TH?|GnU<%ai5xq@Lrvm};xi>sfQdvB)FA zo^M@n7Vs^3l4c;Y_3n;AP(P2hk44y>lvxuG&AABEVxBL=j5b-jFQi*9AF1T#_AsH{bnQY+SlZO!8H}r3`Z*pLb^{C zDwd3okOy?T5$;>kOqm(slx|XFfwvnL*@bDQym&_ajsW0MCOFD>hEj) zy}~6H7PWwt9G+|Xfs@GZUC^ZQCa>#9_D55h8uxSmcy0i)M5U%_4)|%y`wRuMx=ELG z?)*MJG9xn{g=4R|$xuc*?#{bisPPF*XFGz(%M)ne=k2tCy>1AN%Kmr z8+)NrYFe!7j}|AJ>iJb#8S*YX7QvgBQjJdV8(U(9+$s0mUjBymISL_8XOA#+tN}n@)?g?3L z>@9(WZk?8Ko>0ssRLUvPPyV7g5PVTmrUe+_I`55@K>Lve<5VqJe6<@4zg&ol)3!0P zIA)o=CnV=msktXQP_sT?vRq5b6$h5yYbrw+Opk;H5K1>d<)&D68_47HhLamw>l`7} z;D}{Gqzqtm)W+-K7h(!ksM38wXORp#UR1|~{)2yyqT{P(@*)Z9$>RBfXbTFSy)C!s?J zZPP|pc}UUn$W!XL`Bwq{$5rR}q>1m>Ol%jOXO~@dF%%1@ZOG{O>4WAu%%GW_V%|sg 
zJ*S*!s98LI$4l7Xo|ZDfi%b@GT0#CHl!Z6}=|?~l@`sMB6*4PVN$rQXV8U$Kk&IX> zHEy^1_a`ArzFJ}|ojNuqMn{OVK4}gCgK;EaVP=lKt3jZHs7a56Y9h!;>^pa^d8U8` zf4FpV37z!uY}a#I)aO1wip{tA2%vZBR+kq^jyI>H9+*E9EaL5b@HaS7!U1OfP*<|% zs#cfr6~@r!8bjE=dQ8^`@mY=Cg-I>y&BgZj>COaMqX#k{^o-4sK&BqbQ(`hNuAQ zL9O~vGMNH>lY7hp=m*&_%$yMR8mqBs`7FJ$N2bZH1*$^8aGZB+3Jhc&5QP|iVGpaM z65qE=$tPezz;o_z$(Vku%zzbi^4cR47e&tK?J()25Nrk)tnwL1q(FzyD8zq=711p; z9}``*mNfwdF+T*d&>kA!4;66m{ z!NGx_%gD==q_co0jn+!9z^_<>+NRD_6%NIW>#@ly&bFYGw3I)u6~}sn8YxK&cy&|l ze@m_ZG~}uH{Tg-pnUZgai zFx`>d5>#j|whtFY`6p@3ItpP&geRr$y!n<#8Cs?NFtFixT8gQA#Pq+Ow})fQmj51N z%?)E#9>qej6kWER|4cK@Rz}o?w#U+qJ!O^os907M)zaZeLNN8SS4DJh2cA1SvP*lUdmbhHQq|WdI(-avKeMU zSra*Yf&E$50~?i)2cjjwC~05V_k}{{P2PnHtShuS9n$+_uj#NCdWSF>5&<&6cUa*x zqjn_^`Y9RlAYA!bDbysE<{f=Wk$dTSJf79e4XeRwj9l8>>%RHa%SubFa;m(71~Ai5 zzG?8C=boIq>zUZ)E=*VT;AZ8vX+BD<uk-oH@&I_>DZW8!*^&&6@r=OeJFAD#Lcr~R}uQ!BNxKjpue^F%UX|MTLi}ni0gwDcyMn090W65Lm!)=uT!7S z>Yc;xT`)+6M$?$M9znN0Wf*Yji#ujX$0P4_1Q~(e;hfsZbuddNcMIp(XN)1?f54az+W>)Vp z=)F4LVA2R<6PjXT5{RYKb$`~jC>S+&q#txYxKMc1YGY)pjf9o-IRqU2%XnamAVJcTTzRX_k-eaJ zV@}(x;loxcZk14;m-~T(%)n&%TI1R5q3|o+)`2zb{Sn}!;x*xF1QMvIzE~_r@&JkL zlicx;u)j1trrsaG_bUDOBP5yJRleNwL2{Q@l{K5j>S+e78gFL^rutS>wBQ5OllS5C$3vFK$H5hnK@C1~4b{SZUHC<~wk*k}3m4KJ_3ThD{iu3m~lO2nX@ zs2yG>$*8)>$|sd}IhKFQUKs3i*mhD6m5l`Bi}tQ$Si)>s*Mn13-|4gR>B|=rzC&A? 
z#~jxZEucy2c$(t?BVI0Jn|`w9-((iO6vo6)Z;38K|=>24U8Tnzd%>T9UEUJ_CS)Rbe+DOP`~ zZ<7A@iG!f7&{|{J&JKCu6GzQuGkkw8ykso(kw8j zwLT>T6uDOFh;%)IZ&ygaM(T#==eJ1MEP>m1OpQxpRW-^2V+ZO7ikfm8TCqEYKk5#Y zu!ku|OjSVE0tR;n)ATmdpWiDcrhM@vik?+yvDa2_sa7Que>K+bVM6K%%Bpl_+Rq> zih$l?3U|VwMPqnTNjnWn1C@==_CvDQePIv!$S{i$X zco{D(2+f-%OaX_P$rEBg?1jg0{O(^tGxPw5ptJvirQ{I*3l<(Wo21=bIh=Hm79yp5 z>vSBf8L{=3ZFl%@7heG^s?h3SozM&on<9ldo;;-ms+|?Q%r8eNe7?IA5MMQjvOL`DVk&6)y7KpOcm5V5mEqXuB%@ky zrh8o7%~yO-X-DmRP#khtyf>5j`~%F&5h}TyRYz`|fhebnvrUP7YM#1SnK|?l%SgqQ zVu2><=fooXxYRfMSedyigbVZ3LGyS$Nctn?9Mj(3drmR=8?vw7-V1J>3X7BK0hSi# zI**!Tw+C}iUV!igcmG#?oD2`CGI3ecxe$vwQED>t*s;=%+a{7Rj}$~FvW@wPnRbo0 z@88X+`~)yqpiM*RLL(hRb-+k{M*W3!T<^U@vz#xyzY-8~qtg zhb6U(d8CHw$S;PCUyY4(4CiKoyt010jJQ-g>h4I`kypm<&IDZ`G;G!d<7upn%?bhjUjEB3exTJfti%~D!)X}*I-t*xXQz(E)(VQkDoWw zm@;uT&I4S{hLYPfK-Bq=HXqcu3?@#TN{gkWS5}8@qjy{u&Tr-WUUAVVSPYUUoRUvL z;F5ZU)-6sjQi2|4FSJOMn{88iJQc=1OYCQMQtlp+gJ`bBjt0!+Ohm(ly}SZ)tLsH) z$0qkBCgZ0gL$10J07;dK{MK|$q(cq&-PKa7bC~Y^=gaAfhG}W(4NOwy-XnpN@smMV z66zYgUf15(ViWHeofqFrsrfl78dA?7gWSAY*fr+KC_F&b-Na4m1RuX{OwEjs^f%~} zZ5>pcKc074FlD0QGzTHKcb#w%zCZrV4UruNlO?c8iW!7*leqi#3xwhaHyw}M^k z7t(_4SAXNE`war5Sp0)(pDPlFK~1AfvG#n+P+m%StKdC0XM<-V*Z3**Wzow%p6;P@ zFIgGVtEO%3N31sIn#bF-Y@B(amRqmv0&H92PBV9%W##~+r~GH>PYHRD7DK`{^HUH& zWgl!G%jxP;{-_#vo$E7t?e$Bh0&&SCm2PBStXp^v7D{K;UPN`AIe-KVD54D3+_VJV zdVRA z+TkiUW8Kv>9}A8UjF2oJaIRJ4L5Or=U41E#bkQTyn;@D^CbHoxG;UARwD?4rDy>|V zW|rqXAV}wt;)!xK9V^9pUDiFc;6{vl;AesrvvOc&EHlh@d9dRIH>i@dH`5%VbKEdg zm=XTU$6z1w_W3&!^6w}vZ#ZjB5I1o;KoSrDuEf9OV2B7Ec!)1@A3&$+b?BelvUAG} zf82fbSa^9p%)Q*!JO%N7`hKHRyN0lYZoQwIi*RyJZqBZSC_!ZrSxaznUsaBHW={*ek4v6Ek+@HXQpR62vie_`p#pB#bWarR)wD* zAQwKd&xZUaR+h3SdUR`qV^(Bk9Hk`9$vYm3nt)*hM(s<=h(vFV4Id?wt1efD-9{4T znr5e1YyS7AucUx0w|n?8-XNMlW_(8)Gj!sTL5{BpU99@H&>@~ySZ0>e-!Rm3!R))Z zQo=I>oP0qMi&VpKsZm}|=c(CIms1QC^S%HUeIRz-A9{>nF=d0PC0b5o9ZCCT=u?=Ya!g< z(m+_^1Vdouc3Ao=R%Q=+SM(@cY`BrLUvMlDo%grJQ@UOd~w+&f-81ra_=} zK<=W$ zi4YC!SWujIFAV&3KNNq{bYtWvs$z((=za|a{=W#d- 
z=G;~%9;4`$;G4Xn?-kn+$O(Ucjf`Br47YJ79JNOqp|2&BL;Oe}Egr|OYG>SK3F>?n zR_Fi#+^l=~krIW7IDDVu{LtltF8ykSKg! z!xj5VY+PM(-|)%tIW%0A9c-ijejsjoFg&QzkH&{Gkd~is!G&yA4IZbQMA3O=hcYAh zQH6OA^(#OHd)kO<7?nem;AS_Rd_gK80z7aQb_l&XY}%9_IG!ZQZ(0(O8w$bK51{H& z!QKv8ejpadUU!9?R8Q6CQ&!zs<_{^oXPiV>C@vZ zj-nE^e6h>|q%W8zEAWV*G>$rD+1kgPXhqB8z2BAN>%-ufm3CSHz4BHHE?oRKl??dm z=?Cf+XN19i)x>M(atOu=R{?IRp)`Tvwcbh0ZGu`$C|jLwUZqW~x_F*}upiqlkQB^H zR>**X%?M^Ht83yzo<+soN`EYon)^m6!Z3y`c<{~<+P8tAx7JdW*{-(WtkY^U! zS!{+L3W`D-6aK&zk`LKN@+5%dscX74$ay1TjKEpW53>C047tXTH^pv5#mQ?LO)*XB zueu3sni|<+k`Kfc){HO~?SDFeawt-JmCwvqvXH}FWL8So4e(&86@P_ICx#eRR5(ZV z)Q!=AV6Ro)5&x}jp`wyaT~I)`mRF<$PsL{|SU>H~ zxl3*s@F^$r9TkFla*eVO8gm-~B}mhqB&}5M>6JER)>wYit7GR0&3_CN9WhvfC7Ti* zFiB{5LQDIoj_*j#YisjXbyw|ZF8K^Ji*FT&l9F9;Y|b?!Vp4OxE&$nMld)!RD=-N7 zh+PS(pzO4Uv4Zii_iGu@}b!D_gyr{DSr%d%p%eM^`X5sI$3u9A)}67gB!{mt`bLqPX0C; z!OUPILd!Y%>jf!D9P}+LZh8QxFuuLz{`5&Zye-fF&$P!{LiEmtdF4}S{-8f)| zO;$@ z+ghs1HB9-lGa$BDD%lp z)c!4kW>}F3UR3D@A~VG8$W1vpx=sO8`7tUI?>~#c@n>58&7PXccfuSNr~~Ez?#{zB z{-R7zXNVRdatKV)SenLsO+=K9H^ZO0FmiL2iWsM1TSBnopXt66s5bY=zc>Fzm5g`C z0}SYPmy9<1)0jU2*#OdvBOL+ud!&33hty1`Ffr(7p0z`(| zgOFI)Hm?u^n+;xCG42r&neFDB{r(nSLMLlx->dw*L05}@(;2zXY@4|bYAe5ZYGLDTbAa1%tRnAIdTy-fQ3|CM=bhXbl|8WNQ}jSdCyAd089buW*WDrU zGh~zh2;85^x_HSLw`=LWCEf|~B%t}V`x67IrOZMbO!Q2`$Bgx$jQe^^J&=eU<0PEj z!yfjGME)4xsnaj8F+!5xH!X6xpRQcwX0O8vFPF-Y)U_@>Kf$6i-}P7$DA)*I>}+|5 zZ+gsC6-dPGt}KWujzn=`NzaAC;0K};zkHHv_YAgM#MrrxotTR*mkU$dEr-qeRm@u9 z@%n?Ve)yJXY?_kvO4fa|2SaN!!>CcAl~U*-6;(CxfRP-kr%riZ{-TnTLA+Ftx?~NC zxVKI%PoO}yLpuz{s?A^c{>&s*7%2ma7%!sNy5yS%kEhS11!t!Px)j3Sm>Hy%(kFYU zp?u0}tCkn(r)^sdLxE406gI0nSTiUIF%@F=w}>wvTof?*O%D(%dA4|Zadkqve}WPm z;A#r+S*fWe%+wXAEg>Lp$gz1d#m_2Yo3jyuoyF1vxngM?S1p;Ivf*5+>0J44agSOV zqAn21DEVOe4uTliteC!N=pYUiJbtIdx}#k91wM@}Mbj~t{$XNHu+4Pcfu{3$`cbSY zky{yj6xQXbjfN(xtw-GRlFgK=rN<&>;)PrS86r>sM0eSEeh`?^{P;wma;_v2>wWRHHK2h)Ym$PLgnb%bQFz08n4)GEh&3>?fcfnAuQdS^YDh8@j6XS zoe+<z1%Oy{x7{L|ufYLuwatGrZq3U*Ov3_)TxJkDOakslWGPS2K 
zT+1#FL+Fp!h&ue*Ww~P)SGRCVa=sk)!#uu*=rmnK#gH?($W5yMEUrl*CK6<{i+??v z97`fWo6&T|S5suRJW|ehhr%A`sXLJt1t)##05P;)NMt=PN9}~b>NyqVF|1yoH^o_R zRpYS@Lh|YEg7|0TrHJ}b1zrYbDpv&W8=jGXX3G?`eUnRMyB#s_hxA67W9|aiPJ~W# z56#uT-qyp+mq1L!SNm^oF>!$5w+38U2iiLrrBXLRjaoi$(Ovy_zK|QFe!zffDP$TY zos{hcE)hdrKzH=-2y3;_R4t`aqT%tSc!0LX-(1A>45@-MAnCZ^>sNNO10;})FT-xr zM8zujW5IuUj=$@Zwi#g?r>RR`|Tp3&B}rU z(lNZ(xPdY)43U+v*GRCe?3>ATL%c>?nBz*v7z7uR$YmGXwc7!Zr1*nrLh~w^E2ZHM z{tTHlbV#{DqxZfVESXXdY2w^68PB3ue!{qQmD1w-pRSLFdrGZko#&s! z$z$%HVrVl&Pu71H+#FZmf{NdYF-zQzz7vy7`cG45N&4Ex+(T{|r^dbqjq2*$hBb!S z5OJI>7+X7cKX}I}jZ^G~G7e!FW@WZ<7{){tq2*Nd(C81%M4&GAnF})Za3fJ#gR}-d zSwpNk;pVcR$~WDN`>hnsrOD)6V3+lisxg}`;92f}RcF+xI} zOHjsDMN*=ORszN$%ns71j*t%-jv3p&{?b559B&yk9--^bYDK6siw(SL72KM|ql>0M znOIK2;%n5k*-D!uPP1s-Fm>!O7EEHEs_O%e*?2BR<`46v#uLF^HW$cmO2^#9 zUZF41MG1XHWV{<`!;gm!1s0YsJinvUD1VU3!#^T{wh_?5Uv_pbg%o=TFF1eyAk85A zLA`FE8h5%7~YS1Lri=M`Km{1a>a^0ad($hV>Hrq5!IjY2f={?}i z*q}Eg=rhrmEFZlghE;$}e#VlYmA{Kk(C}aUAJ|={u4O{l3c66x_Y>Qu2VBJa+JHDk=k>)qv?91fQ z02jbtEF{~dM&d^1m0L?;0Yi7V>94+sAJ0fsUFd{s4l+SM_y!4xr&0Gzz9H%8Y)KgT zU?E1KT?kUeW)tW0RDA)dQ0a?+qpw(Brz;WB{Fsd-b$)=87u=7JNDM2(&7dT{`SW6m z$J)2Lt{DI$5trJk?8WPGr4`xL)^uD zbVNsQaoC#gTkVC`KK5EDXnZaCl{(m@zuQjIR?tybovD@$Eik|0?fQU?V@bhJ7#xT8 zgFiLg?#$G(kuZ=MZu&2}zmLkuhiU;x6RV0_QW-j8bt%$mFrqFrZUB|A^%GUdWjYmb zW_MV07uAa}4lcf=hZ8MeXvZjBnXHN^uw3s@BANI~rxQ6P+#U8@0F9}cHau03L?`_U z$NGdTJg6aQepop|*({=UHNZZmpc$j%Z%74^ZTN~7&wqay9*c@QG;cq+))_XM{>nK? 
zhd`Jw_9$sgHcKJn{p)9J`$p-gY!=)kjdbcP${D${+drVO__c|A);nEZxm z%K)up8eU6Z`By=JhP#U_vqi`Em5{Cea-gUcXYC8No^344{?;PwO}&$A6{JOPnR{ORe{Z9D51Q-df&r2qMch z)4>@e(*o@@Fd|P&GPwdZ=|4mV!5s@5&noFeqVqGm#>wUK)`fA%lApYJ*Y3N}a;%e# zl0RBnbrGlBE%`C$gI1f9*6s}S$0)mGr-XC%%#bAmu6>l;>eV(D>{)?PUiJ1W{LL=A zDuFffi4esk*N2B2?;gc(^%oTQ?+ZCajjkVW5A@XZVIadu%A)^ssS0!Cn84Qao>`uj zS1!w&OaFhGKGKwD8-R=h-;|{P&*kGYv%%_rZ}bTU*OUFmMT&tfnfqfpZL>$;`v2$28GU$?#D6+b4lQLZ?k-w@GynI&pEW5{ zt&^&O-}JOF|7#}3GtzIx|7*56UO@-V|BsvY78V*rfXj9tZ}|B5eU>hV9My77lMSU4 zTGYx<2&p zaFQeXMe_g@vuS-Fo0PI&AtZD2r_xw1x285Tt_p6o35A|~m`Q)2?7R|qN z9VV3KDl=+WSSF&RjpXnaiuEjS9hf&jsQ)|46qMn-vMsxKs?{9FLSsr%phe>XJA{MC zH+@zd{usQK)txurj*cjqe$G;#%lL?{W`mze!eUu0uBXijVy&H(gli4V)|yx4uFt1h zjvO*O*BorpIKcZ?DN~x|_(R}%q=buvf->KfVs%YXk_ncD}s3CE3AF{B^?Ccgt~@KWo3H~+%AM*S}kd}=6X-w z&Ik?0hNXTemdt(xQcv-f+Qp%iPVsrZTv;$I&1K6%knc;5_GuIkS7o_SgJiR4$ zmP_6CnjxKuJU?`FQpP(S_7!!)9DLfk59s-dD|(|(*J%;(_di_kx&Ar%*?{v?7Hhheq$6>j20{2uSKI^gP z`t`kz%w6XQocY%kYdiO%^Q;!~>NP#mGn=-zcT78O72wn1&0mg=$M}%NoNwYb(xiP% z-*(Z^pPT6a*QE6qGaGuJ1ISK1+t_mA`rx2y0|FR^im$}pX3yn(0=(`=w0f1qlx43} zQ(L~`oR94?nM`5~;$*R1p7HKRP?N^>5U$D!BPwSmS(XXria~q#ON(_Vfve-F>&g11m|5LIdzslosKPq-Ax86bOuh55s z(s_rU;e(*sS=-&&!pY%{?c@FSNa&x^`;OSILu|H8xc^${w(HORv!zT^)O!Us3%$pI z#qzSx$ROj+bCx|MHy^zIq!jhk8MFPc$hz62HY#w((L=~`!Kw})UY2m3ex=!aKUi;a zqC#aplhNnC9|fzjVdN*vskRvQPdqjnAG@Hi%@$t+gu`GN#g z86S_woGc0Tx7}ZC-055&YZln3l3ghBd}pIU=YIFmx*6Cyo_jP>cBC(WthEL(;ae}B zd8W%!xouZEyZu_y%1#lBt!^85f!u8JR!x3T8pj96+S~Qo1hstpZQ!0#H8|dG z`tQo;-{(23Sgyr}vHR4(OwWCK2(Wje1=~08)2lJ$96#JW!^ZtOwj-v;Zn-#t`1up{y#zcFn z8jD~Pq>*D3MZ*DM;+TnQ`xQ&K!a8c;e0tNq=RckpIePU5#K>Z$mwis1rbh|CIrFRb z?-{u|4?WOMWx`U6MLAO|P8Tl=$e2^??sv>i`%7WAi%59h<~EjZzf)=TD!i|sod)%M z`^pcnt2WY3J;phdh(KH}%D%}a%iZU|>1t@%^xHma$LxfNP|=bzc{N14u2xI(dH|=( zBH>&mj-U5Vo>-Q5Q#BVB)iQ#Zv&eAwKRMcUC}0>0E^+7PYt6^P@u2(5fA?k3b$#kw zdZkgAGpzW1_U)+>VwTTwPmN0ppC>Yo+VX?PQEB17T&oY}wnc6CU4!Xn@RNsczOtrH z-|yO+^=Fl@mTmVZqOyz5_zd>)a(;YYW@Wf`*{odm9_JSurwUb>i#OPf-mgPfd4(p^ 
z+3)BQLSPgg6nwaP2II2bd^XjywF=TW_pY4=->oN)_fh&L(~{I$XwtZx&z*$zF6O?o zR%zULJlp%j&XLJ^U9V3}TTCC`6t8(tt+kQek2xWHFK_oBSG&H5!%K(wd(HX3PK4@e zTZVklAnGa71pbD*LxeaZCxFLOJllw_+{4+Fl*adQeWFhkmE!~)npM3M^As|X^JbP3;exdH1wLJ|LZQb1J}6Os?}KGGG5_++m6PV`frk zy6z;WvRdD0yUY%-fvev)*jprBYcbHb#0Ml*!=Z-|3lKrOmqP!`Ho+^{yuQP|y0iTKR6m_J@tv>j-&6R$eCTzS_u4U_Sl$?&hN1 zS=BerW;%(hD2~8mdsstB59NEgzcg>6S84T(s*`P1kQ1kkJMRDZO=MhW3-$Yl4$zze zJ6?Vn_x|m1rHxmYEjwN>sq>$6=xT6gCzgv2Hk;Oc*-L+1O0L7_Jw9PbrsE=AE!DR4 zjLTcx>&0_IiTNPgb4f#}66Eo*1xw#qx8vVKI*4?llA+1$X6mbA(o=UKgTw7TKToYX zuHD@7rhqwkFUv~jcQw_rkS~F{!B%v$dQdFm=Pq(xa(*^Oyr8TOLHQH8;Ux}l`Xe7W z#;A-Lki!wwS?Wt^0fwTjxpWw?e42%GaPzc#Fq)-fIBgJtLD@%@$x5UYoud>C!%wPXEx3TE^ zab%ondu?Ig*3HnJ1S^ft9(wryOaOv2ka)KEmycD|L2U9lgtBv$J&j&6~>!!2^ zYyZ2t%}4$5$BmH~#E;gQs%rP=sbZk?AC~7~h5THDb)~x0m^7B22T+dL>D zvE!vp|6r7ZZBy}+TbSweMhGw0W;p&h@(@QK-gLimiyAvcsd^WQZvG4UM$0Ri#xac6^B`1t|uKduNg Z#FI8{nMCja&gVA}5+bs~m4f<#{}*mMXBPkf literal 0 HcmV?d00001 From ba829ab24b8450b3d3af6d5cee72719dad33f4d0 Mon Sep 17 00:00:00 2001 From: win gutmann Date: Tue, 31 Mar 2026 21:43:56 -0400 Subject: [PATCH 2/2] feat: log-sentinel v3 tiered agent architecture + Grafana alerting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces detector/flow/investigator pattern with a 3-tier agent system: - T1: fast triage (summary + anomaly scan, evidence packet builder) - T2: deep investigation (reads evidence packets from Loki, Sentry integration) - T3: synthesis (session narratives, baseline updates, regression detection) Supporting modules: analyst, baseline, evidence, narrative, timeline, trace, circuit_breaker, loki_handler, ollama_client, prompts. Adds Grafana alert rules (8 domains, 46 rules) with webhook trigger integration into the sentinel cycle. Updates all 5 Grafana dashboards for v3 event names. Also fixes DataCaptureSuite preflight seek to use ReplaySearch(ToEnd) instead of frame-based seek — avoids stale ReplayFrameNumEnd reading session start state. 
Co-Authored-By: Claude Sonnet 4.6 --- .claude/CLAUDE.md | 29 +- docs/RULES-GrafanaAlerts.md | 93 +++ .../specs/2026-03-30-grafana-alerts-design.md | 218 +++++++ observability/local/docker-compose.yml | 10 +- .../provisioning/alerting/contact-points.yml | 12 + .../alerting/notification-policies.yml | 10 + .../alerting/rules-claude-sessions.yml | 246 ++++++++ .../alerting/rules-cross-stream.yml | 267 ++++++++ .../alerting/rules-infrastructure.yml | 348 +++++++++++ .../provisioning/alerting/rules-iracing.yml | 354 +++++++++++ .../alerting/rules-sentinel-health.yml | 246 ++++++++ .../alerting/rules-token-cost.yml | 246 ++++++++ .../dashboards/claude-cache-context.json | 292 +++++---- .../dashboards/claude-code-overview.json | 142 +++-- .../dashboards/claude-token-cost.json | 398 ++++++------ .../dashboards/simsteward-deploy-health.json | 190 +++++- .../dashboards/simsteward-log-sentinel.json | 426 +++++++++---- observability/local/log-sentinel/analyst.py | 376 ++++++++++++ observability/local/log-sentinel/app.py | 111 +++- observability/local/log-sentinel/baseline.py | 229 +++++++ observability/local/log-sentinel/config.py | 14 +- .../local/log-sentinel/detectors/__init__.py | 0 .../__pycache__/__init__.cpython-313.pyc | Bin 196 -> 0 bytes .../action_failure.cpython-313.pyc | Bin 3914 -> 0 bytes .../__pycache__/agent_loop.cpython-313.pyc | Bin 5879 -> 0 bytes .../__pycache__/base.cpython-313.pyc | Bin 1032 -> 0 bytes .../claude_session.cpython-313.pyc | Bin 3692 -> 0 bytes .../__pycache__/error_spike.cpython-313.pyc | Bin 2092 -> 0 bytes .../__pycache__/flow_gap.cpython-313.pyc | Bin 2892 -> 0 bytes .../incident_anomaly.cpython-313.pyc | Bin 4586 -> 0 bytes .../__pycache__/mcp_health.cpython-313.pyc | Bin 4560 -> 0 bytes .../plugin_lifecycle.cpython-313.pyc | Bin 4987 -> 0 bytes .../resource_health.cpython-313.pyc | Bin 3968 -> 0 bytes .../sentinel_health.cpython-313.pyc | Bin 3486 -> 0 bytes .../session_quality.cpython-313.pyc | Bin 3700 -> 0 bytes 
.../silent_session.cpython-313.pyc | Bin 2058 -> 0 bytes .../__pycache__/stuck_user.cpython-313.pyc | Bin 4322 -> 0 bytes .../__pycache__/token_usage.cpython-313.pyc | Bin 3801 -> 0 bytes .../__pycache__/tool_patterns.cpython-313.pyc | Bin 6283 -> 0 bytes .../websocket_health.cpython-313.pyc | Bin 2856 -> 0 bytes .../log-sentinel/detectors/action_failure.py | 103 ---- .../log-sentinel/detectors/agent_loop.py | 133 ---- .../local/log-sentinel/detectors/base.py | 16 - .../log-sentinel/detectors/claude_session.py | 98 --- .../log-sentinel/detectors/error_spike.py | 46 -- .../local/log-sentinel/detectors/flow_gap.py | 48 -- .../detectors/incident_anomaly.py | 107 ---- .../log-sentinel/detectors/mcp_health.py | 119 ---- .../detectors/plugin_lifecycle.py | 140 ----- .../log-sentinel/detectors/resource_health.py | 97 --- .../log-sentinel/detectors/sentinel_health.py | 86 --- .../log-sentinel/detectors/session_quality.py | 96 --- .../log-sentinel/detectors/silent_session.py | 53 -- .../log-sentinel/detectors/stuck_user.py | 93 --- .../log-sentinel/detectors/token_usage.py | 105 ---- .../log-sentinel/detectors/tool_patterns.py | 130 ---- .../detectors/websocket_health.py | 71 --- observability/local/log-sentinel/evidence.py | 235 +++++++ .../local/log-sentinel/flows/__init__.py | 0 .../__pycache__/__init__.cpython-313.pyc | Bin 192 -> 0 bytes .../flows/__pycache__/engine.cpython-313.pyc | Bin 5203 -> 0 bytes .../flows/definitions/capture_incident.yml | 25 - .../flows/definitions/review_incident.yml | 34 - .../flows/definitions/session_health.yml | 30 - .../flows/definitions/transport_controls.yml | 24 - .../flows/definitions/walk_driver.yml | 33 - .../flows/definitions/walk_session.yml | 33 - .../local/log-sentinel/flows/engine.py | 85 --- .../local/log-sentinel/grafana_client.py | 15 + .../log-sentinel/investigator/__init__.py | 0 .../__pycache__/__init__.cpython-313.pyc | Bin 199 -> 0 bytes .../__pycache__/chain.cpython-313.pyc | Bin 12150 -> 0 bytes 
.../__pycache__/knowledge.cpython-313.pyc | Bin 2602 -> 0 bytes .../__pycache__/prompts.cpython-313.pyc | Bin 5749 -> 0 bytes .../local/log-sentinel/investigator/chain.py | 232 ------- .../log-sentinel/investigator/knowledge.py | 47 -- .../log-sentinel/investigator/prompts.py | 142 ----- .../local/log-sentinel/loki_client.py | 156 ++++- observability/local/log-sentinel/models.py | 100 --- observability/local/log-sentinel/narrative.py | 214 +++++++ .../local/log-sentinel/ollama_client.py | 64 ++ observability/local/log-sentinel/prompts.py | 396 ++++++++++++ .../local/log-sentinel/query_cache.py | 102 --- .../local/log-sentinel/requirements.txt | 1 - observability/local/log-sentinel/sentinel.py | 579 ++++++++---------- .../local/log-sentinel/sentry_client.py | 136 +++- observability/local/log-sentinel/t1_agent.py | 220 +++++++ observability/local/log-sentinel/t2_agent.py | 318 ++++++++++ observability/local/log-sentinel/t3_agent.py | 329 ++++++++++ observability/local/log-sentinel/timeline.py | 200 ++++++ observability/local/log-sentinel/trace.py | 225 +++++++ .../SimStewardPlugin.DataCaptureSuite.cs | 15 +- 92 files changed, 6495 insertions(+), 3293 deletions(-) create mode 100644 docs/RULES-GrafanaAlerts.md create mode 100644 docs/superpowers/specs/2026-03-30-grafana-alerts-design.md create mode 100644 observability/local/grafana/provisioning/alerting/contact-points.yml create mode 100644 observability/local/grafana/provisioning/alerting/notification-policies.yml create mode 100644 observability/local/grafana/provisioning/alerting/rules-claude-sessions.yml create mode 100644 observability/local/grafana/provisioning/alerting/rules-cross-stream.yml create mode 100644 observability/local/grafana/provisioning/alerting/rules-infrastructure.yml create mode 100644 observability/local/grafana/provisioning/alerting/rules-iracing.yml create mode 100644 observability/local/grafana/provisioning/alerting/rules-sentinel-health.yml create mode 100644 
observability/local/grafana/provisioning/alerting/rules-token-cost.yml create mode 100644 observability/local/log-sentinel/analyst.py create mode 100644 observability/local/log-sentinel/baseline.py delete mode 100644 observability/local/log-sentinel/detectors/__init__.py delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/__init__.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/action_failure.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/agent_loop.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/base.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/claude_session.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/error_spike.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/flow_gap.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/incident_anomaly.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/mcp_health.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/plugin_lifecycle.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/resource_health.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/sentinel_health.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/session_quality.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/silent_session.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/stuck_user.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/token_usage.cpython-313.pyc delete mode 100644 
observability/local/log-sentinel/detectors/__pycache__/tool_patterns.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/__pycache__/websocket_health.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/detectors/action_failure.py delete mode 100644 observability/local/log-sentinel/detectors/agent_loop.py delete mode 100644 observability/local/log-sentinel/detectors/base.py delete mode 100644 observability/local/log-sentinel/detectors/claude_session.py delete mode 100644 observability/local/log-sentinel/detectors/error_spike.py delete mode 100644 observability/local/log-sentinel/detectors/flow_gap.py delete mode 100644 observability/local/log-sentinel/detectors/incident_anomaly.py delete mode 100644 observability/local/log-sentinel/detectors/mcp_health.py delete mode 100644 observability/local/log-sentinel/detectors/plugin_lifecycle.py delete mode 100644 observability/local/log-sentinel/detectors/resource_health.py delete mode 100644 observability/local/log-sentinel/detectors/sentinel_health.py delete mode 100644 observability/local/log-sentinel/detectors/session_quality.py delete mode 100644 observability/local/log-sentinel/detectors/silent_session.py delete mode 100644 observability/local/log-sentinel/detectors/stuck_user.py delete mode 100644 observability/local/log-sentinel/detectors/token_usage.py delete mode 100644 observability/local/log-sentinel/detectors/tool_patterns.py delete mode 100644 observability/local/log-sentinel/detectors/websocket_health.py create mode 100644 observability/local/log-sentinel/evidence.py delete mode 100644 observability/local/log-sentinel/flows/__init__.py delete mode 100644 observability/local/log-sentinel/flows/__pycache__/__init__.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/flows/__pycache__/engine.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/flows/definitions/capture_incident.yml delete mode 100644 
observability/local/log-sentinel/flows/definitions/review_incident.yml delete mode 100644 observability/local/log-sentinel/flows/definitions/session_health.yml delete mode 100644 observability/local/log-sentinel/flows/definitions/transport_controls.yml delete mode 100644 observability/local/log-sentinel/flows/definitions/walk_driver.yml delete mode 100644 observability/local/log-sentinel/flows/definitions/walk_session.yml delete mode 100644 observability/local/log-sentinel/flows/engine.py delete mode 100644 observability/local/log-sentinel/investigator/__init__.py delete mode 100644 observability/local/log-sentinel/investigator/__pycache__/__init__.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/investigator/__pycache__/chain.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/investigator/__pycache__/knowledge.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/investigator/__pycache__/prompts.cpython-313.pyc delete mode 100644 observability/local/log-sentinel/investigator/chain.py delete mode 100644 observability/local/log-sentinel/investigator/knowledge.py delete mode 100644 observability/local/log-sentinel/investigator/prompts.py delete mode 100644 observability/local/log-sentinel/models.py create mode 100644 observability/local/log-sentinel/narrative.py create mode 100644 observability/local/log-sentinel/ollama_client.py create mode 100644 observability/local/log-sentinel/prompts.py delete mode 100644 observability/local/log-sentinel/query_cache.py create mode 100644 observability/local/log-sentinel/t1_agent.py create mode 100644 observability/local/log-sentinel/t2_agent.py create mode 100644 observability/local/log-sentinel/t3_agent.py create mode 100644 observability/local/log-sentinel/timeline.py create mode 100644 observability/local/log-sentinel/trace.py diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index dedcaf7..411818c 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -150,4 +150,31 @@ 
All session context fields fall back to `"not in session"` (use `SessionLogging. - [ ] New iRacing SDK event handler → structured log with `domain="iracing"` - [ ] `iracing_incident` / `incident_detected` log → full uniqueness signature (`unique_user_id`, start/end frame, camera) -**Canonical reference:** [docs/RULES-ActionCoverage.md](../docs/RULES-ActionCoverage.md) \ No newline at end of file +**Canonical reference:** [docs/RULES-ActionCoverage.md](../docs/RULES-ActionCoverage.md) + +--- + +## Grafana Alert Covenant + +Every behavioral change to the plugin, dashboard, or LLM integration MUST include a Grafana alert review. **Alert silence ≠ alert passing.** + +### Change → Domain quick-reference + +| Change type | Domain to check | +|---|---| +| New `DispatchAction` branch | Domain 3 — `action-failure-streak` thresholds | +| New iRacing SDK event | Domains 3 + 7 — session/replay rules | +| New Claude API integration | Domains 4 + 5 — session health + cost | +| New MCP tool | Domain 4 — `mcp-service-errors`, `tool-loop-detected` | +| Log event renamed/removed | Search alert YAMLs — alert will go **silent**, not fire | +| New log event/field | Consider whether a new alert rule is warranted | +| Sentinel code change | Domain 6 — self-health rules | + +### PR Checklist addition + +- [ ] Reviewed impacted Grafana alert domains (see table above) +- [ ] Verified no alert queries break silently if log events were renamed/removed +- [ ] Considered new alert rule if new log events were added + +**Alert YAML files:** `observability/local/grafana/provisioning/alerting/` (46 rules, 8 domains) +**Canonical reference:** [docs/RULES-GrafanaAlerts.md](../docs/RULES-GrafanaAlerts.md) \ No newline at end of file diff --git a/docs/RULES-GrafanaAlerts.md b/docs/RULES-GrafanaAlerts.md new file mode 100644 index 0000000..7692acd --- /dev/null +++ b/docs/RULES-GrafanaAlerts.md @@ -0,0 +1,93 @@ +# Grafana Alert Rules — Development Covenant + +Every behavioral change to the plugin, 
dashboard, or LLM integration **must include a +corresponding Grafana alert review**. Silence is not the same as passing. + +**Canonical spec:** `docs/superpowers/specs/2026-03-30-grafana-alerts-design.md` +**Alert YAML files:** `observability/local/grafana/provisioning/alerting/` + +--- + +## Change → Domain Mapping + +| Change type | Domain(s) to review | +|---|---| +| New action handler in `DispatchAction` | Domain 3 — check `action-failure-streak` thresholds | +| New iRacing SDK event handler | Domain 3 and/or Domain 7 — check incident/replay rules | +| New Claude API integration | Domains 4 + 5 — session health and cost rules | +| New MCP tool added | Domain 4 — `mcp-service-errors`, `tool-loop-detected` | +| New log event or field added | Check all domains — does it need a new alert? | +| Removing or renaming a log event | Search alert YAMLs for old name — alert will go **silent**, not fire | +| Changing cost fields in token metrics | Domain 5 — all cost threshold alerts | +| Changing session lifecycle events | Domains 3, 4, 8 — session start/end correlation | +| Sentinel code change | Domain 6 — self-health rules | +| Grafana dashboard change | Domain 8 — cross-stream rules may need annotation updates | + +--- + +## Alert Silence ≠ Alert Passing + +When you rename or remove a log event: +- The alert query will return **no data** (not 0) +- If `noDataState: OK` — the alert silently stops firing +- This is a **silent regression** — harder to detect than a real alert + +Always check `noDataState` when modifying events that existing alerts depend on. + +--- + +## Testing New Alerts + +To verify an alert fires correctly before relying on it: + +1. 
**Write a test event to Loki** via the gateway: + ```bash + curl -X POST http://localhost:3500/loki/api/v1/push \ + -H "Content-Type: application/json" \ + -d '{ + "streams": [{ + "stream": {"app": "sim-steward", "env": "local", "level": "ERROR"}, + "values": [["'"$(date +%s%N)"'", "{\"level\":\"ERROR\",\"event\":\"test\",\"message\":\"test alert\"}"]] + }] + }' + ``` + +2. **Temporarily lower the threshold** in the alert rule to `0` and set the evaluation interval to `10s` in Grafana UI (do not commit this change). + +3. **Verify the alert fires** in Grafana UI → Alerting → Alert Rules within the evaluation window. + +4. **Verify the `/trigger` webhook** receives the payload: + ```bash + # Check log-sentinel logs + docker compose logs log-sentinel --tail=20 + ``` + +5. **Restore the threshold** before committing any YAML changes. + +--- + +## Alert Catalog Summary + +| File | Domains | Count | +|---|---|---| +| `rules-infrastructure.yml` | 1+2: Infrastructure & Deploy Quality | 10 | +| `rules-iracing.yml` | 3+7: iRacing Session + Replay | 10 | +| `rules-claude-sessions.yml` | 4: Claude Code Session Health | 7 | +| `rules-token-cost.yml` | 5: Token & Cost Budget | 7 | +| `rules-sentinel-health.yml` | 6: Sentinel Self-Health | 7 | +| `rules-cross-stream.yml` | 8: Cross-Stream Correlation | 5 | +| **Total** | | **46** | + +T2-tier alerts (skip `needs_t2` gate, escalate immediately): +`subagent-explosion`, `tool-loop-detected`, `session-cost-critical`, `daily-spend-critical`, +`ws-claude-coinflict`, `session-token-abandon`, `action-fail-session-fail`, `deploy-triple-signal` + +--- + +## PR Checklist Addition + +For any PR modifying plugin behavior, add to the review checklist: + +- [ ] Reviewed Grafana alert domains for impacted change type (see table above) +- [ ] If log events were renamed/removed: verified no alert queries silently break +- [ ] If new log events added: considered whether a new alert rule is warranted diff --git 
a/docs/superpowers/specs/2026-03-30-grafana-alerts-design.md b/docs/superpowers/specs/2026-03-30-grafana-alerts-design.md new file mode 100644 index 0000000..88c365c --- /dev/null +++ b/docs/superpowers/specs/2026-03-30-grafana-alerts-design.md @@ -0,0 +1,218 @@ +# Grafana Alerts Design — Log Sentinel Layer 0 +**Date:** 2026-03-30 +**Status:** Approved + +--- + +## Context + +The log-sentinel V2 LLM investigation pipeline (T1 triage + T2 agentic tool loop) is expensive to run continuously — qwen3:8b T1 scan on a 6700 XT takes 60-90 seconds, T2 takes 3-4 minutes. Running this on a fixed hourly poll means real incidents can sit undetected for up to 60 minutes, and the models waste cycles on quiet periods. + +Grafana Alerts solves this as **Layer 0**: always-on, no GPU cost, fires webhooks only when something is actually wrong. The sentinel switches from polling to event-driven. When Grafana fires, it delivers structured alert context (labels, values, timeframe) directly in the webhook payload — T1 skips cold-start gathering for the relevant domain and goes straight to targeted investigation. + +**Layer 0 (Grafana Alerts) → Layer 1 (T1 fast triage) → Layer 2 (T2 agentic tool loop)** + +--- + +## Alert Architecture + +### Transport: Webhook-Only +Grafana alert notifications route exclusively to log-sentinel's `/trigger` HTTP endpoint. No email, Slack, or PagerDuty at this stage. The sentinel logs every trigger, runs the appropriate tier, and emits findings to Loki (queryable by Grafana dashboards). 
+ +### Provisioning Structure +All alerts are provisioned as code — no manual UI configuration: +``` +observability/local/grafana/provisioning/alerting/ + contact-points.yml # webhook endpoint definition + notification-policies.yml # routing: all alerts → webhook + rules-infrastructure.yml # Domains 1+2 + rules-iracing.yml # Domain 3+7 + rules-claude-sessions.yml # Domain 4 + rules-token-cost.yml # Domain 5 + rules-sentinel-health.yml # Domain 6 + rules-cross-stream.yml # Domain 8 +``` + +### Trigger Tier Labeling +Every alert rule carries a `trigger_tier` label (`t1` or `t2`). The sentinel's `/trigger` endpoint reads this label and routes accordingly — T1 for most alerts, T2 for critical multi-signal correlations. + +--- + +## Alert Catalog + +### Domain 1+2: Infrastructure & Deploy Quality (10 alerts) + +| Alert ID | LogQL / Condition | Severity | Tier | +|---|---|---|---| +| `bridge-start-failed` | `count_over_time({app="sim-steward"} \| json \| event="plugin_lifecycle" \| level="ERROR" [5m]) > 0` | critical | T1 | +| `plugin-never-ready` | plugin_lifecycle start, no ready within 60s | warn | T1 | +| `sentinel-cycle-stalled` | No `sentinel_cycle` event in 90 min | critical | T1 | +| `ollama-unreachable` | `sentinel_health` event with `ollama_reachable=false` | critical | T1 | +| `loki-circuit-open` | `sentinel_health` with `loki_circuit_open=true` | critical | T1 | +| `post-deploy-warn-rate` | WARN rate > 5/min in 10 min after lifecycle event | warn | T1 | +| `bridge-failure-post-deploy` | ERROR in sim-steward within 15 min of plugin_start | critical | T1 | +| `plugin-slow-start` | Time from plugin_lifecycle start → ready > 30s | warn | T1 | +| `error-spike-post-deploy` | Error count doubles vs prior 15 min window after deploy | warn | T1 | +| `error-spike-general` | `count_over_time({app="sim-steward"} \| json \| level="ERROR" [10m]) > 10` | warn | T1 | + +### Domain 3: iRacing Session Behavior (5 alerts) + +| Alert ID | Condition | Severity | Tier | 
+|---|---|---|---| +| `session-no-actions` | Session active 15+ min, zero `action_dispatched` events | warn | T1 | +| `session-no-end` | `iracing_session_start` with no `iracing_session_end` within 4h | warn | T1 | +| `action-failure-streak` | 3+ consecutive `action_result` errors in same session | critical | T1 | +| `websocket-disconnect-spike` | 3+ `websocket_disconnect` events in 5 min | warn | T1 | +| `incident-detection-zero` | iRacing session > 30 min, zero `iracing_incident` events | warn | T1 | + +### Domain 4: Claude Code Session Health (7 alerts) + +| Alert ID | Condition | Severity | Tier | +|---|---|---|---| +| `session-abandoned` | Session start, no completion token entry, no activity for 30 min | warn | T1 | +| `claude-error-spike` | 5+ ERROR entries in claude-dev-logging in 5 min | warn | T1 | +| `permission-flood` | 10+ permission-related log entries in 5 min | warn | T1 | +| `subagent-explosion` | Subagent spawn count > 20 in single session | warn | T2 | +| `mcp-service-errors` | MCP call failures > 5 in 10 min | warn | T1 | +| `tool-loop-detected` | Same tool called 5+ times in same session without progress | warn | T2 | +| `session-zero-output` | Session completes (token entry exists), zero assistant messages logged | warn | T1 | + +### Domain 5: Token/Cost Budget (7 alerts) + +| Alert ID | Condition | Severity | Tier | +|---|---|---|---| +| `session-cost-spike` | Single session cost > $1.00 | warn | T1 | +| `session-cost-critical` | Single session cost > $3.00 | critical | T2 | +| `daily-spend-warning` | Rolling 24h spend > $10.00 | warn | T1 | +| `daily-spend-critical` | Rolling 24h spend > $25.00 | critical | T2 | +| `tool-use-flood` | Tool calls per session > 100 | warn | T1 | +| `unexpected-model` | Model field not in approved set (claude-opus-4, claude-sonnet-4-6, etc.) 
| warn | T1 | +| `cache-hit-rate-low` | Cache hit rate < 20% over 1h (when sessions active) | info | T1 | + +### Domain 6: Sentinel Self-Health (7 alerts) + +| Alert ID | Condition | Severity | Tier | +|---|---|---|---| +| `sentinel-cycle-stalled` | No `sentinel_cycle` event in 90 min | critical | T1 | +| `detector-error-rate` | Detector errors > 3 in single cycle | warn | T1 | +| `t1-slow` | T1 inference duration > 120s | warn | T1 | +| `t2-slow` | T2 tool loop duration > 300s | warn | T1 | +| `sentry-flood` | Sentry-worthy findings > 5 in 1h | warn | T1 | +| `findings-flood` | Total findings > 20 in single cycle | warn | T1 | +| `zero-findings-48h` | No findings at all in 48h (system may be suppressing) | info | T1 | + +### Domain 7: Replay & Incident Investigation (5 alerts) + +| Alert ID | Condition | Severity | Tier | +|---|---|---|---| +| `replay-no-seeks` | Replay started, zero `iracing_replay_seek` in 5 min | warn | T1 | +| `incident-detection-stall` | iRacing session active > 30 min, zero `iracing_incident` events in replay mode | warn | T1 | +| `incident-camera-stuck` | Same `camera_view` on 3+ consecutive incidents | info | T1 | +| `replay-session-no-close` | Replay session start, no session_end within 2h | warn | T1 | +| `action-incident-gap` | Incident detected, no `action_dispatched` within 10 min | info | T1 | + +### Domain 8: Cross-Stream Correlation (5 alerts) +*Implemented as multi-query rules using Grafana `math` expressions — fires only when both conditions true simultaneously.* + +| Alert ID | Streams | Condition | Severity | Tier | +|---|---|---|---|---| +| `ws-claude-coinflict` | sim-steward + claude-dev-logging | WebSocket disconnect + Claude ERROR in same 5-min window | warn | T2 | +| `session-token-abandon` | claude-dev-logging + claude-token-metrics | Session ERROR + no token entry for that session_id | warn | T2 | +| `action-fail-session-fail` | sim-steward + claude-dev-logging | `action_result` errors + Claude session ERROR within 10 
min | critical | T2 | +| `deploy-triple-signal` | all 3 streams | 2+ streams show elevated error rate within 15 min of plugin lifecycle event | critical | T2 | +| `cost-spike-tool-flood` | claude-dev-logging + claude-token-metrics | Tool call count spike + session cost spike in same cycle | warn | T1 | + +**Total: 46 alerts across 8 domains.** + +--- + +## `/trigger` Endpoint Design + +The log-sentinel app gains a new HTTP endpoint: + +``` +POST /trigger +Content-Type: application/json + +{ + "alerts": [{ + "labels": { + "alertname": "ws-claude-coinflict", + "trigger_tier": "t2", + "severity": "warn" + }, + "annotations": { + "summary": "WebSocket disconnects co-occurring with Claude errors", + "description": "3 ws_disconnect events and 2 Claude ERROR entries in 5-min window ending 14:32:00" + }, + "startsAt": "2026-03-30T14:32:00Z", + "endsAt": "0001-01-01T00:00:00Z" + }] +} +``` + +Sentinel behavior on receipt: +1. Parse alert labels — extract `alertname`, `trigger_tier`, `severity` +2. Derive lookback window from `startsAt` (default: 30 min before alert fired) +3. If `trigger_tier=t1`: run T1 with alert context injected into summary prompt +4. If `trigger_tier=t2`: run T1 (for context) then immediately run T2 — skip the `needs_t2` gate +5. Deduplicate: if the same `alertname` triggered within `SENTINEL_DEDUP_WINDOW_SEC`, skip +6. Log `sentinel_trigger` event to Loki with alert metadata + +Alert context injection into T1 prompt: +``` +ALERT CONTEXT (from Grafana): + Alert: ws-claude-coinflict (warn) + Fired: 2026-03-30 14:32:00 UTC + Description: 3 ws_disconnect events and 2 Claude ERROR entries in 5-min window + → Focus investigation on this signal. Do not suppress even if recent history is quiet. 
+``` + +--- + +## Alert Covenant (Living Document) + +**Every behavioral change to the plugin, dashboard, or LLM integration must include a corresponding Grafana alert review.** + +When adding or changing: +- A new action handler → check Domain 3 (action-failure-streak thresholds) +- A new Claude integration → check Domain 4 + 5 +- A new log event or field → check if it should trigger an alert in the relevant domain +- Removing a log event → check if any alert depends on it (alert will go silent, not fire) + +Alert silence ≠ alert passing. Test new alerts by writing a test event to Loki via the gateway and verifying the alert fires within its evaluation window. + +**Canonical reference: `docs/RULES-GrafanaAlerts.md`** (to be added to CLAUDE.md) + +--- + +## Implementation Files + +### New files +- `observability/local/grafana/provisioning/alerting/contact-points.yml` +- `observability/local/grafana/provisioning/alerting/notification-policies.yml` +- `observability/local/grafana/provisioning/alerting/rules-infrastructure.yml` +- `observability/local/grafana/provisioning/alerting/rules-iracing.yml` +- `observability/local/grafana/provisioning/alerting/rules-claude-sessions.yml` +- `observability/local/grafana/provisioning/alerting/rules-token-cost.yml` +- `observability/local/grafana/provisioning/alerting/rules-sentinel-health.yml` +- `observability/local/grafana/provisioning/alerting/rules-cross-stream.yml` +- `docs/RULES-GrafanaAlerts.md` + +### Modified files +- `observability/local/log-sentinel/app.py` — add `POST /trigger` endpoint +- `observability/local/log-sentinel/sentinel.py` — add `trigger_cycle()` method (alert-context-aware T1/T2 dispatch) +- `observability/local/log-sentinel/config.py` — no new fields needed (uses existing dedup window) +- `observability/local/docker-compose.yml` — no changes needed (grafana already provisioned, port 3000) +- `.claude/CLAUDE.md` — add alert covenant reference + +--- + +## Verification + +1. 
**Provisioning loads**: `docker compose up grafana` — check Grafana UI → Alerting → Alert Rules shows all 46 rules +2. **Webhook fires**: Manually set an alert rule to always-firing in Grafana UI, verify `/trigger` receives POST and logs `sentinel_trigger` event to Loki +3. **T1 trigger path**: Confirm T1 runs after a non-critical alert fires, `sentinel_analyst_run` appears in logs with `trigger_source=grafana_alert` +4. **T2 direct trigger**: Confirm T2 runs immediately (skipping `needs_t2` gate) when `trigger_tier=t2` alert fires +5. **Dedup**: Fire same alert twice within dedup window, verify second is silently skipped +6. **Cross-stream rule**: Write test events to both sim-steward and claude-dev-logging streams via Loki push API, verify `ws-claude-coinflict` fires diff --git a/observability/local/docker-compose.yml b/observability/local/docker-compose.yml index 8ea026a..6164380 100644 --- a/observability/local/docker-compose.yml +++ b/observability/local/docker-compose.yml @@ -74,12 +74,12 @@ services: - GRAFANA_USER=${GRAFANA_ADMIN_USER:-admin} - GRAFANA_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin} - OLLAMA_URL=http://host.docker.internal:11434 - - OLLAMA_MODEL_FAST=deepseek-r1:8b - - OLLAMA_MODEL_DEEP=llama3.3:70b-instruct-q4_K_M - - SENTINEL_POLL_INTERVAL_SEC=60 - - SENTINEL_LOOKBACK_SEC=300 + - OLLAMA_MODEL_FAST=qwen3:8b + - OLLAMA_MODEL_DEEP=qwen3:32b + - SENTINEL_POLL_INTERVAL_SEC=3600 + - SENTINEL_LOOKBACK_SEC=3600 - SENTINEL_T2_ENABLED=true - - SENTINEL_T2_PROACTIVE_INTERVAL_SEC=300 + - SENTINEL_T2_PROACTIVE_INTERVAL_SEC=3600 - SENTINEL_DEDUP_WINDOW_SEC=300 - SENTINEL_SENTRY_DSN=${SENTINEL_SENTRY_DSN:-} - SIMSTEWARD_LOG_ENV=${SIMSTEWARD_LOG_ENV:-local} diff --git a/observability/local/grafana/provisioning/alerting/contact-points.yml b/observability/local/grafana/provisioning/alerting/contact-points.yml new file mode 100644 index 0000000..4414ef7 --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/contact-points.yml @@ -0,0 +1,12 @@ 
+apiVersion: 1 + +contactPoints: + - orgId: 1 + name: log-sentinel-webhook + receivers: + - uid: log-sentinel-webhook-recv + type: webhook + settings: + url: http://log-sentinel:8081/trigger + httpMethod: POST + disableResolveMessage: true diff --git a/observability/local/grafana/provisioning/alerting/notification-policies.yml b/observability/local/grafana/provisioning/alerting/notification-policies.yml new file mode 100644 index 0000000..f1d6e22 --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/notification-policies.yml @@ -0,0 +1,10 @@ +apiVersion: 1 + +policies: + - orgId: 1 + receiver: log-sentinel-webhook + group_by: ['alertname'] + group_wait: 0s + group_interval: 1m + repeat_interval: 4h + routes: [] diff --git a/observability/local/grafana/provisioning/alerting/rules-claude-sessions.yml b/observability/local/grafana/provisioning/alerting/rules-claude-sessions.yml new file mode 100644 index 0000000..0e03d97 --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/rules-claude-sessions.yml @@ -0,0 +1,246 @@ +apiVersion: 1 + +groups: + - orgId: 1 + name: Claude Code Session Health + folder: Log Sentinel + interval: 1m + rules: + + - uid: session-abandoned + title: Session Abandoned + condition: B + data: + - refId: A + relativeTimeRange: { from: 1800, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="ERROR" [30m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 30m + annotations: + summary: Claude session started but no activity for 30 minutes + description: Session start detected with no activity, errors, or completion events for 30 minutes + labels: + 
alertname: session-abandoned + severity: warn + trigger_tier: t1 + + - uid: claude-error-spike + title: Claude Error Spike + condition: B + data: + - refId: A + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="ERROR" [5m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [4], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 5+ ERROR entries in claude-dev-logging in 5 minutes + description: Elevated error rate in Claude session logging — possible API or tool failure + labels: + alertname: claude-error-spike + severity: warn + trigger_tier: t1 + + - uid: permission-flood + title: Permission Flood + condition: B + data: + - refId: A + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | message=~"(?i).*permission.*" [5m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [9], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 10+ permission-related log entries in 5 minutes + description: Possible permission configuration problem or tool permission loop + labels: + alertname: permission-flood + severity: warn + trigger_tier: t1 + + - uid: subagent-explosion + title: Subagent Explosion + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: 
loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | message=~"(?i).*subagent.*spawn.*" [60m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [19], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Subagent spawn count exceeded 20 in single session + description: Unusually high subagent spawning — possible recursive agent loop or over-parallelization + labels: + alertname: subagent-explosion + severity: warn + trigger_tier: t2 + + - uid: mcp-service-errors + title: MCP Service Errors + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | message=~"(?i).*mcp.*error.*|.*error.*mcp.*" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [4], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 5+ MCP call failures in 10 minutes + description: MCP server appears to be failing — multiple call errors detected + labels: + alertname: mcp-service-errors + severity: warn + trigger_tier: t1 + + - uid: tool-loop-detected + title: Tool Loop Detected + condition: B + data: + - refId: A + relativeTimeRange: { from: 1800, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="WARN" | message=~"(?i).*tool.*loop.*|.*repeated.*tool.*" [30m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + 
model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Same tool called 5+ times in session without progress + description: Possible stuck agent — repeated tool invocations without forward progress + labels: + alertname: tool-loop-detected + severity: warn + trigger_tier: t2 + + - uid: session-zero-output + title: Session Zero Output + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-token-metrics"} | json [60m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Session completed with token entry but zero assistant messages logged + description: Session appears to have run but produced no output — possible silent failure + labels: + alertname: session-zero-output + severity: warn + trigger_tier: t1 diff --git a/observability/local/grafana/provisioning/alerting/rules-cross-stream.yml b/observability/local/grafana/provisioning/alerting/rules-cross-stream.yml new file mode 100644 index 0000000..3c63a78 --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/rules-cross-stream.yml @@ -0,0 +1,267 @@ +apiVersion: 1 + +groups: + - orgId: 1 + name: Cross-Stream Correlation + folder: Log Sentinel + interval: 1m + rules: + + # ws-claude-coinflict: WebSocket disconnect + Claude ERROR in same 5-min window + - uid: ws-claude-coinflict + title: WebSocket + Claude Error Conflict + condition: D + data: + - refId: A + relativeTimeRange: { 
from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="websocket_disconnect" [5m])' + instant: true + refId: A + - refId: B + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="ERROR" [5m])' + instant: true + refId: B + - refId: C + datasourceUid: __expr__ + model: + type: math + expression: "$A > 0 && $B > 0" + refId: C + - refId: D + datasourceUid: __expr__ + model: + type: classic_conditions + refId: D + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [C] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: WebSocket disconnects co-occurring with Claude errors + description: WebSocket disconnect and Claude ERROR events detected in the same 5-minute window + labels: + alertname: ws-claude-coinflict + severity: warn + trigger_tier: t2 + + # session-token-abandon: Claude session ERROR + no token entry for that session + - uid: session-token-abandon + title: Session Error Without Token Entry + condition: D + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="ERROR" [1h])' + instant: true + refId: A + - refId: B + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-token-metrics"} | json [1h])' + instant: true + refId: B + - refId: C + datasourceUid: __expr__ + model: + type: math + expression: "$A > 0 && $B == 0" + refId: C + - refId: D + datasourceUid: __expr__ + model: + type: 
classic_conditions + refId: D + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [C] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 5m + annotations: + summary: Session ERROR entries with no corresponding token metrics + description: Claude session errors present but no token/cost entry — session may have been abandoned or crashed before completion + labels: + alertname: session-token-abandon + severity: warn + trigger_tier: t2 + + # action-fail-session-fail: action_result errors + Claude session ERROR within 10 min + - uid: action-fail-session-fail + title: Action Failure + Session Failure + condition: D + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="action_result" | level="ERROR" [10m])' + instant: true + refId: A + - refId: B + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="ERROR" [10m])' + instant: true + refId: B + - refId: C + datasourceUid: __expr__ + model: + type: math + expression: "$A > 0 && $B > 0" + refId: C + - refId: D + datasourceUid: __expr__ + model: + type: classic_conditions + refId: D + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [C] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Plugin action failures coinciding with Claude session errors + description: action_result errors and Claude session errors detected within the same 10-minute window — possible cascading failure + labels: + alertname: action-fail-session-fail + severity: critical + trigger_tier: t2 + + # deploy-triple-signal: 2+ streams elevated error rate within 15 
min of plugin lifecycle event + - uid: deploy-triple-signal + title: Deploy Triple Signal + condition: E + data: + - refId: A + relativeTimeRange: { from: 900, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | level="ERROR" [15m])' + instant: true + refId: A + - refId: B + relativeTimeRange: { from: 900, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-dev-logging"} | json | level="ERROR" [15m])' + instant: true + refId: B + - refId: C + relativeTimeRange: { from: 900, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="plugin_lifecycle" [15m])' + instant: true + refId: C + - refId: D + datasourceUid: __expr__ + model: + type: math + expression: "($A > 5 ? 1 : 0) + ($B > 5 ? 1 : 0) + ($C > 0 ? 
1 : 0)" + refId: D + - refId: E + datasourceUid: __expr__ + model: + type: classic_conditions + refId: E + conditions: + - evaluator: { params: [1], type: gt } + operator: { type: and } + query: { params: [D] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Multiple streams showing elevated errors within 15 min of deploy + description: Deploy triple signal — at least 2 of 3 signals elevated (sim-steward errors, Claude errors, or a plugin lifecycle event) + labels: + alertname: deploy-triple-signal + severity: critical + trigger_tier: t2 + + # cost-spike-tool-flood: Tool call count spike + session cost spike in same cycle + - uid: cost-spike-tool-flood + title: Cost Spike + Tool Flood + condition: D + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="claude-token-metrics"} | json | unwrap cost_usd [1h])' + instant: true + refId: A + - refId: B + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="claude-token-metrics"} | json | unwrap tool_calls [1h])' + instant: true + refId: B + - refId: C + datasourceUid: __expr__ + model: + type: math + expression: "$A > 0.5 && $B > 50" + refId: C + - refId: D + datasourceUid: __expr__ + model: + type: classic_conditions + refId: D + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [C] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: High tool call count coinciding with elevated session cost + description: Tool use flood and cost spike occurring together — likely agentic loop with real cost impact + labels: + alertname: cost-spike-tool-flood + severity: warn + trigger_tier: t1 diff --git 
a/observability/local/grafana/provisioning/alerting/rules-infrastructure.yml b/observability/local/grafana/provisioning/alerting/rules-infrastructure.yml new file mode 100644 index 0000000..d5e471a --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/rules-infrastructure.yml @@ -0,0 +1,348 @@ +apiVersion: 1 + +groups: + - orgId: 1 + name: Infrastructure & Deploy Quality + folder: Log Sentinel + interval: 1m + rules: + + - uid: bridge-start-failed + title: Bridge Start Failed + condition: B + data: + - refId: A + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="plugin_lifecycle" | level="ERROR" [5m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Plugin lifecycle ERROR on bridge start + description: A plugin_lifecycle ERROR event was detected in the last 5 minutes + labels: + alertname: bridge-start-failed + severity: critical + trigger_tier: t1 + + - uid: plugin-never-ready + title: Plugin Never Ready + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="plugin_lifecycle" | message=~".*ready.*" [60m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 1m + annotations: + summary: Plugin started but 
never reached ready state + description: Plugin lifecycle start event exists but no ready event within 60 minutes + labels: + alertname: plugin-never-ready + severity: warn + trigger_tier: t1 + + - uid: post-deploy-warn-rate + title: High WARN Rate After Deploy + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | level="WARN" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [50], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 2m + annotations: + summary: Elevated WARN rate after deploy + description: More than 50 WARN entries in 10 minutes following a lifecycle event + labels: + alertname: post-deploy-warn-rate + severity: warn + trigger_tier: t1 + + - uid: bridge-failure-post-deploy + title: Bridge ERROR After Deploy + condition: B + data: + - refId: A + relativeTimeRange: { from: 900, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | level="ERROR" [15m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: ERROR in sim-steward within 15 min of plugin start + description: Bridge ERROR detected shortly after deploy — may indicate startup regression + labels: + alertname: bridge-failure-post-deploy + severity: critical + trigger_tier: t1 + + - uid: plugin-slow-start + title: Plugin Slow Start + condition: B + data: + 
- refId: A + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="plugin_lifecycle" | message=~".*start_duration.*" | __error__="" [5m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Plugin startup exceeded 30s threshold + description: Time from plugin_lifecycle start to ready exceeded 30 seconds + labels: + alertname: plugin-slow-start + severity: warn + trigger_tier: t1 + + - uid: error-spike-post-deploy + title: Error Spike After Deploy + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | level="ERROR" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [5], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 2m + annotations: + summary: Error count doubled vs prior window after deploy + description: Error spike detected in 10-minute window following deploy event + labels: + alertname: error-spike-post-deploy + severity: warn + trigger_tier: t1 + + - uid: error-spike-general + title: General Error Spike + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | level="ERROR" [10m])' + instant: true + refId: 
A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [10], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 2m + annotations: + summary: More than 10 ERROR logs in 10-minute window + description: General error spike detected — not necessarily deploy-related + labels: + alertname: error-spike-general + severity: warn + trigger_tier: t1 + + - uid: ollama-unreachable + title: Ollama Unreachable + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_health" | ollama_reachable="false" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Ollama is not reachable from log-sentinel + description: sentinel_health event recorded ollama_reachable=false + labels: + alertname: ollama-unreachable + severity: critical + trigger_tier: t1 + + - uid: loki-circuit-open + title: Loki Circuit Breaker Open + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_health" | loki_circuit_open="true" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + 
reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Loki circuit breaker is open in log-sentinel + description: sentinel_health event recorded loki_circuit_open=true — Loki queries are failing + labels: + alertname: loki-circuit-open + severity: critical + trigger_tier: t1 + + - uid: sentinel-cycle-stalled + title: Sentinel Cycle Stalled + condition: B + data: + - refId: A + relativeTimeRange: { from: 5400, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_cycle" [90m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 5m + annotations: + summary: No sentinel_cycle event in 90 minutes + description: Log sentinel appears to be stalled — no analysis cycles have completed + labels: + alertname: sentinel-cycle-stalled + severity: critical + trigger_tier: t1 diff --git a/observability/local/grafana/provisioning/alerting/rules-iracing.yml b/observability/local/grafana/provisioning/alerting/rules-iracing.yml new file mode 100644 index 0000000..39b3ab4 --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/rules-iracing.yml @@ -0,0 +1,354 @@ +apiVersion: 1 + +groups: + - orgId: 1 + name: iRacing Session Behavior + folder: Log Sentinel + interval: 1m + rules: + + - uid: session-no-actions + title: Session No Actions + condition: B + data: + - refId: A + relativeTimeRange: { from: 900, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="action_dispatched" [15m])' + instant: true + refId: A + - 
refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 15m + annotations: + summary: iRacing session active with zero action_dispatched events + description: Session has been active 15+ minutes with no user actions dispatched + labels: + alertname: session-no-actions + severity: warn + trigger_tier: t1 + + - uid: action-failure-streak + title: Action Failure Streak + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="action_result" | level="ERROR" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [2], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 3+ consecutive action_result errors in session + description: Multiple consecutive action failures detected — possible stuck state or feature regression + labels: + alertname: action-failure-streak + severity: critical + trigger_tier: t1 + + - uid: websocket-disconnect-spike + title: WebSocket Disconnect Spike + condition: B + data: + - refId: A + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="websocket_disconnect" [5m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [2], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { 
type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 3+ WebSocket disconnects in 5 minutes + description: Elevated WebSocket disconnect rate detected — dashboard connectivity unstable + labels: + alertname: websocket-disconnect-spike + severity: warn + trigger_tier: t1 + + - uid: incident-detection-zero + title: Incident Detection Zero + condition: B + data: + - refId: A + relativeTimeRange: { from: 1800, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_incident" [30m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 30m + annotations: + summary: iRacing session active 30+ min with zero incident events + description: Incident detection may be broken — no iracing_incident events despite active session + labels: + alertname: incident-detection-zero + severity: warn + trigger_tier: t1 + + - uid: session-no-end + title: Session No End Event + condition: B + data: + - refId: A + relativeTimeRange: { from: 14400, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_session_end" [4h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 4h + annotations: + summary: iracing_session_start with no iracing_session_end within 4 hours + description: Session end event not received — possible session 
lifecycle tracking failure + labels: + alertname: session-no-end + severity: warn + trigger_tier: t1 + + - orgId: 1 + name: Replay & Incident Investigation + folder: Log Sentinel + interval: 1m + rules: + + - uid: replay-no-seeks + title: Replay No Seeks + condition: B + data: + - refId: A + relativeTimeRange: { from: 300, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_replay_seek" [5m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 5m + annotations: + summary: Replay started but no seek events in 5 minutes + description: Replay mode active with zero iracing_replay_seek events — may indicate broken replay controls + labels: + alertname: replay-no-seeks + severity: warn + trigger_tier: t1 + + - uid: incident-detection-stall + title: Incident Detection Stall in Replay + condition: B + data: + - refId: A + relativeTimeRange: { from: 1800, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_incident" [30m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 30m + annotations: + summary: Replay session active 30+ min with zero incident events + description: No incidents detected during replay — detector may be broken in replay mode + labels: + alertname: incident-detection-stall + severity: warn + trigger_tier: t1 + + - uid: 
incident-camera-stuck + title: Incident Camera Stuck + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_incident" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [2], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Same camera_view on 3+ consecutive incidents + description: Camera may be stuck — same camera_view repeated across multiple incident events + labels: + alertname: incident-camera-stuck + severity: info + trigger_tier: t1 + + - uid: replay-session-no-close + title: Replay Session No Close + condition: B + data: + - refId: A + relativeTimeRange: { from: 7200, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_session_end" | mode="replay" [2h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 2h + annotations: + summary: Replay session start with no session_end within 2 hours + description: Replay session lifecycle may be broken — no session end event received + labels: + alertname: replay-session-no-close + severity: warn + trigger_tier: t1 + + - uid: action-incident-gap + title: Action-Incident Gap + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + 
editorMode: code + expr: 'count_over_time({app="sim-steward"} | json | event="iracing_incident" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 10m + annotations: + summary: Incident detected, no action_dispatched within 10 minutes + description: User may not have reviewed the incident — no action followed the incident event + labels: + alertname: action-incident-gap + severity: info + trigger_tier: t1 diff --git a/observability/local/grafana/provisioning/alerting/rules-sentinel-health.yml b/observability/local/grafana/provisioning/alerting/rules-sentinel-health.yml new file mode 100644 index 0000000..6488a25 --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/rules-sentinel-health.yml @@ -0,0 +1,246 @@ +apiVersion: 1 + +groups: + - orgId: 1 + name: Sentinel Self-Health + folder: Log Sentinel + interval: 1m + rules: + + - uid: sentinel-stalled + title: Sentinel Cycle Stalled (Health) + condition: B + data: + - refId: A + relativeTimeRange: { from: 5400, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_cycle" [90m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 5m + annotations: + summary: No sentinel_cycle event in 90 minutes + description: Log sentinel appears stalled — no completed analysis cycles + labels: + alertname: sentinel-cycle-stalled-health + severity: critical + trigger_tier: t1 + + - 
uid: detector-error-rate + title: Detector Error Rate + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_analyst_run" | level="ERROR" [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [2], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 3 or more analyst run errors in a 10-minute window + description: Multiple analysis errors detected — Ollama or Loki connectivity may be failing + labels: + alertname: detector-error-rate + severity: warn + trigger_tier: t1 + + - uid: t1-slow + title: T1 Inference Slow + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_analyst_run" | tier="t1" | unwrap duration_ms [10m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [120000], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: T1 inference duration exceeded 120 seconds + description: T1 triage is running slower than expected — GPU may be under load or model is too large + labels: + alertname: t1-slow + severity: warn + trigger_tier: t1 + + - uid: t2-slow + title: T2 Inference Slow + condition: B + data: + - refId: A + relativeTimeRange: { from: 1800, to: 0 } + datasourceUid: loki_local + model: + datasource: 
{ type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_t2_investigation" | unwrap inference_duration_ms [30m])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [300000], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: T2 inference duration exceeded 300 seconds + description: T2 investigation is taking too long — deep model may be under heavy load + labels: + alertname: t2-slow + severity: warn + trigger_tier: t1 + + - uid: sentry-flood + title: Sentry Flood + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_sentry_issue" [1h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [4], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 5 or more Sentry-worthy findings in 1 hour + description: Sentinel is creating too many Sentry issues — possible false positive storm or real incident + labels: + alertname: sentry-flood + severity: warn + trigger_tier: t1 + + - uid: findings-flood + title: Findings Flood + condition: B + data: + - refId: A + relativeTimeRange: { from: 600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_finding" [10m])' + instant: true + refId: A + - refId: B 
+ datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [19], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: 20 or more findings in a single cycle + description: Finding flood detected — sentinel may be over-sensitive or a real incident is occurring + labels: + alertname: findings-flood + severity: warn + trigger_tier: t1 + + - uid: zero-findings-48h + title: Zero Findings 48h + condition: B + data: + - refId: A + relativeTimeRange: { from: 172800, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="sim-steward", component="log-sentinel"} | json | event="sentinel_finding" [48h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: Alerting + execErrState: Error + for: 1h + annotations: + summary: No sentinel findings at all in 48 hours + description: System may be suppressing findings or the sentinel is not running correctly + labels: + alertname: zero-findings-48h + severity: info + trigger_tier: t1 diff --git a/observability/local/grafana/provisioning/alerting/rules-token-cost.yml b/observability/local/grafana/provisioning/alerting/rules-token-cost.yml new file mode 100644 index 0000000..b2509ba --- /dev/null +++ b/observability/local/grafana/provisioning/alerting/rules-token-cost.yml @@ -0,0 +1,246 @@ +apiVersion: 1 + +groups: + - orgId: 1 + name: Token & Cost Budget + folder: Log Sentinel + interval: 1m + rules: + + - uid: session-cost-spike + title: Session Cost Spike + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: {
type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="claude-token-metrics"} | json | unwrap cost_usd [1h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [1.0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Single session cost exceeded $1.00 + description: A Claude session cost more than $1.00 — review for efficiency + labels: + alertname: session-cost-spike + severity: warn + trigger_tier: t1 + + - uid: session-cost-critical + title: Session Cost Critical + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="claude-token-metrics"} | json | unwrap cost_usd [1h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [3.0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Single session cost exceeded $3.00 + description: Critical cost threshold exceeded — session may be in a runaway loop + labels: + alertname: session-cost-critical + severity: critical + trigger_tier: t2 + + - uid: daily-spend-warning + title: Daily Spend Warning + condition: B + data: + - refId: A + relativeTimeRange: { from: 86400, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'sum_over_time({app="claude-token-metrics"} | json | unwrap cost_usd [24h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [10.0], type: gt } 
+ operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Rolling 24h spend exceeded $10.00 + description: Daily spend warning threshold hit — review recent session costs + labels: + alertname: daily-spend-warning + severity: warn + trigger_tier: t1 + + - uid: daily-spend-critical + title: Daily Spend Critical + condition: B + data: + - refId: A + relativeTimeRange: { from: 86400, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'sum_over_time({app="claude-token-metrics"} | json | unwrap cost_usd [24h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [25.0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Rolling 24h spend exceeded $25.00 + description: Critical daily spend threshold hit — immediate review required + labels: + alertname: daily-spend-critical + severity: critical + trigger_tier: t2 + + - uid: tool-use-flood + title: Tool Use Flood + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'max_over_time({app="claude-token-metrics"} | json | unwrap tool_calls [1h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [100], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Tool calls per session exceeded 100 + description: Unusually high tool call count — possible agentic loop or over-tooling + labels: + alertname: tool-use-flood + 
severity: warn + trigger_tier: t1 + + - uid: unexpected-model + title: Unexpected Model Used + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'count_over_time({app="claude-token-metrics"} | json | model!~"claude-opus-4.*|claude-sonnet-4.*|claude-haiku-4.*" [1h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0], type: gt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 0s + annotations: + summary: Unexpected model name in token metrics + description: A model field value outside the approved set was detected in claude-token-metrics + labels: + alertname: unexpected-model + severity: warn + trigger_tier: t1 + + - uid: cache-hit-rate-low + title: Cache Hit Rate Low + condition: B + data: + - refId: A + relativeTimeRange: { from: 3600, to: 0 } + datasourceUid: loki_local + model: + datasource: { type: loki, uid: loki_local } + editorMode: code + expr: 'avg_over_time({app="claude-token-metrics"} | json | unwrap cache_read_ratio [1h])' + instant: true + refId: A + - refId: B + datasourceUid: __expr__ + model: + type: classic_conditions + refId: B + conditions: + - evaluator: { params: [0.2], type: lt } + operator: { type: and } + query: { params: [A] } + reducer: { type: last } + noDataState: OK + execErrState: Error + for: 15m + annotations: + summary: Cache hit rate below 20% over 1 hour + description: Low cache read ratio — context caching may be misconfigured or inactive + labels: + alertname: cache-hit-rate-low + severity: info + trigger_tier: t1 diff --git a/observability/local/grafana/provisioning/dashboards/claude-cache-context.json b/observability/local/grafana/provisioning/dashboards/claude-cache-context.json index 71ce6d8..4355693 100644 --- 
a/observability/local/grafana/provisioning/dashboards/claude-cache-context.json +++ b/observability/local/grafana/provisioning/dashboards/claude-cache-context.json @@ -23,6 +23,26 @@ "style": "dark", "templating": { "list": [ + { + "name": "session_id", + "label": "Session", + "type": "query", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "query": "{app=\"claude-token-metrics\"} | json session_id", + "regex": "session_id\":\"([^\"]+)", + "refresh": 2, + "includeAll": true, + "multi": false, + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "sort": 2 + }, { "name": "model", "label": "Model", @@ -94,7 +114,8 @@ }, { "id": 1, - "title": "Cache Hit Rate", + "title": "Context from Cache", + "description": "% of all context tokens served from cache. High values (>90%) are expected — Claude Code reuses a large context window across turns. This is NOT a per-request hit/miss rate; every turn hits the cache.", "type": "gauge", "gridPos": { "x": 0, "y": 1, "w": 6, "h": 5 }, "datasource": { "type": "loki", "uid": "loki_local" }, @@ -102,7 +123,7 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__interval]))", "legendFormat": "Cache Read", "queryType": "range", "hide": true @@ -110,7 +131,15 @@ { "refId": "B", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | 
effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_creation_tokens [$__interval]))", + "legendFormat": "Cache Creation", + "queryType": "range", + "hide": true + }, + { + "refId": "D", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_input_tokens [$__interval]))", "legendFormat": "Input", "queryType": "range", "hide": true @@ -119,7 +148,7 @@ "refId": "C", "datasource": { "type": "__expr__", "uid": "__expr__" }, "type": "math", - "expression": "$A / ($A + $B) * 100", + "expression": "$A / ($A + $B + $D) * 100", "hide": false } ], @@ -162,14 +191,14 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", "legendFormat": "Cache Read", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -183,6 +212,8 @@ "fieldConfig": { "defaults": { "noValue": "0", + "unit": "short", + "decimals": 1, "color": { "mode": "fixed", "fixedColor": "#B877D9" }, "thresholds": { "mode": 
"absolute", @@ -202,14 +233,14 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__interval]))", "legendFormat": "Cache Creation", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -223,6 +254,8 @@ "fieldConfig": { "defaults": { "noValue": "0", + "unit": "short", + "decimals": 1, "color": { "mode": "fixed", "fixedColor": "#FF9830" }, "thresholds": { "mode": "absolute", @@ -234,7 +267,7 @@ }, { "id": 4, - "title": "Write:Read Ratio", + "title": "Cache Reuse Ratio", "type": "stat", "gridPos": { "x": 18, "y": 1, "w": 6, "h": 5 }, "datasource": { "type": "loki", "uid": "loki_local" }, @@ -242,7 +275,7 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_creation_tokens [$__interval]))", "legendFormat": "Creation", "queryType": "range", "hide": true @@ -250,7 +283,7 @@ { "refId": "B", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": 
"sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__interval]))", "legendFormat": "Read", "queryType": "range", "hide": true @@ -259,13 +292,13 @@ "refId": "C", "datasource": { "type": "__expr__", "uid": "__expr__" }, "type": "math", - "expression": "$A / $B", + "expression": "$B / $A", "hide": false } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -279,14 +312,14 @@ "fieldConfig": { "defaults": { "noValue": "0", - "decimals": 2, + "decimals": 1, "color": { "mode": "thresholds" }, "thresholds": { "mode": "absolute", "steps": [ - { "value": null, "color": "#73BF69" }, - { "value": 1, "color": "#FF9830" }, - { "value": 2, "color": "#F2495C" } + { "value": null, "color": "#F2495C" }, + { "value": 2, "color": "#FF9830" }, + { "value": 5, "color": "#73BF69" } ] } }, @@ -301,7 +334,7 @@ }, { "id": 5, - "title": "Cache Hit Rate Trend", + "title": "Context from Cache Trend", "type": "timeseries", "gridPos": { "x": 0, "y": 7, "w": 24, "h": 8 }, "datasource": { "type": "loki", "uid": "loki_local" }, @@ -309,25 +342,9 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (session_id) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", - "legendFormat": "Cache Read — {{session_id}}", - "queryType": "range", - "hide": true - 
}, - { - "refId": "B", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum by (session_id) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", - "legendFormat": "Input — {{session_id}}", - "queryType": "range", - "hide": true - }, - { - "refId": "C", - "datasource": { "type": "__expr__", "uid": "__expr__" }, - "type": "math", - "expression": "$A / ($A + $B) * 100", - "hide": false + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__interval])) / (sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_read_tokens [$__interval])) + sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_cache_creation_tokens [$__interval])) + sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, session_id, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | session_id=~\"$session_id\" | unwrap total_input_tokens [$__interval]))) * 100", + "legendFormat": "Context from Cache %", + "queryType": "range" } ], "options": { @@ -342,11 +359,14 @@ "noValue": "0", "color": { "mode": "palette-classic" }, "custom": { - "drawStyle": "points", - "pointSize": 6, - "lineWidth": 0, - "fillOpacity": 0, - "showPoints": "always", + "drawStyle": "line", + "lineInterpolation": "smooth", + "gradientMode": 
"opacity", + "lineWidth": 2, + "fillOpacity": 18, + "pointSize": 5, + "showPoints": "auto", + "spanNulls": 3600000, "axisLabel": "Hit Rate %", "thresholdsStyle": { "mode": "line" @@ -371,7 +391,7 @@ }, { "id": 6, - "title": "Cache Hit Rate by Model", + "title": "Context from Cache by Model", "type": "barchart", "gridPos": { "x": 0, "y": 16, "w": 12, "h": 8 }, "datasource": { "type": "loki", "uid": "loki_local" }, @@ -379,25 +399,9 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", - "legendFormat": "Cache Read — {{model}}", - "queryType": "range", - "hide": true - }, - { - "refId": "B", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", - "legendFormat": "Input — {{model}}", - "queryType": "range", - "hide": true - }, - { - "refId": "C", - "datasource": { "type": "__expr__", "uid": "__expr__" }, - "type": "math", - "expression": "$A / ($A + $B) * 100", - "hide": false + "expr": "sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range])) / (sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range])) + sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | 
project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range])) + sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))) * 100", + "legendFormat": "{{model}}", + "queryType": "instant" } ], "options": { @@ -411,6 +415,7 @@ "fieldConfig": { "defaults": { "unit": "percent", + "decimals": 1, "min": 0, "max": 100, "noValue": "0", @@ -429,7 +434,7 @@ }, { "id": 7, - "title": "Cache Hit Rate by Effort", + "title": "Context from Cache by Effort", "type": "barchart", "gridPos": { "x": 12, "y": 16, "w": 12, "h": 8 }, "datasource": { "type": "loki", "uid": "loki_local" }, @@ -437,25 +442,9 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", - "legendFormat": "Cache Read — {{effort}}", - "queryType": "range", - "hide": true - }, - { - "refId": "B", - "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", - "legendFormat": "Input — {{effort}}", - "queryType": "range", - "hide": true - }, - { - "refId": "C", - "datasource": { "type": "__expr__", "uid": "__expr__" }, - "type": "math", - "expression": "$A / ($A + $B) * 100", - "hide": false + "expr": "sum by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range])) / (sum by (effort) 
(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range])) + sum by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range])) + sum by (effort) (sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))) * 100", + "legendFormat": "{{effort}}", + "queryType": "instant" } ], "options": { @@ -469,6 +458,7 @@ "fieldConfig": { "defaults": { "unit": "percent", + "decimals": 1, "min": 0, "max": 100, "noValue": "0", @@ -501,14 +491,14 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, model, project, effort | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | hook_type=\"pre-compact\" [$__range]))", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, model, project, effort | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | hook_type=\"pre-compact\" [$__interval]))", "legendFormat": "Compactions", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -522,6 +512,7 @@ "fieldConfig": { "defaults": { "noValue": "0", + "decimals": 0, "color": { "mode": "thresholds" }, "thresholds": { "mode": "absolute", @@ -556,14 +547,17 @@ "fieldConfig": { "defaults": { "noValue": "0", + "decimals": 0, "color": { "mode": "fixed", "fixedColor": "#FF9830" }, "custom": { - "drawStyle": "bars", 
- "barAlignment": 0, - "fillOpacity": 60, - "lineWidth": 1, + "drawStyle": "line", + "lineInterpolation": "smooth", + "gradientMode": "opacity", + "fillOpacity": 25, + "lineWidth": 2, "pointSize": 5, - "showPoints": "never", + "showPoints": "auto", + "spanNulls": 3600000, "axisLabel": "Compactions" }, "thresholds": { @@ -587,21 +581,37 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "avg(sum_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, model, project, effort, compaction_count, assistant_turns | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | hook_type=\"pre-compact\" | compaction_count > 0 | unwrap assistant_turns [$__range]))", - "legendFormat": "Avg Turns", - "queryType": "range" + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json project | project=~\"$project\" [$__range]))", + "legendFormat": "Total Turns", + "queryType": "range", + "hide": true + }, + { + "refId": "B", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json hook_type, project | project=~\"$project\" | hook_type=\"pre-compact\" [$__range]))", + "legendFormat": "Total Compactions", + "queryType": "range", + "hide": true + }, + { + "refId": "C", + "datasource": { "type": "__expr__", "uid": "__expr__" }, + "type": "math", + "expression": "$A / $B", + "hide": false } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { "values": false, - "calcs": ["mean"], + "calcs": ["lastNotNull"], "fields": "" } }, @@ -626,7 +636,8 @@ }, { "id": 11, - "title": "Per-Turn Token Flow", + "title": "Per-Turn Token Flow (excl. Cache Read)", + "description": "Tokens spent per turn: input, output, and cache creation. 
Cache Read is excluded — it dominates the scale and is shown separately in Cache Trend above.", "type": "timeseries", "gridPos": { "x": 0, "y": 33, "w": 12, "h": 8 }, "datasource": { "type": "loki", "uid": "loki_local" }, @@ -634,22 +645,22 @@ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, running_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap running_input_tokens [$__interval]))", + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, turn_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap turn_input_tokens [$__interval]))", "legendFormat": "Input Tokens", "queryType": "range" }, { "refId": "B", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, running_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap running_output_tokens [$__interval]))", + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, turn_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap turn_output_tokens [$__interval]))", "legendFormat": "Output Tokens", "queryType": "range" }, { - "refId": "C", + "refId": "D", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, running_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap running_cache_read_tokens [$__interval]))", - "legendFormat": "Cache Read Tokens", + "expr": "sum(sum_over_time({app=\"claude-dev-logging\", component=\"tokens\"} | json model, project, effort, turn_cache_creation_tokens | 
model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap turn_cache_creation_tokens [$__interval]))", + "legendFormat": "Cache Creation Tokens", "queryType": "range" } ], @@ -660,18 +671,31 @@ "fieldConfig": { "defaults": { "noValue": "0", + "unit": "short", + "decimals": 1, "color": { "mode": "palette-classic" }, "custom": { "drawStyle": "line", - "lineWidth": 1, - "fillOpacity": 30, + "lineInterpolation": "smooth", + "gradientMode": "opacity", + "lineWidth": 2, + "fillOpacity": 18, "pointSize": 5, - "showPoints": "never", - "stacking": { "mode": "normal", "group": "A" }, - "axisLabel": "Tokens" + "showPoints": "auto", + "spanNulls": 3600000, + "stacking": { "mode": "none" }, + "axisLabel": "Tokens", + "scaleDistribution": { "type": "log", "log": 2 } } }, - "overrides": [] + "overrides": [ + { + "matcher": { "id": "byName", "options": "Cache Creation Tokens" }, + "properties": [ + { "id": "color", "value": { "mode": "fixed", "fixedColor": "#FF9830" } } + ] + } + ] } }, { @@ -696,14 +720,17 @@ "fieldConfig": { "defaults": { "noValue": "0", + "decimals": 0, "color": { "mode": "fixed", "fixedColor": "#FF6D00" }, "custom": { - "drawStyle": "bars", - "barAlignment": 0, - "fillOpacity": 50, - "lineWidth": 1, + "drawStyle": "line", + "lineInterpolation": "smooth", + "gradientMode": "opacity", + "lineWidth": 2, + "fillOpacity": 20, "pointSize": 5, - "showPoints": "never", + "showPoints": "auto", + "spanNulls": 3600000, "axisLabel": "Output Tokens" } }, @@ -720,34 +747,34 @@ "id": 13, "title": "Token Type Distribution", "type": "piechart", - "gridPos": { "x": 0, "y": 42, "w": 12, "h": 7 }, + "gridPos": { "x": 0, "y": 42, "w": 12, "h": 8 }, "datasource": { "type": "loki", "uid": "loki_local" }, "targets": [ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap 
total_input_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", "legendFormat": "Input", "queryType": "range" }, { "refId": "B", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__interval]))", "legendFormat": "Output", "queryType": "range" }, { "refId": "C", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", "legendFormat": "Cache Read", "queryType": "range" }, { "refId": "D", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens 
[$__interval]))", "legendFormat": "Cache Creation", "queryType": "range" } @@ -766,6 +793,8 @@ "fieldConfig": { "defaults": { "noValue": "0", + "unit": "short", + "decimals": 1, "color": { "mode": "palette-classic" } }, "overrides": [] @@ -773,15 +802,16 @@ }, { "id": 14, - "title": "Token Efficiency", + "title": "Output Efficiency", "type": "gauge", + "description": "Output tokens per total context token consumed. Higher = more output generated per unit of input.", "gridPos": { "x": 12, "y": 42, "w": 12, "h": 7 }, "datasource": { "type": "loki", "uid": "loki_local" }, "targets": [ { "refId": "A", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_output_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__interval]))", "legendFormat": "Output", "queryType": "range", "hide": true @@ -789,24 +819,32 @@ { "refId": "B", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_input_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", "legendFormat": "Input", "queryType": "range", "hide": true }, { - "refId": "C", + "refId": "D", "datasource": { "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | 
project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_creation_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__interval]))", "legendFormat": "Cache Creation", "queryType": "range", "hide": true }, { - "refId": "D", + "refId": "E", + "datasource": { "type": "loki", "uid": "loki_local" }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json model, project, effort, total_cache_read_tokens | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", + "legendFormat": "Cache Read", + "queryType": "range", + "hide": true + }, + { + "refId": "C", "datasource": { "type": "__expr__", "uid": "__expr__" }, "type": "math", - "expression": "$A / ($B + $C)", + "expression": "$A / ($B + $D + $E)", "hide": false } ], @@ -822,18 +860,18 @@ }, "fieldConfig": { "defaults": { + "noValue": "0", "unit": "percentunit", "min": 0, - "max": 1, - "noValue": "0", + "max": 0.05, "decimals": 2, "color": { "mode": "thresholds" }, "thresholds": { "mode": "absolute", "steps": [ { "value": null, "color": "#F2495C" }, - { "value": 0.2, "color": "#FF9830" }, - { "value": 0.5, "color": "#73BF69" } + { "value": 0.002, "color": "#FF9830" }, + { "value": 0.01, "color": "#73BF69" } ] } }, diff --git a/observability/local/grafana/provisioning/dashboards/claude-code-overview.json b/observability/local/grafana/provisioning/dashboards/claude-code-overview.json index de6c222..a75af81 100644 --- a/observability/local/grafana/provisioning/dashboards/claude-code-overview.json +++ b/observability/local/grafana/provisioning/dashboards/claude-code-overview.json @@ -73,6 +73,69 @@ "h": 1 } }, + { + "id": 20, + "title": "Session Cost", + "type": "stat", + "transparent": true, + "gridPos": { + "x": 20, + "y": 1, + "w": 4, + "h": 4 + }, + 
"datasource": { + "type": "loki", + "uid": "loki_local" + }, + "targets": [ + { + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki_local" + }, + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json session_id | session_id=~\"$session_id\" | unwrap cost_usd [$__interval]))", + "legendFormat": "Cost", + "queryType": "range" + } + ], + "options": { + "colorMode": "background-gradient", + "graphMode": "area", + "textMode": "value", + "justifyMode": "center", + "orientation": "auto", + "text": { + "titleSize": 12, + "valueSize": 36 + }, + "reduceOptions": { + "values": false, + "calcs": ["sum"], + "fields": "" + } + }, + "fieldConfig": { + "defaults": { + "noValue": "$0.00", + "unit": "currencyUSD", + "decimals": 4, + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "value": null, "color": "#73BF69" }, + { "value": 5, "color": "#FF9830" }, + { "value": 20, "color": "#F2495C" } + ] + } + }, + "overrides": [] + } + }, { "id": 1, "title": "Tool Calls", @@ -81,7 +144,7 @@ "gridPos": { "x": 0, "y": 1, - "w": 5, + "w": 4, "h": 4 }, "datasource": { @@ -95,14 +158,14 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=~\"tool|mcp-.*\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__range]))", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=~\"tool|mcp-.*\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" [$__interval]))", "legendFormat": "Tool Calls", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -144,9 +207,9 @@ "type": "stat", "transparent": true, "gridPos": { - "x": 5, + "x": 4, "y": 1, - "w": 5, + "w": 4, "h": 4 }, "datasource": { @@ -160,14 +223,14 @@ "type": "loki", "uid": "loki_local" }, 
- "expr": "sum(count_over_time({app=\"claude-dev-logging\", level=\"ERROR\"} | json session_id | session_id=~\"$session_id\" [$__range]))", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", level=\"ERROR\"} | json session_id | session_id=~\"$session_id\" [$__interval]))", "legendFormat": "Errors", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -216,9 +279,9 @@ "type": "stat", "transparent": true, "gridPos": { - "x": 10, + "x": 8, "y": 1, - "w": 5, + "w": 4, "h": 4 }, "datasource": { @@ -232,14 +295,14 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"agent\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"subagent-start\" [$__range]))", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"agent\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"subagent-start\" [$__interval]))", "legendFormat": "Agents", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -281,9 +344,9 @@ "type": "stat", "transparent": true, "gridPos": { - "x": 15, + "x": 12, "y": 1, - "w": 5, + "w": 4, "h": 4 }, "datasource": { @@ -297,14 +360,14 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"user\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"user-prompt-submit\" [$__range]))", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"user\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"user-prompt-submit\" [$__interval]))", "legendFormat": "Prompts", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - 
"graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -346,7 +409,7 @@ "type": "stat", "transparent": true, "gridPos": { - "x": 20, + "x": 16, "y": 1, "w": 4, "h": 4 @@ -362,14 +425,14 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"user\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"permission-request\" [$__range]))", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"user\"} | json hook_type, session_id | session_id=~\"$session_id\" | hook_type=\"permission-request\" [$__interval]))", "legendFormat": "Permissions", "queryType": "range" } ], "options": { "colorMode": "background-gradient", - "graphMode": "none", + "graphMode": "area", "textMode": "value", "justifyMode": "center", "orientation": "auto", @@ -515,7 +578,7 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum by (component) (count_over_time({app=\"claude-dev-logging\"} | json session_id | session_id=~\"$session_id\" [$__range]))", + "expr": "sum by (component) (count_over_time({app=\"claude-dev-logging\"} | json session_id | session_id=~\"$session_id\" [$__interval]))", "legendFormat": "{{component}}", "queryType": "range" } @@ -597,6 +660,21 @@ } ] }, + { + "matcher": { + "id": "byName", + "options": "tokens" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#FF6D00", + "mode": "fixed" + } + } + ] + }, { "matcher": { "id": "byName", @@ -757,7 +835,7 @@ { "id": 9, "title": "Top Tools Used", - "description": "% share of each tool across all post-tool-use events.", + "description": "Tool call counts across all post-tool-use events. 
mcp__ prefix stripped for readability.", "type": "table", "transparent": true, "gridPos": { @@ -777,8 +855,8 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum by (tool_name) (count_over_time({app=\"claude-dev-logging\", component=~\"tool|mcp-.*\"} | json hook_type, tool_name, session_id | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | tool_name != \"\" [$__range])) / ignoring(tool_name) group_left() sum(count_over_time({app=\"claude-dev-logging\", component=~\"tool|mcp-.*\"} | json hook_type, tool_name, session_id | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | tool_name != \"\" [$__range])) * 100", - "legendFormat": "{{tool_name}}", + "expr": "sum by (short_name) (count_over_time({app=\"claude-dev-logging\", component=~\"tool|mcp-.*\"} | json hook_type, tool_name, session_id | session_id=~\"$session_id\" | hook_type=\"post-tool-use\" | tool_name != \"\" | line_format \"{{.tool_name}}\" | regexp \"(?:mcp__)?(?P<short_name>.*)\" [$__range]))", + "legendFormat": "{{short_name}}", "queryType": "range" } ], @@ -824,8 +902,8 @@ }, "fieldConfig": { "defaults": { - "unit": "percent", - "decimals": 1, + "unit": "short", + "decimals": 0, "custom": { "inspect": false, "width": 0 @@ -843,22 +921,12 @@ "properties": [ { "id": "custom.width", - "value": 200 - }, - { - "id": "max", - "value": 100 - }, - { - "id": "min", - "value": 0 + "value": 90 }, { "id": "custom.cellOptions", "value": { - "type": "gauge", - "mode": "basic", - "valueDisplayMode": "color" + "type": "auto" } } ] @@ -871,7 +939,7 @@ "properties": [ { "id": "custom.width", - "value": 170 + "value": 200 } ] } diff --git a/observability/local/grafana/provisioning/dashboards/claude-token-cost.json b/observability/local/grafana/provisioning/dashboards/claude-token-cost.json index b32ca30..d03f5fd 100644 --- a/observability/local/grafana/provisioning/dashboards/claude-token-cost.json +++ b/observability/local/grafana/provisioning/dashboards/claude-token-cost.json @@ -103,7 +103,7 @@ "type":
"stat", "gridPos": { "x": 0, - "y": 0, + "y": 1, "w": 6, "h": 5 }, @@ -118,45 +118,38 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__interval]))", "legendFormat": "Total Spend", "queryType": "range" } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "$0.00", "unit": "currencyUSD", + "decimals": 4, "color": { "mode": "thresholds" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - }, - { - "value": 5, - "color": "yellow" - }, - { - "value": 20, - "color": "red" - } + { "value": null, "color": "#73BF69" }, + { "value": 5, "color": "#FF9830" }, + { "value": 20, "color": "#F2495C" } ] } }, @@ -169,7 +162,7 @@ "type": "stat", "gridPos": { "x": 6, - "y": 0, + "y": 1, "w": 6, "h": 5 }, @@ -184,37 +177,36 @@ "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range])", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__interval]))", "legendFormat": "Sessions", "queryType": "range" } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": 
"center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "0", + "decimals": 0, "color": { "mode": "fixed", - "fixedColor": "blue" + "fixedColor": "#5794F2" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "blue" - } + { "value": null, "color": "#5794F2" } ] } }, @@ -227,7 +219,7 @@ "type": "stat", "gridPos": { "x": 12, - "y": 0, + "y": 1, "w": 6, "h": 5 }, @@ -253,7 +245,7 @@ "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range])", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range]))", "legendFormat": "Sessions", "queryType": "range", "hide": true @@ -270,39 +262,32 @@ } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "$0.00", "unit": "currencyUSD", + "decimals": 4, "color": { "mode": "thresholds" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - }, - { - "value": 5, - "color": "yellow" - }, - { - "value": 20, - "color": "red" - } + { "value": null, "color": "#73BF69" }, + { "value": 5, "color": "#FF9830" }, + { "value": 20, "color": "#F2495C" } ] } }, @@ -315,7 +300,7 @@ "type": "stat", "gridPos": { "x": 18, - "y": 0, + "y": 1, "w": 6, "h": 5 }, @@ -330,49 +315,38 @@ "type": "loki", "uid": 
"loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", - "legendFormat": "Total Spend", - "queryType": "range", - "hide": true - }, - { - "refId": "B", - "datasource": { - "type": "__expr__", - "uid": "__expr__" - }, - "type": "math", - "expression": "$A * 2592000 / ${__range_s}", - "hide": false + "expr": "sum(rate({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range])) * 2592000", + "legendFormat": "Projected Monthly", + "queryType": "range" } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "$0.00", "unit": "currencyUSD", + "decimals": 2, "color": { - "mode": "fixed", - "fixedColor": "orange" + "mode": "thresholds" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "orange" - } + { "value": null, "color": "#73BF69" }, + { "value": 100, "color": "#FF9830" }, + { "value": 300, "color": "#F2495C" } ] } }, @@ -385,7 +359,7 @@ "collapsed": false, "gridPos": { "x": 0, - "y": 5, + "y": 6, "w": 24, "h": 1 } @@ -396,7 +370,7 @@ "type": "timeseries", "gridPos": { "x": 0, - "y": 5, + "y": 7, "w": 24, "h": 8 }, @@ -411,8 +385,8 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__interval]))", - "legendFormat": "Cost", + "expr": "sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | 
effort=~\"$effort\" | unwrap cost_usd [$__interval]))", + "legendFormat": "{{model}}", "queryType": "range" } ], @@ -422,30 +396,33 @@ "placement": "bottom" }, "tooltip": { - "mode": "single", - "sort": "none" + "mode": "multi", + "sort": "desc" } }, "fieldConfig": { "defaults": { "unit": "currencyUSD", + "decimals": 4, "color": { "mode": "palette-classic" }, "custom": { - "drawStyle": "bars", - "barAlignment": 0, - "fillOpacity": 80, - "lineWidth": 1, + "drawStyle": "line", + "lineInterpolation": "smooth", + "fillOpacity": 18, + "gradientMode": "opacity", + "lineWidth": 2, "pointSize": 5, - "showPoints": "never", - "spanNulls": false, + "showPoints": "auto", + "spanNulls": 3600000, "stacking": { - "mode": "none", + "mode": "normal", "group": "A" }, "axisCenteredZero": false, "axisColorMode": "text", + "axisLabel": "USD", "scaleDistribution": { "type": "linear" } @@ -453,10 +430,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, @@ -469,7 +443,7 @@ "collapsed": false, "gridPos": { "x": 0, - "y": 13, + "y": 15, "w": 24, "h": 1 } @@ -480,7 +454,7 @@ "type": "piechart", "gridPos": { "x": 0, - "y": 13, + "y": 16, "w": 8, "h": 8 }, @@ -495,15 +469,16 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "expr": "sum by (model) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__interval]))", "legendFormat": "{{model}}", "queryType": "range" } ], "options": { "pieType": "donut", + "displayLabels": ["name", "percent"], "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false }, @@ -520,6 +495,7 @@ "fieldConfig": { "defaults": { "unit": "currencyUSD", + "decimals": 4, "color": { "mode": 
"palette-classic" } @@ -533,7 +509,7 @@ "type": "piechart", "gridPos": { "x": 8, - "y": 13, + "y": 16, "w": 8, "h": 8 }, @@ -548,15 +524,16 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum by (project) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__range]))", + "expr": "sum by (project) (sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap cost_usd [$__interval]))", "legendFormat": "{{project}}", "queryType": "range" } ], "options": { "pieType": "donut", + "displayLabels": ["name", "percent"], "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false }, @@ -573,6 +550,7 @@ "fieldConfig": { "defaults": { "unit": "currencyUSD", + "decimals": 4, "color": { "mode": "palette-classic" } @@ -582,11 +560,11 @@ }, { "id": 8, - "title": "Turns by Effort", + "title": "Sessions by Effort", "type": "piechart", "gridPos": { "x": 16, - "y": 13, + "y": 16, "w": 8, "h": 8 }, @@ -601,15 +579,16 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum by (effort) (count_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" [$__range]))", + "expr": "sum by (effort) (count_over_time({app=\"claude-token-metrics\"} | json model, project, effort, cost_usd | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | cost_usd != \"\" [$__interval]))", "legendFormat": "{{effort}}", "queryType": "range" } ], "options": { "pieType": "donut", + "displayLabels": ["name", "percent"], "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false }, @@ -638,18 +617,19 @@ "collapsed": false, "gridPos": { "x": 0, - "y": 21, + "y": 24, "w": 24, "h": 1 } }, { "id": 9, - "title": "Cache Hit Rate", + "title": "Context from Cache", + "description": "% of all context tokens served from cache. 
High values are expected — Claude Code reuses a large context window. Every turn hits the cache; this measures token efficiency, not request hit rate.", "type": "gauge", "gridPos": { "x": 0, - "y": 21, + "y": 25, "w": 8, "h": 7 }, @@ -723,18 +703,9 @@ "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "red" - }, - { - "value": 30, - "color": "yellow" - }, - { - "value": 50, - "color": "green" - } + { "value": null, "color": "#F2495C" }, + { "value": 30, "color": "#FF9830" }, + { "value": 50, "color": "#73BF69" } ] } }, @@ -743,11 +714,11 @@ }, { "id": 10, - "title": "Cache Read Tokens", + "title": "Total Cache Read Tokens", "type": "stat", "gridPos": { "x": 8, - "y": 21, + "y": 25, "w": 8, "h": 7 }, @@ -762,38 +733,37 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", "legendFormat": "Cache Read Tokens", "queryType": "range" } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "0", "unit": "short", + "decimals": 1, "color": { "mode": "fixed", - "fixedColor": "green" + "fixedColor": "#73BF69" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, @@ -803,10 +773,11 @@ { "id": 11, "title": "Cache Trend", + "description": "Cache Read vs Cache 
Creation tokens per interval. Cache Creation (right axis) is orders of magnitude smaller — dual axes show both trends clearly.", "type": "timeseries", "gridPos": { "x": 16, - "y": 21, + "y": 25, "w": 8, "h": 7 }, @@ -831,8 +802,8 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", - "legendFormat": "Input Tokens", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__interval]))", + "legendFormat": "Cache Creation", "queryType": "range" } ], @@ -842,12 +813,13 @@ "placement": "bottom" }, "tooltip": { - "mode": "single", + "mode": "multi", "sort": "none" } }, "fieldConfig": { "defaults": { + "unit": "short", "color": { "mode": "palette-classic" }, @@ -872,14 +844,19 @@ "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, - "overrides": [] + "overrides": [ + { + "matcher": { "id": "byName", "options": "Cache Creation" }, + "properties": [ + { "id": "custom.axisPlacement", "value": "right" }, + { "id": "color", "value": { "mode": "fixed", "fixedColor": "#FF9830" } } + ] + } + ] } }, { @@ -888,20 +865,20 @@ "collapsed": false, "gridPos": { "x": 0, - "y": 28, + "y": 32, "w": 24, "h": 1 } }, { "id": 12, - "title": "Token Distribution", + "title": "Token Type Distribution", "type": "piechart", "gridPos": { "x": 0, - "y": 28, + "y": 33, "w": 8, - "h": 7 + "h": 8 }, "datasource": { "type": "loki", @@ -914,7 +891,7 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | 
model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_input_tokens [$__interval]))", "legendFormat": "Input", "queryType": "range" }, @@ -924,7 +901,7 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__interval]))", "legendFormat": "Output", "queryType": "range" }, @@ -934,7 +911,7 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_read_tokens [$__interval]))", "legendFormat": "Cache Read", "queryType": "range" }, @@ -944,15 +921,16 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_cache_creation_tokens [$__interval]))", "legendFormat": "Cache Creation", "queryType": "range" } ], "options": { - "pieType": "pie", + "pieType": "donut", + "displayLabels": ["name", "percent"], "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false }, @@ -968,6 +946,8 @@ }, "fieldConfig": { "defaults": { + "unit": "short", + "decimals": 1, "color": { "mode": "palette-classic" } @@ -978,10 +958,11 @@ { "id": 13, "title": "Total Output Tokens", + 
"description": "Total output tokens generated across all sessions in the selected time range.", "type": "stat", "gridPos": { "x": 8, - "y": 28, + "y": 33, "w": 5, "h": 7 }, @@ -996,37 +977,37 @@ "type": "loki", "uid": "loki_local" }, - "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__range]))", + "expr": "sum(sum_over_time({app=\"claude-token-metrics\"} | json | model=~\"$model\" | project=~\"$project\" | effort=~\"$effort\" | unwrap total_output_tokens [$__interval]))", "legendFormat": "Output Tokens", "queryType": "range" } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": ["sum"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "0", + "unit": "short", + "decimals": 1, "color": { "mode": "fixed", - "fixedColor": "purple" + "fixedColor": "#B877D9" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "purple" - } + { "value": null, "color": "#B877D9" } ] } }, @@ -1035,11 +1016,12 @@ }, { "id": 14, - "title": "Output / Dollar", + "title": "Output Tokens / Dollar", + "description": "Output tokens generated per dollar spent. 
Higher = more efficient.", "type": "stat", "gridPos": { "x": 13, - "y": 28, + "y": 33, "w": 5, "h": 7 }, @@ -1082,31 +1064,31 @@ } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "0", + "decimals": 0, + "unit": "locale", "color": { "mode": "fixed", - "fixedColor": "green" + "fixedColor": "#73BF69" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, @@ -1119,7 +1101,7 @@ "type": "stat", "gridPos": { "x": 18, - "y": 28, + "y": 33, "w": 6, "h": 7 }, @@ -1145,7 +1127,7 @@ "type": "loki", "uid": "loki_local" }, - "expr": "count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range])", + "expr": "sum(count_over_time({app=\"claude-dev-logging\", component=\"lifecycle\"} | json | hook_type=\"session-end\" | project=~\"$project\" [$__range]))", "legendFormat": "Sessions", "queryType": "range", "hide": true @@ -1162,31 +1144,30 @@ } ], "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false - }, - "textMode": "auto" + } }, "fieldConfig": { "defaults": { "noValue": "0", + "decimals": 1, "color": { "mode": "fixed", - "fixedColor": "blue" + "fixedColor": "#5794F2" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "blue" - } + { "value": null, "color": "#5794F2" } ] } }, @@ 
-1199,18 +1180,18 @@ "collapsed": false, "gridPos": { "x": 0, - "y": 35, + "y": 40, "w": 24, "h": 1 } }, { "id": 16, - "title": "Avg Cost per Model", + "title": "Avg Cost per Session / Model", "type": "barchart", "gridPos": { "x": 0, - "y": 35, + "y": 41, "w": 12, "h": 8 }, @@ -1234,7 +1215,7 @@ "orientation": "horizontal", "barWidth": 0.8, "groupWidth": 0.7, - "showValue": "auto", + "showValue": "always", "stacking": "none", "legend": { "displayMode": "list", @@ -1249,16 +1230,14 @@ "fieldConfig": { "defaults": { "unit": "currencyUSD", + "decimals": 2, "color": { "mode": "palette-classic" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, @@ -1267,11 +1246,11 @@ }, { "id": 17, - "title": "Output Tokens per Model", + "title": "Avg Output Tokens per Model", "type": "barchart", "gridPos": { "x": 12, - "y": 35, + "y": 41, "w": 12, "h": 8 }, @@ -1295,7 +1274,7 @@ "orientation": "horizontal", "barWidth": 0.8, "groupWidth": 0.7, - "showValue": "auto", + "showValue": "always", "stacking": "none", "legend": { "displayMode": "list", @@ -1309,16 +1288,15 @@ }, "fieldConfig": { "defaults": { + "unit": "short", + "decimals": 1, "color": { "mode": "palette-classic" }, "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, @@ -1331,7 +1309,7 @@ "collapsed": false, "gridPos": { "x": 0, - "y": 43, + "y": 49, "w": 24, "h": 1 } @@ -1342,7 +1320,7 @@ "type": "table", "gridPos": { "x": 0, - "y": 43, + "y": 50, "w": 24, "h": 10 }, @@ -1480,10 +1458,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - } + { "value": null, "color": "#73BF69" } ] } }, @@ -1514,18 +1489,9 @@ "value": { "mode": "absolute", "steps": [ - { - "value": null, - "color": "green" - }, - { - "value": 1, - "color": "yellow" - }, - { - "value": 5, - "color": "red" - } + { "value": null, "color": "#73BF69" 
}, + { "value": 1, "color": "#FF9830" }, + { "value": 5, "color": "#F2495C" } ] } } diff --git a/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json b/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json index 7be1cdb..83539b8 100644 --- a/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json +++ b/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json @@ -1,5 +1,6 @@ { "annotations": { "list": [] }, + "description": "Deploy health — deploy markers, plugin bring-up, bridge start, and error volume. deploy.ps1 pushes event=deploy_marker when SIMSTEWARD_LOKI_URL is set. post_deploy_warn=true means post-deploy tests/*.ps1 failed.", "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 1, @@ -8,19 +9,146 @@ "liveNow": false, "panels": [ { - "gridPos": { "h": 3, "w": 24, "x": 0, "y": 0 }, - "id": 1, + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "decimals": 0, + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "#5794F2", "value": null }] } + }, + "overrides": [] + }, + "gridPos": { "h": 5, "w": 8, "x": 0, "y": 0 }, + "id": 8, + "options": { + "colorMode": "background-gradient", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\"} | json | event=\"deploy_marker\" [$__range]))", + "queryType": "range", + "refId": "A" + } + ], + "title": "Deploys This Period", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "decimals": 0, + "color": { "mode": "thresholds" }, + 
"thresholds": { "mode": "absolute", "steps": [{ "color": "#73BF69", "value": null }, { "color": "#F2495C", "value": 1 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 5, "w": 8, "x": 8, "y": 0 }, + "id": 9, + "options": { + "colorMode": "background-gradient", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\"} | json | event=\"deploy_marker\" | post_deploy_warn=\"true\" [$__range]))", + "queryType": "range", + "refId": "A" + } + ], + "title": "Post-Deploy Warnings", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "decimals": 0, + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "#73BF69", "value": null }, { "color": "#F2495C", "value": 1 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 5, "w": 8, "x": 16, "y": 0 }, + "id": 10, + "options": { + "colorMode": "background-gradient", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\"} | json | event=\"bridge_start_failed\" [$__range]))", + "queryType": "range", + "refId": "A" + } + ], + "title": "Bridge Start Failures", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "drawStyle": "line", + 
"lineInterpolation": "smooth", + "gradientMode": "opacity", + "fillOpacity": 20, + "lineWidth": 2, + "pointSize": 5, + "showPoints": "auto", + "spanNulls": 3600000, + "axisLabel": "Deploys" + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 24, "x": 0, "y": 5 }, + "id": 11, "options": { - "code": { "language": "markdown", "showLineNumbers": false, "showMiniMap": false }, - "content": "**Deploy health** — Correlates `deploy.ps1` with plugin bring-up in Loki.\n\n- **Deploy markers** — Lines pushed at end of `deploy.ps1` when `SIMSTEWARD_LOKI_URL` is set (`event=deploy_marker`). `post_deploy_warn=true` means post-deploy `tests/*.ps1` failed after retry.\n- **Plugin / bridge** — `plugin_ready` and `bridge_start_failed` show whether SimHub loaded the plugin and WebSocket started.\n- **Errors** — Structured ERROR lines; spike after a bad deploy often means SimHub/plugin mismatch or WS failure.\n\nOpen repo `deploy.ps1` console output for copy failures; this dashboard is **telemetry**, not a full deploy log.", - "mode": "markdown" + "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } }, - "title": "About", - "type": "text" + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\"} | json | event=\"deploy_marker\" [$__interval]))", + "legendFormat": "Deploys", + "queryType": "range", + "refId": "A" + } + ], + "title": "Deploy Frequency", + "type": "timeseries" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 10, "w": 24, "x": 0, "y": 3 }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 11 }, "id": 2, "options": { "dedupStrategy": "none", @@ -41,12 +169,12 @@ "refId": "A" } ], - "title": "Deploy markers (deploy.ps1 → Loki)", + "title": "Deploy Markers (deploy.ps1 → Loki)", "type": "logs" }, { "datasource": { "type": "loki", "uid": 
"loki_local" }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 13 }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 21 }, "id": 3, "options": { "dedupStrategy": "none", @@ -67,12 +195,12 @@ "refId": "A" } ], - "title": "Plugin / bridge lifecycle", + "title": "Plugin / Bridge Lifecycle", "type": "logs" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 13 }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 21 }, "id": 4, "options": { "dedupStrategy": "none", @@ -93,28 +221,30 @@ "refId": "A" } ], - "title": "WebSocket bridge failures", + "title": "WebSocket Bridge Failures", "type": "logs" }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { - "color": { "mode": "palette-classic" }, + "color": { "mode": "fixed", "fixedColor": "#F2495C" }, "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "drawStyle": "bars", - "fillOpacity": 40, - "lineWidth": 1, - "showPoints": "never" + "drawStyle": "line", + "lineInterpolation": "smooth", + "gradientMode": "opacity", + "fillOpacity": 25, + "lineWidth": 2, + "pointSize": 5, + "showPoints": "auto", + "spanNulls": 3600000, + "axisLabel": "Errors" }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 8, "w": 24, "x": 0, "y": 22 }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 30 }, "id": 5, "options": { "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -124,18 +254,18 @@ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\", level=\"ERROR\"} [5m]))", - "legendFormat": "ERROR lines / 5m", + "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\", level=\"ERROR\"} [$__interval]))", + "legendFormat": "ERROR lines / interval", "queryType": "range", "refId": "A" } ], - "title": "ERROR log volume (5m buckets)", + "title": "ERROR Log Volume", "type": "timeseries" }, { 
"datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 12, "w": 24, "x": 0, "y": 30 }, + "gridPos": { "h": 12, "w": 24, "x": 0, "y": 38 }, "id": 6, "options": { "dedupStrategy": "none", @@ -156,12 +286,12 @@ "refId": "A" } ], - "title": "Recent ERROR lines (full)", + "title": "Recent ERROR Lines (Full)", "type": "logs" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 10, "w": 24, "x": 0, "y": 42 }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 50 }, "id": 7, "options": { "dedupStrategy": "none", @@ -182,7 +312,7 @@ "refId": "A" } ], - "title": "Failed actions (action_result success=false)", + "title": "Failed Actions (action_result success=false)", "type": "logs" } ], @@ -210,7 +340,7 @@ "time": { "from": "now-24h", "to": "now" }, "timepicker": {}, "timezone": "browser", - "title": "Sim Steward — Deploy health", + "title": "Sim Steward — Deploy Health", "uid": "simsteward-deploy-health", "version": 1, "weekStart": "" diff --git a/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json b/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json index 63b945f..79bb00d 100644 --- a/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json +++ b/observability/local/grafana/provisioning/dashboards/simsteward-log-sentinel.json @@ -1,5 +1,6 @@ { "annotations": { "list": [] }, + "description": "Autonomous log-analysis pipeline — 16 detectors (app + ops), three-tier LLM (T0 detect → T1 deduplicate → T2 investigate). Cycle every 5 min. 
component=log-sentinel.", "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 1, @@ -14,46 +15,37 @@ "title": "Sentinel Health", "type": "row" }, - { - "gridPos": { "h": 3, "w": 24, "x": 0, "y": 1 }, - "id": 1, - "options": { - "code": { "language": "markdown", "showLineNumbers": false, "showMiniMap": false }, - "content": "**Log Sentinel v2** — Autonomous log-analysis pipeline.\n\n- **16 detectors** across two categories: **app** (plugin crashes, bridge failures, action errors) and **ops** (deploy issues, disk pressure, connectivity).\n- **Three-tier LLM** — T0 detectors emit findings; T1 deduplicates and prioritises; T2 investigates high-severity findings and writes root-cause reports.\n- All structured events carry `component=log-sentinel`. Cycle runs every 5 minutes by default.", - "mode": "markdown" - }, - "title": "About", - "type": "text" - }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": "#73BF69", "value": null }] } }, "overrides": [] }, - "gridPos": { "h": 6, "w": 6, "x": 0, "y": 4 }, + "gridPos": { "h": 6, "w": 6, "x": 0, "y": 1 }, "id": 2, "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_cycle\"} [$__range])", + "expr": "sum(count_over_time({component=\"log-sentinel\"} | json | 
event=\"sentinel_cycle\" [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "Cycles completed", + "title": "Cycles Completed", "type": "stat" }, { @@ -66,15 +58,19 @@ "axisCenteredZero": false, "axisColorMode": "text", "drawStyle": "line", - "fillOpacity": 20, + "fillOpacity": 18, + "gradientMode": "opacity", + "lineInterpolation": "smooth", "lineWidth": 2, - "showPoints": "auto" + "pointSize": 5, + "showPoints": "auto", + "spanNulls": 3600000 }, "unit": "ms" }, "overrides": [] }, - "gridPos": { "h": 6, "w": 10, "x": 6, "y": 4 }, + "gridPos": { "h": 6, "w": 10, "x": 6, "y": 1 }, "id": 3, "options": { "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -84,88 +80,151 @@ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "{component=\"log-sentinel\", event=\"sentinel_cycle\"} | json | unwrap duration_ms", + "expr": "avg(avg_over_time({component=\"log-sentinel\"} | json | event=\"sentinel_cycle\" | unwrap duration_ms [$__interval]))", + "legendFormat": "Avg Cycle Duration", "queryType": "range", "refId": "A" } ], - "title": "Cycle duration", + "title": "Cycle Duration", "type": "timeseries" }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }, { "color": "red", "value": 1 }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": "#73BF69", "value": null }, { "color": "#F2495C", "value": 1 }] } }, "overrides": [] }, - "gridPos": { "h": 6, "w": 8, "x": 16, "y": 4 }, + "gridPos": { "h": 6, "w": 8, "x": 16, "y": 1 }, "id": 4, "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", 
+ "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_detector_run\"} | json | error!=\"\" | error!=\"null\" | error!=\"None\" [$__range])", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_detector_run\"} | json | error != \"\" [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "Detector errors", + "title": "Detector Errors", "type": "stat" }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 10 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 7 }, "id": 101, - "title": "Per-Detector Timing", + "title": "Findings Overview", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 8, "w": 24, "x": 0, "y": 11 }, - "id": 5, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "drawStyle": "line", + "lineInterpolation": "smooth", + "fillOpacity": 18, + "gradientMode": "opacity", + "lineWidth": 2, + "pointSize": 5, + "showPoints": "auto", + "spanNulls": 3600000, + "axisLabel": "Findings", + "stacking": { "mode": "normal", "group": "A" } + } + }, + "overrides": [ + { + "matcher": { "id": "byName", "options": "App" }, + "properties": [{ "id": "color", "value": { "fixedColor": "#F2495C", "mode": "fixed" } }] + }, + { + "matcher": { "id": "byName", "options": "Ops" }, + "properties": [{ "id": "color", "value": { "fixedColor": "#FF9830", "mode": "fixed" } }] + } + ] + }, + "gridPos": { "h": 8, "w": 16, "x": 0, "y": 8 }, + "id": 17, "options": { - "dedupStrategy": "none", - "enableLogDetails": true, - "prettifyLogMessage": true, - "showCommonLabels": false, - "showLabels": false, - "showTime": true, - "sortOrder": "Descending", - "wrapLogMessage": true + "legend": { "displayMode": "list", "placement": "bottom", 
"showLegend": true }, + "tooltip": { "mode": "multi", "sort": "desc" } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "{component=\"log-sentinel\", event=\"sentinel_detector_run\"} | json", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"app\" [$__interval]))", + "legendFormat": "App", "queryType": "range", "refId": "A" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"ops\" [$__interval]))", + "legendFormat": "Ops", + "queryType": "range", + "refId": "B" } ], - "title": "Detector runs", - "type": "logs" + "title": "Findings Over Time", + "type": "timeseries" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 8 }, + "id": 18, + "options": { + "pieType": "donut", + "displayLabels": ["name", "percent"], + "legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] }, + "tooltip": { "mode": "single" }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum by (severity) (count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json [$__range]))", + "legendFormat": "{{severity}}", + "queryType": "range", + "refId": "A" + } + ], + "title": "Findings by Severity", + "type": "piechart" }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 19 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 16 }, "id": 102, "title": "App Findings", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 8, "w": 16, "x": 0, "y": 20 }, + 
"gridPos": { "h": 8, "w": 16, "x": 0, "y": 17 }, "id": 6, "options": { "dedupStrategy": "none", @@ -186,50 +245,52 @@ "refId": "A" } ], - "title": "App findings", + "title": "App Findings", "type": "logs" }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }, { "color": "yellow", "value": 5 }, { "color": "red", "value": 15 }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": "#73BF69", "value": null }, { "color": "#FF9830", "value": 5 }, { "color": "#F2495C", "value": 15 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 20 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 17 }, "id": 7, "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"app\" [$__range])", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"app\" [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "App finding count", + "title": "App Finding Count", "type": "stat" }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 28 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 25 }, "id": 103, "title": "Ops Findings", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 8, "w": 16, "x": 0, "y": 29 }, + "gridPos": { "h": 8, "w": 16, 
"x": 0, "y": 26 }, "id": 8, "options": { "dedupStrategy": "none", @@ -250,51 +311,92 @@ "refId": "A" } ], - "title": "Ops findings", + "title": "Ops Findings", "type": "logs" }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }, { "color": "yellow", "value": 5 }, { "color": "orange", "value": 15 }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": "#73BF69", "value": null }, { "color": "#FF9830", "value": 5 }, { "color": "#F2495C", "value": 15 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 29 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 26 }, "id": 9, "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"ops\" [$__range])", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_finding\"} | json | category=\"ops\" [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "Ops finding count", + "title": "Ops Finding Count", "type": "stat" }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 37 }, - "id": 104, - "title": "T2 LLM Activity", + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 34 }, + "id": 110, + "title": "Per-Detector Timing", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 38 }, - "id": 
10, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "drawStyle": "line", + "lineInterpolation": "smooth", + "fillOpacity": 10, + "gradientMode": "opacity", + "lineWidth": 2, + "pointSize": 4, + "showPoints": "auto", + "spanNulls": 3600000, + "axisLabel": "Duration (ms)" + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 35 }, + "id": 19, + "options": { + "legend": { "displayMode": "table", "placement": "right", "showLegend": true, "calcs": ["mean", "max"] }, + "tooltip": { "mode": "multi", "sort": "desc" } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "avg by (detector) (avg_over_time({component=\"log-sentinel\"} | json | event=\"sentinel_detector_run\" | unwrap duration_ms [$__interval]))", + "legendFormat": "{{detector}}", + "queryType": "range", + "refId": "A" + } + ], + "title": "Detector Duration by Name", + "type": "timeseries" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 43 }, + "id": 5, "options": { "dedupStrategy": "none", "enableLogDetails": true, @@ -309,86 +411,190 @@ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "{component=\"log-sentinel\", event=\"sentinel_t2_run\"} | json", + "expr": "{component=\"log-sentinel\", event=\"sentinel_detector_run\"} | json", "queryType": "range", "refId": "A" } ], - "title": "T2 run metrics", + "title": "Detector Runs", "type": "logs" }, + { + "collapsed": false, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 51 }, + "id": 104, + "title": "T2 LLM Activity", + "type": "row" + }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "green", "value": null }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": 
"#73BF69", "value": null }] } }, "overrides": [] }, - "gridPos": { "h": 4, "w": 6, "x": 12, "y": 38 }, + "gridPos": { "h": 6, "w": 6, "x": 0, "y": 52 }, "id": 11, "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_t2_run\"} [$__range])", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_t2_run\"} [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "T2 investigations", + "title": "T2 Investigations", "type": "stat" }, { "datasource": { "type": "loki", "uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "blue", "value": null }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": "#5794F2", "value": null }] } }, "overrides": [] }, - "gridPos": { "h": 4, "w": 6, "x": 18, "y": 38 }, + "gridPos": { "h": 6, "w": 6, "x": 6, "y": 52 }, "id": 12, "options": { - "colorMode": "background", + "colorMode": "background-gradient", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_t2_run\"} | json | trigger=\"proactive\" [$__range]))", + "queryType": "range", + 
"refId": "A" + } + ], + "title": "Proactive Polls", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "unit": "ms", + "decimals": 0, + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "#8AB8FF", "value": null }, { "color": "#FF9830", "value": 10000 }, { "color": "#F2495C", "value": 30000 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 6, "x": 12, "y": 52 }, + "id": 20, + "options": { + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_t2_run\"} | json | trigger=\"proactive\" [$__range])", + "expr": "avg(avg_over_time({component=\"log-sentinel\"} | json | event=\"sentinel_t2_run\" | unwrap duration_ms [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "Proactive polls", + "title": "Avg T2 Duration", "type": "stat" }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "decimals": 0, + "color": { "mode": "thresholds" }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "#B877D9", "value": null }] } + }, + "overrides": [] + }, + "gridPos": { "h": 6, "w": 6, "x": 18, "y": 52 }, + "id": 21, + "options": { + "colorMode": "background-gradient", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false } + }, + "targets": [ + { + "datasource": { 
"type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "avg(avg_over_time({component=\"log-sentinel\"} | json | event=\"sentinel_t2_run\" | unwrap tokens_used [$__range]))", + "queryType": "range", + "refId": "A" + } + ], + "title": "Avg T2 Tokens", + "type": "stat" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 58 }, + "id": 10, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{component=\"log-sentinel\", event=\"sentinel_t2_run\"} | json", + "queryType": "range", + "refId": "A" + } + ], + "title": "T2 Run Metrics", + "type": "logs" + }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 46 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 66 }, "id": 105, "title": "T2 Investigation Reports", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 10, "w": 24, "x": 0, "y": 47 }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 67 }, "id": 13, "options": { "dedupStrategy": "none", @@ -409,19 +615,19 @@ "refId": "A" } ], - "title": "Investigation reports", + "title": "Investigation Reports", "type": "logs" }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 57 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 77 }, "id": 106, "title": "Sentry Issues", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 6, "w": 16, "x": 0, "y": 58 }, + "gridPos": { "h": 6, "w": 16, "x": 0, "y": 78 }, "id": 14, "options": { "dedupStrategy": "none", @@ -442,50 +648,52 @@ "refId": "A" } ], - "title": "Sentry issues created", + "title": "Sentry Issues Created", "type": "logs" }, { "datasource": { "type": "loki", 
"uid": "loki_local" }, "fieldConfig": { "defaults": { + "decimals": 0, "color": { "mode": "thresholds" }, - "thresholds": { "mode": "absolute", "steps": [{ "color": "blue", "value": null }] } + "thresholds": { "mode": "absolute", "steps": [{ "color": "#5794F2", "value": null }] } }, "overrides": [] }, - "gridPos": { "h": 6, "w": 8, "x": 16, "y": 58 }, + "gridPos": { "h": 6, "w": 8, "x": 16, "y": 78 }, "id": 15, "options": { - "colorMode": "background", + "colorMode": "background-gradient", "graphMode": "area", - "justifyMode": "auto", + "justifyMode": "center", "orientation": "auto", - "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false }, - "textMode": "auto" + "textMode": "value", + "text": { "titleSize": 12, "valueSize": 36 }, + "reduceOptions": { "calcs": ["sum"], "fields": "", "values": false } }, "targets": [ { "datasource": { "type": "loki", "uid": "loki_local" }, "editorMode": "code", - "expr": "count_over_time({component=\"log-sentinel\", event=\"sentinel_sentry_issue\"} [$__range])", + "expr": "sum(count_over_time({component=\"log-sentinel\", event=\"sentinel_sentry_issue\"} [$__range]))", "queryType": "range", "refId": "A" } ], - "title": "Sentry issues", + "title": "Sentry Issues", "type": "stat" }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 64 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 84 }, "id": 107, "title": "Process Logs", "type": "row" }, { "datasource": { "type": "loki", "uid": "loki_local" }, - "gridPos": { "h": 10, "w": 24, "x": 0, "y": 65 }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 85 }, "id": 16, "options": { "dedupStrategy": "none", @@ -506,7 +714,7 @@ "refId": "A" } ], - "title": "Sentinel process logs", + "title": "Sentinel Process Logs", "type": "logs" } ], diff --git a/observability/local/log-sentinel/analyst.py b/observability/local/log-sentinel/analyst.py new file mode 100644 index 0000000..ce4f94b --- /dev/null +++ b/observability/local/log-sentinel/analyst.py @@ -0,0 +1,376 @@ 
+"""LLM-driven analyst — T1 fast scan and T2 deep investigation.""" + +import json +import logging +import re +import time +from dataclasses import dataclass, field + +from circuit_breaker import CircuitBreaker +from config import Config +from loki_client import LokiClient +from ollama_client import OllamaClient +from prompts import ( + T1_SYSTEM, T1_SUMMARY_PROMPT, T1_ANOMALY_PROMPT, + T2_SYSTEM, T2_INVESTIGATION_PROMPT, + LOGQL_GEN_SYSTEM, LOGQL_GEN_PROMPT, + build_stream_guide, format_log_sample, format_logql_results, +) +from timeline import TimelineEvent + +logger = logging.getLogger("sentinel.analyst") + + +@dataclass +class T1Result: + summary: str + cycle_notes: str + anomalies: list[dict] + model: str + summary_duration_ms: int + anomaly_duration_ms: int + raw_summary_response: str + raw_anomaly_response: str + + @property + def needs_t2(self) -> bool: + return any(a.get("needs_t2") for a in self.anomalies) + + @property + def total_duration_ms(self) -> int: + return self.summary_duration_ms + self.anomaly_duration_ms + + +@dataclass +class T2Result: + root_cause: str + issue_type: str + confidence: str + correlation: str + impact: str + recommendation: str + logql_queries_used: list[str] + sentry_worthy: bool + model: str + inference_duration_ms: int + logql_gather_duration_ms: int + raw_response: str = field(repr=False) + + @property + def total_duration_ms(self) -> int: + return self.inference_duration_ms + self.logql_gather_duration_ms + + +class Analyst: + def __init__( + self, + ollama: OllamaClient, + loki: LokiClient, + breaker: CircuitBreaker, + config: Config, + ): + self.ollama = ollama + self.loki = loki + self.breaker = breaker + self.config = config + self._stream_guide = build_stream_guide() + + # ── T1 ────────────────────────────────────────────────────────────────── + + def run_t1( + self, + start_ns: int, + end_ns: int, + counts: dict[str, int], + sim_steward_sample: list[dict], + claude_dev_sample: list[dict], + claude_token_sample: 
list[dict], + ) -> T1Result: + window_minutes = max(1, int((end_ns - start_ns) / 1e9 / 60)) + counts_text = "\n".join(f" {k}: {v}" for k, v in counts.items()) + + samples = dict( + sim_steward_sample=format_log_sample(sim_steward_sample), + sim_steward_count=len(sim_steward_sample), + claude_dev_sample=format_log_sample(claude_dev_sample), + claude_dev_count=len(claude_dev_sample), + claude_token_sample=format_log_sample(claude_token_sample), + claude_token_count=len(claude_token_sample), + ) + + system = T1_SYSTEM.format(stream_guide=self._stream_guide) + + # Call A: summary (/no_think — fast) + summary_prompt = T1_SUMMARY_PROMPT.format( + window_minutes=window_minutes, + counts=counts_text, + **samples, + ) + summary_text = "" + cycle_notes = "" + summary_ms = 0 + raw_summary = "" + try: + raw_summary, summary_ms = self.ollama.generate( + self.config.ollama_model_fast, + system + "\n\n" + summary_prompt, + think=False, + ) + self.breaker.record_success() + parsed = _parse_json(raw_summary) + summary_text = parsed.get("summary", "") + cycle_notes = parsed.get("cycle_notes", "") + except Exception as e: + self.breaker.record_failure() + logger.error("T1 summary call failed: %s", e) + + # Call B: anomaly scan (/think — reasoning) + anomaly_prompt = T1_ANOMALY_PROMPT.format( + summary=summary_text or "(summary unavailable)", + counts=counts_text, + **samples, + ) + anomalies = [] + anomaly_ms = 0 + raw_anomaly = "" + try: + raw_anomaly, anomaly_ms = self.ollama.generate( + self.config.ollama_model_fast, + system + "\n\n" + anomaly_prompt, + think=True, + ) + self.breaker.record_success() + parsed = _parse_json(raw_anomaly) + anomalies = _normalize_anomalies(parsed.get("anomalies", [])) + except Exception as e: + self.breaker.record_failure() + logger.error("T1 anomaly call failed: %s", e) + + logger.info( + "T1 complete: %d anomalies (%d need T2), summary=%dms anomaly=%dms", + len(anomalies), + sum(1 for a in anomalies if a.get("needs_t2")), + summary_ms, + 
anomaly_ms, + ) + + return T1Result( + summary=summary_text, + cycle_notes=cycle_notes, + anomalies=anomalies, + model=self.config.ollama_model_fast, + summary_duration_ms=summary_ms, + anomaly_duration_ms=anomaly_ms, + raw_summary_response=raw_summary, + raw_anomaly_response=raw_anomaly, + ) + + # ── T2 ────────────────────────────────────────────────────────────────── + + def run_t2( + self, + t1_result: T1Result, + timeline: list[TimelineEvent], + start_ns: int, + end_ns: int, + ) -> T2Result: + window_minutes = max(1, int((end_ns - start_ns) / 1e9 / 60)) + t2_anomalies = [a for a in t1_result.anomalies if a.get("needs_t2")] + + # Step 1: generate LogQL queries + gather_start = time.time() + queries = self._generate_logql_queries(t2_anomalies, window_minutes) + + # Step 2: execute queries + logql_results = self._execute_logql_queries(queries, start_ns, end_ns) + gather_ms = int((time.time() - gather_start) * 1000) + + # Step 3: build T2 prompt + from timeline import TimelineBuilder + # Use a simple formatter — timeline already built, just need text + timeline_text = _format_timeline_for_prompt(timeline) + + anomaly_descriptions = "\n".join( + f"- [{a.get('severity','?').upper()}] {a.get('id','?')}: {a.get('description','')}" + for a in t2_anomalies + ) + + system = T2_SYSTEM.format(stream_guide=self._stream_guide) + prompt = T2_INVESTIGATION_PROMPT.format( + anomaly_descriptions=anomaly_descriptions, + window_minutes=window_minutes, + timeline_text=timeline_text, + logql_results=format_logql_results(logql_results), + logql_queries_list=json.dumps(queries), + ) + + # Step 4: T2 inference + raw = "" + infer_ms = 0 + try: + raw, infer_ms = self.ollama.generate( + self.config.ollama_model_deep, + system + "\n\n" + prompt, + think=True, + ) + self.breaker.record_success() + except Exception as e: + self.breaker.record_failure() + logger.error("T2 inference failed: %s", e) + + parsed = _parse_json(raw) + result = T2Result( + root_cause=parsed.get("root_cause", "Unable 
to determine root cause."), + issue_type=_normalize_issue_type(parsed.get("issue_type", "unknown")), + confidence=_normalize_confidence(parsed.get("confidence", "low")), + correlation=parsed.get("correlation", "No correlations identified."), + impact=parsed.get("impact", "Impact unknown."), + recommendation=parsed.get("recommendation", "Investigate manually."), + logql_queries_used=queries, + sentry_worthy=bool(parsed.get("sentry_worthy", False)), + model=self.config.ollama_model_deep, + inference_duration_ms=infer_ms, + logql_gather_duration_ms=gather_ms, + raw_response=raw, + ) + + logger.info( + "T2 complete: confidence=%s sentry=%s gather=%dms infer=%dms queries=%d", + result.confidence, result.sentry_worthy, + gather_ms, infer_ms, len(queries), + ) + return result + + # ── LogQL helpers ──────────────────────────────────────────────────────── + + def _generate_logql_queries( + self, + anomalies: list[dict], + window_minutes: int, + ) -> list[str]: + if not anomalies: + return [] + + # Seed with any suggested_logql from T1 + seeded = [a.get("suggested_logql", "") for a in anomalies if a.get("suggested_logql")] + + anomaly_descriptions = "\n".join( + f"- {a.get('id','?')}: {a.get('description','')}" for a in anomalies[:5] + ) + prompt = LOGQL_GEN_SYSTEM + "\n\n" + LOGQL_GEN_PROMPT.format( + anomaly_descriptions=anomaly_descriptions, + window_minutes=window_minutes, + ) + try: + raw, _ = self.ollama.generate( + self.config.ollama_model_fast, + prompt, + think=False, + temperature=0.0, + ) + generated = json.loads(raw) if raw.strip().startswith("[") else [] + if isinstance(generated, list): + # Combine seeded + generated, validate all + combined = seeded + [q for q in generated if isinstance(q, str)] + valid = [q.strip() for q in combined if _valid_logql(q)] + return valid[:5] + except Exception as e: + logger.warning("LogQL gen failed: %s", e) + + # Fall back to seeded only + return [q for q in seeded if _valid_logql(q)][:5] + + def _execute_logql_queries( + 
self, + queries: list[str], + start_ns: int, + end_ns: int, + ) -> dict[str, list[dict]]: + results = {} + for query in queries: + try: + lines = self.loki.query_lines(query, start_ns, end_ns, limit=50) + results[query] = lines + except Exception as e: + logger.warning("LogQL execute failed (%s): %s", query[:60], e) + results[query] = [] + return results + + +# ── Helpers ────────────────────────────────────────────────────────────────── + +def _parse_json(text: str) -> dict: + """Extract and parse the first JSON object or array from text.""" + if not text: + return {} + # Try direct parse first + text = text.strip() + try: + return json.loads(text) + except json.JSONDecodeError: + pass + # Find first {...} or [...] block + for start_char, end_char in [('{', '}'), ('[', ']')]: + start = text.find(start_char) + end = text.rfind(end_char) + if start != -1 and end > start: + try: + return json.loads(text[start:end + 1]) + except json.JSONDecodeError: + pass + return {} + + +def _normalize_anomalies(raw: list) -> list[dict]: + if not isinstance(raw, list): + return [] + valid = [] + for a in raw: + if not isinstance(a, dict): + continue + valid.append({ + "id": str(a.get("id", "unknown"))[:64], + "stream": a.get("stream", "unknown"), + "description": str(a.get("description", ""))[:500], + "severity": a.get("severity", "info") if a.get("severity") in ("info", "warn", "critical") else "info", + "needs_t2": bool(a.get("needs_t2", False)), + "suggested_logql": str(a.get("suggested_logql", ""))[:300], + }) + return valid + + +def _normalize_confidence(v: str) -> str: + return v if v in ("high", "medium", "low") else "low" + + +def _normalize_issue_type(v: str) -> str: + valid = ("error_spike", "config", "regression", "user_behavior", "infra", "unknown") + return v if v in valid else "unknown" + + +def _valid_logql(q: str) -> bool: + q = q.strip() + return bool(q) and q.startswith("{") and "|" in q + + +def _format_timeline_for_prompt(events: list[TimelineEvent], max_events: 
int = 60) -> str: + """Minimal timeline formatter used by analyst (avoids circular import with TimelineBuilder).""" + if not events: + return "(no timeline events)" + + truncated = len(events) > max_events + shown = events[-max_events:] if truncated else events + + lines = [] + for i, ev in enumerate(shown, 1): + try: + t = ev.ts_iso[11:19] + except (IndexError, TypeError): + t = "??:??:??" + sid = f" session={ev.session_id[:8]}" if ev.session_id else "" + lines.append(f" [{i:03d}] {t} {ev.stream:<25} {ev.event_type}{sid}") + + if truncated: + lines.append(f" [... {len(events) - max_events} earlier events not shown]") + + return "\n".join(lines) diff --git a/observability/local/log-sentinel/app.py b/observability/local/log-sentinel/app.py index c990164..c8af441 100644 --- a/observability/local/log-sentinel/app.py +++ b/observability/local/log-sentinel/app.py @@ -1,9 +1,10 @@ -"""Log Sentinel v2 — Flask health/status/manual-trigger + background sentinel loop.""" +"""Log Sentinel v3 — Flask health/status/trigger + background sentinel loop.""" import logging import threading +import time -from flask import Flask, jsonify +from flask import Flask, jsonify, request from config import Config from loki_handler import LokiHandler @@ -27,27 +28,113 @@ @app.route("/health", methods=["GET"]) def health(): - return jsonify({"status": "ok", "service": "log-sentinel", "version": "2.0"}) + return jsonify({"status": "ok", "service": "log-sentinel", "version": "3.0"}) @app.route("/run", methods=["POST"]) def manual_run(): - sentinel.run_cycle() - return jsonify({"status": "ok", "message": "Cycle triggered"}) + result = sentinel.run_cycle() + return jsonify({ + "status": "ok", + "cycle_id": result.cycle_id, + "cycle_num": result.cycle_num, + "window_minutes": result.window_minutes, + "timeline_event_count": result.timeline_event_count, + "anomaly_count": result.anomaly_count, + "duration_ms": result.duration_ms, + "summary": result.t1.summary if result.t1 else None, + "anomalies": 
result.t1.anomalies if result.t1 else [], + "evidence_packet_count": len(result.t1.evidence_packets) if result.t1 else 0, + "error": result.error, + }) + + +@app.route("/run_t2", methods=["POST"]) +def manual_run_t2(): + t = threading.Thread(target=sentinel.run_t2_cycle, daemon=True) + t.start() + return jsonify({"status": "accepted", "message": "T2 cycle started in background"}) + + +@app.route("/run_t3", methods=["POST"]) +def manual_run_t3(): + t = threading.Thread(target=sentinel.run_t3_cycle, daemon=True) + t.start() + return jsonify({"status": "accepted", "message": "T3 cycle started in background"}) + + +@app.route("/trigger", methods=["POST"]) +def grafana_trigger(): + """Receive Grafana alert webhook. Dedup, parse, and dispatch trigger_cycle().""" + payload = request.get_json(silent=True) or {} + alerts = payload.get("alerts", []) + if not alerts: + return jsonify({"status": "ignored", "reason": "no alerts"}), 200 + + fired_names = [] + now = time.time() + trigger_tier = "t1" + alert_lines = [] + + for alert in alerts: + labels = alert.get("labels", {}) + annotations = alert.get("annotations", {}) + alertname = labels.get("alertname", "unknown") + tier = labels.get("trigger_tier", "t1") + severity = labels.get("severity", "warn") + starts_at = alert.get("startsAt", "") + + # Dedup: skip if same alertname fired within dedup window + last_ts = sentinel._trigger_dedup.get(alertname, 0) + if now - last_ts < config.dedup_window_sec: + continue + + sentinel._trigger_dedup[alertname] = now + fired_names.append(alertname) + if tier == "t2": + trigger_tier = "t2" + + description = annotations.get("description", annotations.get("summary", "")) + alert_lines.append( + f" Alert: {alertname} ({severity})\n" + f" Fired: {starts_at}\n" + f" {description}" + ) + + if not fired_names: + return jsonify({"status": "deduped"}), 200 + + alert_context = "\n".join(alert_lines) + sentinel.loki.push_trigger( + { + "alertname": ",".join(fired_names), + "trigger_tier": trigger_tier, 
+ "alert_count": len(fired_names), + }, + env=config.env_label, + ) + + # Run in background — webhook must return fast + t = threading.Thread( + target=sentinel.trigger_cycle, + args=(alert_context, trigger_tier, fired_names), + daemon=True, + ) + t.start() + + return jsonify({"status": "accepted", "alerts": fired_names, "tier": trigger_tier}), 202 @app.route("/status", methods=["GET"]) def status(): - app_dets = [d.name for d in sentinel.detectors if d.category == "app"] - ops_dets = [d.name for d in sentinel.detectors if d.category == "ops"] return jsonify({ - "version": "2.0", - "poll_interval_sec": config.poll_interval_sec, + "version": "3.0", + "sentinel_mode": config.sentinel_mode, + "t1_interval_sec": config.t1_interval_sec, + "t2_interval_sec": config.t2_interval_sec, + "t3_interval_sec": config.t3_interval_sec, "lookback_sec": config.lookback_sec, "t2_enabled": config.t2_enabled, - "t2_proactive_interval_sec": config.t2_proactive_interval_sec, - "dedup_window_sec": config.dedup_window_sec, - "detectors": {"app": app_dets, "ops": ops_dets, "total": len(sentinel.detectors)}, "models": {"fast": config.ollama_model_fast, "deep": config.ollama_model_deep}, "sentry_enabled": sentinel.sentry.enabled, "stats": sentinel._stats, diff --git a/observability/local/log-sentinel/baseline.py b/observability/local/log-sentinel/baseline.py new file mode 100644 index 0000000..c50b1e0 --- /dev/null +++ b/observability/local/log-sentinel/baseline.py @@ -0,0 +1,229 @@ +"""Baseline manager — rolling stats from Loki → baselines.json. + +T3 calls compute_and_save() to recompute baselines from the Loki window. +T1 calls load() + get_prompt_context() to inject baseline values into its prompt. +T3 calls get_threshold_recommendations() to surface T0 alert calibration suggestions. + +No ML, no LLM — simple rolling math (mean, count rates, p95 where sample size allows). 
+""" + +import json +import logging +import os +import statistics +from datetime import datetime, timezone + +from loki_client import LokiClient + +logger = logging.getLogger("sentinel.baseline") + +DEFAULT_PATH = "/data/baselines.json" + +# Metric definitions: key, logql, how to compute the value +_METRICS = [ + { + "key": "sim_steward.error_rate.per_min", + "logql": '{app="sim-steward"} | json | level="ERROR"', + "compute": "rate_per_min", + "description": "ERROR log rate (per minute)", + }, + { + "key": "sim_steward.action_count.per_session", + "logql": '{app="sim-steward"} | json | event="action_dispatched"', + "compute": "count_per_session", + "description": "Actions dispatched per iRacing session", + }, + { + "key": "sim_steward.websocket_disconnect.per_hour", + "logql": '{app="sim-steward"} | json | event="websocket_disconnect"', + "compute": "rate_per_hour", + "description": "WebSocket disconnects per hour", + }, + { + "key": "claude.cost_per_session.mean_usd", + "logql": '{app="claude-token-metrics"} | json', + "compute": "field_mean", + "field": "cost_usd", + "description": "Mean Claude session cost (USD)", + }, + { + "key": "claude.tool_calls.per_session", + "logql": '{app="claude-dev-logging"} | json | event="tool_use"', + "compute": "count_per_session", + "description": "Tool calls per Claude session", + }, + { + "key": "claude.error_rate.per_min", + "logql": '{app="claude-dev-logging"} | json | level="ERROR"', + "compute": "rate_per_min", + "description": "Claude session ERROR rate (per minute)", + }, +] + +# Known T0 alert thresholds for recommendation comparison +# Format: alert_name → (baseline_key, window_minutes, current_threshold) +_ALERT_MAPPINGS = [ + ("error-spike-general", "sim_steward.error_rate.per_min", 10, 10), + ("claude-error-spike", "claude.error_rate.per_min", 5, 5), + ("websocket-disconnect-spike", "sim_steward.websocket_disconnect.per_hour", 5, 3), +] + + +class BaselineManager: + def __init__(self, loki: LokiClient, baseline_path: 
str = DEFAULT_PATH): + self.loki = loki + self.path = baseline_path + self._cache: dict = {} + + def load(self) -> dict: + """Load baselines.json from disk. Returns empty dict if not found.""" + try: + if os.path.exists(self.path): + with open(self.path) as f: + self._cache = json.load(f) + logger.info("Loaded baselines from %s (%d metrics)", self.path, len(self._cache)) + else: + logger.info("No baselines.json at %s — starting fresh", self.path) + self._cache = {} + except Exception as e: + logger.warning("Failed to load baselines: %s", e) + self._cache = {} + return self._cache + + def compute_and_save(self, lookback_sec: int = 86400) -> dict: + """ + Query Loki over the lookback window, compute rolling metrics, write baselines.json. + Preserves existing values for metrics where no new data is found. + """ + end_ns = self.loki.now_ns() + start_ns = end_ns - lookback_sec * 1_000_000_000 + updated = dict(self._cache) + computed_count = 0 + + for metric in _METRICS: + try: + value = self._compute_metric(metric, start_ns, end_ns, lookback_sec) + if value is not None: + updated[metric["key"]] = round(value, 4) + computed_count += 1 + logger.debug("Baseline %s = %.4f", metric["key"], value) + except Exception as e: + logger.warning("Baseline compute failed for %s: %s", metric["key"], e) + + # Persist + try: + dirpath = os.path.dirname(os.path.abspath(self.path)) + os.makedirs(dirpath, exist_ok=True) + with open(self.path, "w") as f: + json.dump(updated, f, indent=2) + self._cache = updated + logger.info( + "Baselines saved to %s (%d computed, %d total)", + self.path, computed_count, len(updated), + ) + except Exception as e: + logger.warning("Failed to save baselines: %s", e) + + return updated + + def get_prompt_context(self) -> str: + """Format baseline values for injection into T1 LLM prompt.""" + if not self._cache: + return "(no baseline data available yet — first run or no historical data)" + + lines = ["Historical baseline for this system (use these to judge 
what is anomalous):"] + for key, value in sorted(self._cache.items()): + metric = next((m for m in _METRICS if m["key"] == key), None) + description = metric["description"] if metric else key.replace(".", " | ").replace("_", " ") + lines.append(f" {description}: {value}") + lines.append( + "Flag metrics that exceed baselines by 3x or more as anomalous. " + "Use these values to calibrate 'high', 'normal', and 'low' thresholds." + ) + return "\n".join(lines) + + def get_threshold_recommendations(self) -> list[dict]: + """ + Compare computed baselines against known T0 alert thresholds. + Returns recommendation dicts for alerts that appear mis-calibrated. + Emitted by T3 as sentinel_threshold_recommendation events. + """ + if not self._cache: + return [] + + recommendations = [] + for alert_name, baseline_key, window_minutes, current_threshold in _ALERT_MAPPINGS: + baseline_val = self._cache.get(baseline_key) + if baseline_val is None: + continue + + # Suggested threshold: 5x the baseline rate scaled to the alert window + suggested = round(baseline_val * window_minutes * 5, 1) + if suggested <= 0: + continue + + delta_pct = abs(suggested - current_threshold) / max(current_threshold, 0.001) + if delta_pct < 0.25: + continue # Less than 25% difference — not worth recommending + + recommendations.append({ + "alert": alert_name, + "current_threshold": current_threshold, + "suggested_threshold": suggested, + "basis": ( + f"{baseline_key}={baseline_val:.3f}/min × {window_minutes}min window × 5x safety margin" + ), + "confidence": min(0.9, 0.5 + delta_pct * 0.2), + "direction": "lower" if suggested < current_threshold else "higher", + }) + + return recommendations + + # ── Private ─────────────────────────────────────────────────────────── + + def _compute_metric( + self, metric: dict, start_ns: int, end_ns: int, lookback_sec: int + ) -> float | None: + lines = self.loki.query_lines(metric["logql"], start_ns, end_ns, limit=1000) + if not lines: + return None + + compute = 
metric.get("compute", "count") + + if compute == "rate_per_min": + minutes = lookback_sec / 60 + return len(lines) / minutes if minutes > 0 else None + + elif compute == "rate_per_hour": + hours = lookback_sec / 3600 + return len(lines) / hours if hours > 0 else None + + elif compute == "count_per_session": + # Group by session_id, compute mean count per session + sessions: dict[str, int] = {} + no_session = 0 + for line in lines: + sid = line.get("session_id") + if sid: + sessions[sid] = sessions.get(sid, 0) + 1 + else: + no_session += 1 + if sessions: + return statistics.mean(sessions.values()) + # Fallback: total / estimated sessions (assume 1 session per hour) + estimated_sessions = max(1, lookback_sec / 3600) + return len(lines) / estimated_sessions + + elif compute == "field_mean": + field = metric.get("field", "") + values = [] + for line in lines: + v = line.get(field) + try: + values.append(float(v)) + except (TypeError, ValueError): + pass + return statistics.mean(values) if values else None + + else: + return float(len(lines)) diff --git a/observability/local/log-sentinel/config.py b/observability/local/log-sentinel/config.py index 70997e2..2ea06a1 100644 --- a/observability/local/log-sentinel/config.py +++ b/observability/local/log-sentinel/config.py @@ -10,8 +10,8 @@ def __init__(self): self.grafana_user = os.environ.get("GRAFANA_USER", "admin") self.grafana_password = os.environ.get("GRAFANA_PASSWORD", "admin") self.ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434") - self.ollama_model_fast = os.environ.get("OLLAMA_MODEL_FAST", "deepseek-r1:8b") - self.ollama_model_deep = os.environ.get("OLLAMA_MODEL_DEEP", "llama3.3:70b-instruct-q4_K_M") + self.ollama_model_fast = os.environ.get("OLLAMA_MODEL_FAST", "qwen3:8b") + self.ollama_model_deep = os.environ.get("OLLAMA_MODEL_DEEP", "qwen3:32b") self.poll_interval_sec = int(os.environ.get("SENTINEL_POLL_INTERVAL_SEC", "60")) self.lookback_sec = 
int(os.environ.get("SENTINEL_LOOKBACK_SEC", "300")) self.t2_enabled = os.environ.get("SENTINEL_T2_ENABLED", "true").lower() == "true" @@ -19,6 +19,16 @@ def __init__(self): self.dedup_window_sec = int(os.environ.get("SENTINEL_DEDUP_WINDOW_SEC", "300")) self.env_label = os.environ.get("SIMSTEWARD_LOG_ENV", "local") self.sentry_dsn = os.environ.get("SENTINEL_SENTRY_DSN", "") + # v3 additions + self.sentinel_mode = os.environ.get("SENTINEL_MODE", "dev") # "dev" | "prod" + self.t1_interval_sec = int(os.environ.get("SENTINEL_T1_INTERVAL_SEC", "300")) # 5 min + self.t2_interval_sec = int(os.environ.get("SENTINEL_T2_INTERVAL_SEC", "900")) # 15 min + self.t3_interval_sec = int(os.environ.get("SENTINEL_T3_INTERVAL_SEC", "7200")) # 2h (dev default) + self.merge_window_sec = int(os.environ.get("SENTINEL_MERGE_WINDOW_SEC", "10")) # T0 batch window + self.sentry_auth_token = os.environ.get("SENTRY_AUTH_TOKEN", "") + self.sentry_org = os.environ.get("SENTRY_ORG", "") + self.sentry_project = os.environ.get("SENTRY_PROJECT", "") + self.baseline_path = os.environ.get("SENTINEL_BASELINE_PATH", "/data/baselines.json") @classmethod def from_env(cls): diff --git a/observability/local/log-sentinel/detectors/__init__.py b/observability/local/log-sentinel/detectors/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/observability/local/log-sentinel/detectors/__pycache__/__init__.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 6cb6326ee76023d9ec9633d4ad878aafc2afa1af..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 196 zcmey&%ge<81P3M^%>>bpK?DpiLK&Y~fQ+dO=?t2Tek&P@n1H;`AgNo)&Q>v@#i>Qb zG3A+gB^fa(sbw+6nYp^fC8_0!MJXV5Mro36K~8CUW?oEw5>RPbVp3*KW=UmCPJVJ? z4v*B~1|oII{Q}8q>wGH_pOl4YTXe z7%JuIWR3^IaVX|msj1qBpu6gFZ+YlL`cR3!IZ7q=R-A-%y-K`=!l`Os+HZF45IF9= zBYF0lnQy+C`TqSrKMw@F2-=pre^KTd5c-A;>W!}v&p!m>9ug5oA|=u%sS`A&1wu0? 
znG-B#ZJIslI^o7{n|7V#PVkr)P#m=*(Y+msT$(+|IP$PZUsgEm(~z9A7R^$*I1oHc|@)=&zXFp*KzB9;Z5F=SoH zXgMKU&~!OlG?Zz1WA#eLFl4Oh$*1HF3Ci;2_OL^kafU1=`O z6J~far~{tt5jpJ5^9hgDc+7sukn@oGC7u$ySPjlArP?bkCUfPA<;s$KSZ*vEMXY6M zV63+w5Jb(-S159y9;DGJY6R%ln@An3&1y`F*;QN4fvgnWfyQd4f)TU9$EKJl`oN{r zJxHgc==fHIG}l;N11ROHbJnG0K-#HBkl}itKD7-Y9}z^d*Ewjcy;D#HVFEry0;r}2M^Y) zB8HNT7#1U9^j*IGRsoHL>%61;Q>Y#of~|X<7C^t6YXT)=r!B9pOO6ldPl>8Ak(>YY zK52hFns>xJIco7)tQbl*qgw2>4Ax39$Eh{~fP{xoVv_qhOSG`dqIxVUQDTL{gis3C zLP@uo<(#nVDKSujv0O&M0Ev9f`*e#XVut?n_H}T>{&roN>edYz&das$7367IGy2=^ss%&w3acU}qXW$heKVKk}Jgwwp zEh}3MvJTIJdC4e9#(vAE7V=kB=_(0Gh#n>{8y2I=n&p!eUDAu$tgP#n8*V6TIZSkB zv07$I#;t^O;X+Z%SrLgu@?~jLu1I(-R+rTYOzaD9b`pSYaW(bhEo7d`VDNind5KP7 zdr>oR942rxbUJY`VWU~fKwmng%UDleQ#4~Tos*~2o0LZA$>MnT6}6aGv~*z{Sf?}N ziVCVps|9#9^!aWb^r~pG3foEID}cC~l52KJUYW7n&Z5=q*nQ~LWRi4NA;Ba4ztGJh zv%RCtn~|;ai93nP@LD8M=Kc}fVn&SrFn#Gzi|J;leR9iBl%e9T*xgR`+dVRj(uy(x)*gM zOhJ6MEzlNufdb*^3jukWDu)(xf8%?f1h;}qWRYf;!r^EZm zxKWaHBLjgc0RUlTya*r#yY|D{bNC_LLplPqc1Qe2-vQl+$n-VMf+AzH$Jg0*SG;m& z9l>XbUHaLVfqxMazgyo$PSs=_fN^$ns%K$>MDp{8(A`6jx;C%E9BCr857m>EM*RR- z(Dl@%(VLPtJw{HjTV2BD)c+cg&|^eHZ|06d3Z+LjQgxjVp@TIN;NUny9VpKxT&FGf zgd(ds2+ex5vj#58=MpYVbZ2=LUC}`8Bzv&qg@S4^U=+)x7Ou%S!Qeh%!iNbbGo4X& zq9TY{#}u7pjdZ>6(?#eYRs!MMqc=zACKu?nz>e7=(;K?I`{wSs^K0JL*@4Ggcy7lh z#v`tIm1{PGk+RpB(I0V5t6USz+@85PbL)C#Vl~)NbeOYTu` zBvNhD(U9+<^rAGVCp0#N0CztlxSMjl(U{C8W4crBH}bHet8Okpx*y`=ME)gLFQajr z4F5f(&qV;I^}bO+Y`|bZa!)sSqsE?snG^?i_W=D1d&O&G`~c9@2uaSbwXqEh2ZtbW z@mdpoqCdsJy=`RfWyOXRbC`y4&_?P|uM76Q0p~{#h~Xqna9E6OM&i0=PyNxUPvGv( z4F~^UZkVU?RHCtTc+e?(d-uGlz|{$o8(&3jS&OsQV#MkSEQcN(EDX6|Tp{Y8P zpe=t5r|e5uzD>*LFw}zxZ-*}7wxiY#I^0EOiCer(tO*{!8Yxx)qFK&duGK}BzczHh z_+@ar>|=G}M0H4ZVmv|Qldd0r`XIc07Cni@XHS{Y*zE8(p{-@s3^&fl@5C#+7T#M6 z?<~8`=(hR3JAIYOMP@C!w;JhQJi9pkMd?9lxpO75)-za5{-hcjD!cw3++I2GC@8E3 z1<2iN!A>*Oyg;pn+CKI_5jy7rW~=bAf8C8j&6TTb!1EFmI|-_pEj!HS{Y(8zJu8uw z;gur<3E%2mm|E?8XK8Zz#KYd}`Oxj)&ERi1v$19V!kr6dW0M)%z1Y9lvm99- 
zUOrNdoG`bvRHi`o;^fkeC9OI*T8*6t?njMXtBqY|SMO@$S+l9tjGu%9w|T!q9*=LG zLHss5sU?CICrP_e~DMc^6x{%;dqb%cD5mEvPB2oGNc;X08BC6Yz592X)5luE%QYqB==MqW` zrU21*=t?vxznGTkl|zx}v>Y{!HOtaz{2c1n$t0l`I1#I4p`2u;Hi@Hl$u{Yb?34VY zfXYKQ+%@dZ2|MQ}UDyV@Uhg&c?%t%=WuA=Wt~l1idcB+UdRcFkR7HJpU(jzRY@dx%Wp^xX`L_n`l@|8nzHQS@4PV83>tpa=5UzNI;`__GKy?H2AWvf;e&Tu{oSyqhakP zLGHNWi6$fIm>fpW(+qE+#?DC#+6^u)>qa1q&T&4x?x+RznotX z^khuFIH@IO`ZZm?6rnMcr_<;9Q^|BZp-ighK=oqeTq2p!uS_P@Xe0?`yk7&?NGNg= zh9mq`m~P!I22xiHhuL8?9@ywb17=SZSfsrQ#Z{8)>Yf#H{`wovYtHPzsy{eu&s8_v zXu8&v-M?DBeb#Yb@O&b4uL#|9p*zBG4r?7NLdTr_jEz`69+eGJeL~RlGJ> zm)R~wW(t9Q*YI{xPt3@gm{3Gjkws0`w1ldN{m+Qtz{-H(oL1HIVf{)FyeU=F z`wFd|ElLsFxJOiKD^_to;Bz;{m#hVZ&82^(Vhd zID16H0{<^(1gT(CMi2opNjWB_(f105|M}_%B3Pj%&Lw5WPC<@#K{xHj`CT|797Q7f za_{SrRBE)#3@H5o)qVgU;42|Xzryq2DsiO?&C5RK?^Tt!K~n25!#32a7f@mSeOvQ24*EeWB+u;FVmI1LWm zhP}WexMvOhjfE;KXba9)3xyHEHQ!;Nj%wq^ItH?7tT&K_FVZCb4z1YJ+Z zTx`z0>gijstax_a_4KWINquwPPqvJF(zSP`YwyQhqbr{Fw~x(A+4`KXE^B@F^_;)) zhVPp1Lq6BmdF#y0Gr6`cxz=3^-3tv17Z&Rm`Na#%%?EPrJ#*TeJimM4@WRMq_2Tei z@A82&%dIctpjU3ba=#~-+ZxREY+vJT?G1Ss>HL;uE#T>_`6uyI*Z#v!wmd`IVY80~ zHiQ}rZ1+HbG|W;6WIcsIRtj{`U3egj6^GU2ki_-6oWDY z4;?{?VaKrnJMP(fwaQQA#IpHSIf4TqXA#0<&%t41ty# zm#psrNduhInve_AnaU z2|nyFJw5^lJ|Ts8MzQ-?yS;>j!%(+XqT5h8g#qCfh`9o>mJu`Th>>!7LdWERSpon_TbDOGYXJRv1J2RYEeOl!jfq+9v?T9qDQCkepS}5rq)3yf-w|A2_TUT2g=P5?xl;^;R>4+vOYMCuMn83mTquQXAj-eSy zyI&6dPb}NTj4jb?0nh?47z71Ij$9-bqeD0?zT1uX7EFAj>$5}@C>W1ThI~T<<5F_W z1pCvdfW~PU2u;KniLf48jGj#~|JZfm2u|^PC;;_aw$AeRT04F)Hp_or*Ia=Ki@{+2 z;A&vU?2$j!wjd`0KL&bNeZ9HvZCPQ}- z_B^|0=YZ-2(s|mlR_{itb2F;z82BpHLFQ^w-82gw6~cy@!~X=S?`(;lD#LgE9$H3* z6@e~UC9V`kAk;w6DCW{nLEw;wtRZ$2U|@b0z|2+3u2HYJlKB4u-Y&uV0C#?n10u!T z+llo-yeC%zKWJ~Vdza)c?n&}MPVSYe9)tgfg`6C|4`iB5@W$knJ}o{o3fv1BctV*{ z2Ml*vmx;42@RH~OC=45_WSS4d9d^U#m~CvTQNt3H3eWO;`xK~Zzk~vE)V>`*8=G}AVBV{1%f@a^-<+O%bG3bVwQgi~ zELXzjH&z292(G%U<=ss7@^br*h1c)49|W?-EOV}LS=@EU|70cDwT>*L7vhUAE;sMb zwRg`=%pF}G7+aE;PA|8PmtnAXBMgffC%|w$<8%Yy84PbjLgP1tFuNgz!-gvyo>61z 
zqztkr4A+N960ZhO^M%7x399Ls_$z8Syq@+@JVd+^Xu26NTUi~yyRo#NVwyuS?ooES z_2DptO+66}M|8-=Ak@nm%y$YEFbg(c1kxtO0xzuEdr*9r+HRwuRHb@IFE@-`HfYoj-9|3cBUm6?eM zjY8-uJvNXqOg%(TzE0Z9K1+m1F<5kyPIA^-9CH@OuxH51lL(9!Ne78rrpPGQL8dH0 z>v5L*204QR7#Pk|a62BORHcUNrARVu%J4OlyBJAoXiVYe^-T~d7H`3yhBSPY1pZffH4t~GX@CNd2mzTWj7r9#&kvksr8hkh$ zQ={My?x+g4;;0VycFnw9;s%VW;(b*cI0qkGsT~^~;8Su^W8Pu7*_|}ZzBn@1VQ-nf z0*cJc4US+4`%I5~h9cl_W7-ZVz($MZQ*!KYr0=h!_cPM`8R>oGxL~o=W<8Gx{^qAS Po2B~w=obWj*xULKX}?@o diff --git a/observability/local/log-sentinel/detectors/__pycache__/base.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/base.cpython-313.pyc deleted file mode 100644 index 987099bce2a6cc9e57d3bfbe29f7830afcccb938..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1032 zcmY*XO=}cE5bgQiO?IP_7&IUZ29Z5vLDY+gU=o5P5YbV*3^cv7y_1p2Oswun*qm}R zM-c*EJ$ln0;l-;LZ4d+z@fKLUTQ#$S_TlxbuI}opS2gK&XBe&zFW+acEyli)@ppMs zGTA_K!XgGNvLbuS+O)ymbifJ8cDCG254@VYTmEJMLC7w!%PjJiSmej{O{a+itu*Mh z4wqMjlHpKlndlP2tk4ohB9Y+;tw))J@LK;Tz3AD-xx2b%=EOj05Q*NEdZ!%rEE8;G z#V{*Uq-WN~NiH8$62@yH*^x=}SA+i4m^?yq!eRy%7GXygI9FI?gByFXA30ai8@UkF zmPc0H!o=HA5YNQ3q{0n1YAvy+?oH&hpcA#Bo6huRO^3=^pH}m;jZWhZ@W{A@*pPu1N#ldm6%>hGM{5ApXQb4`ML1!p)w2&*A6CUws;n%+yiBts6sgDx zrNcVZFnmU}hagpzYt;`Q){(1d6i~GTn_N+z8GYArpIw!CL%{nSClMtR>&NmH~myG72n?* z8^1X+^ZyFeZw|YZNPuv}PJPzyOpr1-hp^q7GcC?ZJI>7v=ey;w%87S4e^!Zn>N&&t zCRSv78*5E=%U|%R0SFc&1~IfUiZ8%qeJq}KV_DW`*89Phzq6$;?CLK+usScU|7N(=$@~G6D-4A1M0o`aW4bBer0cc1r6da;*9ohXGkPaJXHoa-=c+rU1~mt)3U91}-3+}L08rvtU|lcFx> zb#_7)ss)jqP=RQorb(*8%2H9x&*o*36>o})uCanrfS+0xWJ%P99uu=^%53?bpoz8_ z6~owfT2cyiruMM5^;XsK=`^|P6H)kVmfOyP+w7Dy9?TL4`W729xGd+2N!}MZNuVN+d zvY}WMT#tyWwu)|o=O9vCS!#;fCgHFgb=a9z=;uhdniB45$NdN$cJvib)*VG>&mp9E zv)&z7pgH$p2bRLIu#eco3m1)9jEDY}g z@82;>f&ZhPCV6T-&&i8uL$>i&g4rM!8-E`>4e^_9ZR!WP_Q4=RTt_n|EiV{{-CQTv zMRw1{vn@dPWP?DvM}0fTBO3rjiDx5nhPnvF0O)rtuOzLEhVuh6uYMEXfZQ8)4rl#v z!XTd&=4&Sbvg69TufX}W&P5cgzF6k%=Jvvl8Q3K|O7FZ&&XZ+^sb{k8qJz-252br9 
znGvT_r(Dr=^HMpfV9_1TJOA~WC=GhQmxLKRih2Z_*NRTE8LRe4KMZz;8q zBQvP!0@h8pq!iU!?2>q^QRJQP<7_Qqm(QomzNID>#@VzBw?VGp2$0&*9}AVr@stg1 zupr(Xl+{uR%6uxr=GAgVRiH&2Pc?Uu`YHPp_J*b^X`jgytR|yklhL4+U`d}fJ-VdJ zfLg1T%L1M?nY^HjC6#dEO{pL%dC?5Q5^u}=+oXTgf)y+dl9hzKUh9}p6h{XbtcKpEBf(?&}O2Lde1HMqj0x`^&H3>Z`BkA@(j-bb+ z@|*5k0#<5~3#w9TS~)Q?Q)4)%b=DYG7Bn5!snIiRe~m6{X~&&raGPP%t?6o|7CKd~ z=(7z|wG0bDK=+oU&#***B{`t6*Q+`^%r+wKbYZ2`1j2bjy+= znr?y(R+kE9o8vl1iXgW$XfLtvnNh1)(?YN zquFiei4(5IydUAL@c4*FTMA{eq3;v~{^+$A(cOzj7mhBSUuj#9W?%7~!oLTj+ca6_Y zeo3sYhkK1k=VEvv{E^Q{^xV68_o|V2$><(ljjy)UhsnMb_ovJ!O#RU5_5L&K$+Po8Bbr?9T8|!h*xj={uM_Gm^tb>cGnQN@jItt-4nFJ@?t3&qDR=l{$NsNF_c@ ze0gBZ7&x-&p;Pe<2JPpm&Gz8_j;~O#efK{-C^oQ>B>U|6AZbLJ$R0#SHm51np4|Eo zg<`#P6MqS}&1v)6=erN@)vQl1xdC~{CKL=Ci#5G``=#3O`|E0#sb1zGC_@(W}H_uM{d zgk!hQZn!T{p3bGq8;HDD-K%f@F7%bf+a@e+ayatN^%q!#NR_d!&8LC6gOUh*={#A~ zxPt6MJ!ral*`6jk)-&evApx{mv=^1AB05g*MMWy@svU#^51m411@@*OS4F6LP_p$5 zYhT=#lP1;8{|0o+Qy4?T34EVuTRi+9p~03$;IXzphzBqs z)9};!fZRqKZraoOFx)mj`pb9b!vA^L(*Ay}9!zZ^%G3HqII=;*`@b4-!_NaD^s5L< z-=kRiQaWJzc)p7fK6rTODcm0$95Ig*|n=}Y46{nP-mhc-wPy{NJY&I#vXyw=wv#{PZGix9z zRXJ5g97==`IrPAhiXP~#R6VqnD$%M3r$Mb5ft0G^kXvY|DsFwV8;6v3qSzazjHDL5tq_!5Ro5^amf*8;t$7dtszb@^!M zNSK6uJ#@Oq>x8F{A^xFeB*nu<(sJ%l}+^E6C|OEg-t&S9!qjz+EG zHtOMvS(4Xmw?Na2d|OIj%Ai|zJWN21AIwr5?05NCIczz3%PD}`G&q&B@rg1fQ-j&u z8Jr6a4diM>eK5X>GKdHem8c6u(nTWc5((*YCai}tN=D_2`(%#(tV|-hLZTcuYaQZ% zb&j`=V3BjkVtyp19|@DPr4kF}I067pNRGlYegzM*wD6b+zNDtMmjshsc{oeozz^E&ZdR)|1M}^r?{}%{35knk_ z{MW;&SbfewBk{WavNu#cV#tk1kM5BCK4vI-oWl#`9uaH}t8Iz;-yqdBs|X` z4|(OlD8WZ@&n=lNi0E|cL&(&<-D|3@`%TTp7qGpzduU|j_(=Ch+GXu5b)A&T)ZlQJ zFqOg*Kt9Dnp5@sP8Z8%#SuzCyWj$PQxrQ%TdFX$J9)wxW3rPiVzLR;RGi5a+mnPR4O1tB`ll=$ z@E$}eV5ZgsB}rIF*88jLXqZrBLXL75+M32D2m3Py#e`;nVDC&Ok1u4_5XAK}<%w-2 zyIioGj5`5KFJvby8|cW`E<_5lu#IBJvm9*0cK*=0;BL(ekuFWKaA0Ab{+<8pNl6DL zp2D;R?(+(&b@x=%T1&^R&Y8~H-SaJ5D&fb?ZPyRqh*d%hvF7XBukEU}ZKxdoeb@f^ zwgX>x+#9$xG&3}tx_$E3_TKv)6}8%S;Bn%$*-P_@t-mGu<`RAPPRu9Xsl@r_gV%P~ z5?xn^7ZRH)!wYT6*@2nRHT+)=8nZ`BeT 
zuNSJD_I-8v!NC2GADo}te`0RmXmxa~I-aZM@!a@iwQy!`+^UY9t)8{3?M3e9^vvmn z&et9dk3Q6lpW@%i-wJn5JnY*2WBeD*covd68kbbGv8R?Ce3poJMxUd2B(}5>H76@g zFaF$)+6ROe%RQ)TOrZSbxVvF+gZ%yO=0R=!=jd1}##GbfDPo!|YMMniU$(g(HzC!s z_KKy+G$$=WJsU2^aZQtK22XxckuYdXwk_)U`9^yAc%5ldFAFtlW<8Hs6J-xm*tH#O z%kUwR=0dFt{x6_!fcON}V#zCqYEA7|hL>b5l$<@jgrK`8-y8q3;cu>zck8>3rJ_tR z&Agifys?~X+mJGr>(HPi8dq~DJpsQyBEvowh5)fmgVKok#bo+_96yc#5l0!E!cj7H vPe95(vJZy*ouC?sB}ov3M=1Riz5Wz!d4kqIL0g`OdxYfd#=j7BzK4GRwn`-6 diff --git a/observability/local/log-sentinel/detectors/__pycache__/flow_gap.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/flow_gap.cpython-313.pyc deleted file mode 100644 index 6455acafa4dd83ccbd617fe81b2ed908b5fa01ae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2892 zcma)8VN4rG7M@-2+KX*$8iLO?2`pR!xGm<%K|DAK#4^$lL7Je66T(@Mh$^Q8 z6M?f5k$hU3kk1B5P(?}9i!?cfv|wHw4%F8mp;9QVERKv}7Z+R=Un|287YAo8=c-yV z%2b_MP>a~YCBrrClIl9DJBQVAV4t!}rj0W{@rBX?i=H$nt}o*d7>CA9y9g>k#zz+l z7Cv9WWMR}O%;7?P?|T2pnGqN-qdX!4I3#KU31}jbv;dJcDTPQ-lk*`kUCFm_m7|gt zN}hZ7Ux!-Hp)#Seo{b=DcltuTwwvDTrP<9UHzWZJPr@qpj$>4+bn;Y zcO`IXYDHiXUmN?brbQ-EY|WBZ5TIOHRas$lB%h<0(EL@?cIWa%d?ioK`9bR9s|G1@ zdag1vShgwqF0O=(=y$KyyX-O3;NO^#WsW&TcFO*!GYzPW@pL^Owo1IHeFq3 z$z48Xnw%)Ni937)d7XXVD9HyvBPJJkz*&%Bw?gd4yl)4-@BQDQUq(a77IQ*Y;A)RI zO&D&>+JRi49la${aR>s?j?Q!<q21N8%{19DpOc$=*uh+|9i0}=%pqila-nQ+hggI3`5N#Q;PXK3 zYFYojJwExmu+uuYO_JS`6SMsMceP9L<^*nzUCC+&EO=X=Fz@3Yxszo&R| zV2|COIg*#Uo7!Np+N8Jt*jF$;evx**(@GTJ+VHrnU-MJo#4regO4e zo#OX`iYk1zG>g)$Q!GfSZdewR@fCm$wtvo{u1+v@Dx`pQYRs1{%p^b{doj!*gJb!K zTH%?i{_Dn{)p?Urj^~T9Qw_*h0kO*JV!LWPt~%>f?4oMgDn}tTEi;J|EXsSmXwtG} zEU;)}5uIzY*5=4EX0QcA!v*9i`b;{$nEC+F_x0W#(AfJ2_4n#!>e%TPrZjPnDHJ>+ zfR0RdO&6R9(aQY1K>!2`hKoxMr|=cCi0uMqF-!qMVP1C~-96GUu;AD(zUHzBK3u9v!mf|qd;!C%fEtr{jUzl|Srkb9(e6Po7u!W5ah7Bo^O!{&j_dwN^1YVp z-bRXG2rDYuP2@}mClPr_Eq|0C7J-k5EVG@G@;m8Yhsa_u`3&TO`{y9=6U>2Uo zh7D&kTd`QAVJIvMa@zy&#V8XM)}b7>)JKhQ9Ad$GI9Y-p{pszBn5oZm8Tt`mI@F5Z 
zLraR+k-9bg@$~I$8yy2n;jJL*eQBfP;$Ks@Ppno}Dr*=1_Tl=83)P_uw~TwKrSN*k z#UJC(d+nV|=e|#*YKgvTqOX=1swRfk!W)U#mLmU29bP)?{i^5I_2ug~177FhyT|Vw zuXVms?R=%ydE)cm-c-D{H}K-BvZAa;Rw7>drPbt0a!0btPcPRp2!4CvTtIMDSaa)-L>`^&uzgi*Ws^w8BEVUUUjzb*y=} zX=VnVhy-36kfGodwrKr&Mf@A8`%jg+*}f5@Ah>N9Q;yXG@dh~q4E`da@Q^^80YMPH bMZfz2^?!#_-=Y3rf&)VGruAP0KVSX-|J;`X diff --git a/observability/local/log-sentinel/detectors/__pycache__/incident_anomaly.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/incident_anomaly.cpython-313.pyc deleted file mode 100644 index c71d21d2f6c1def99f562d90ddd759beaff2a711..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4586 zcmbUlTTC3+_3qB>`-#~#yVwiM!!|1$f_OAGHAxH@8yq0r@dj)*nhv|OF!3@=?#$YO zJVZYl;pW4(lE_%K6x`&4Djy|MrMgl-(n^*3xhtA5xpk_jPx)^nyKR)Oo;$-VgpyXx z6`cDx=iYl>_nb5P$meq-XivWNTd6sK(4XnVZ1(NJqc>pi4&o3-oQ1Pqu#8%|J=Sv5GBG6;!{5mc;>rMoUX8<)Ms$xcsOL9iain1CPn6R zp_o^*QbtW!h<89xMB|nMhPh){lCzRL3&X(RVkR$suPEZhK_N3IX6R97qkPl$Xb8YN zD2=cM)Mn)@nBlCLKHx=UWkyZPVL!2Ar+B%bv2+o5yNl zl}(ze+PMIw>?Um-jU7eEgW{AD{9|wH-Cl^JX_twcdM2s+vN<>U-fdZB$J9d7`*j?h z_5BJV;rmpE{WzMY+S6MKC4=U8r{}C3lXCbG#{zzz+W}+iw&@i1Z=`T^)7=MlPBlls z@jhpA&JHW*;@q6)lygU4(_wR$sT`pnS>r6gV?kc6V->Sd(V7m_?g;A`>fM|^0_5f!xYVO(TO)0_S zeuhvHZdmwWY9k4p?QOb+hrhLjYq$d`H`uW@0# z_F8J}GD5)V;k|qszDVEG*p+Y1t@VF!3;h3AZvVrt#y;G(0=)4=bewnJUwny$*c>MoWhqt-(n>t5M{O}1zb2`3fEVIRP6Z}%wr~w%u}6>Fe1=A3 z;x?9fMa&E@^}Yfjt-o23=Gzrjye8mm^NF~46(WCsv&jo@p5FXg{MGo2NYkvKDgD$W+@NW4nZNk$}oQOO99Dnwo_@M;(F)@in0WFzNm6b?4!4V9DNKq|>yPfkX|yMdqGDo`RB>J*;i2Tn^JkO%dPH5iHjfv|Mch70j*t^j?Q7zi2McUS*>z<9C`|Q@qvGU2W(v#!A6G{i4(+;(+ z9xojk_^4-VaH>2wRT{Wdx+Ih$Gjwn4PE1R5Z4FG8;V(L+HMVMpxi4y5G5=%a@&z78 z(6Ns9q!pjez7~$&p1L))I=61!3?E+^*20HwU%qvD&A%CbVk?~9_|7l-KI|(!HNM$B z0n-wf1`==dZp&)!$HzClN7tLm-s2y8kD8J;9N&s{mSdft#JaAJ0cil_>ncaOHUnK+ zFmgL^EAYNUi#5G_>CPoB)}kG5SsPwEx!$!du1{`^mg=9@8jh^x?pXERZ)SH z&QOA9DCnnvR$w}y#fs7g;0>gCV#`BdfaUX_*F{TgNIO`!JfR(KD6vrkR@l?F$m&D| 
zLASwfOn&J57abbM>L;G0$JPKC&(IiZHp-n|uO5KacMxQ=-PmoaUA3wuWug0LQ3Kjl zl~!xnR@8*1q4G7_G(BB!Jyr)2;GQ^YL=$Wd^)rnqXK6sR;)5?9R<=R~WKDWZ`kj6n zY8a!Xs<$Mob)<=d?;Do0nbntB3DL?rNoC&C%xXvHwpuCfa5#%h4iolq;;KeV48a7! zM~gEH$&9WN%jG5e@y^b^&d!7z(?mmTxqLx@jQhNhFN$X|hHUA&xVT^p@hBzn<#1tM zQVO{Oo)=WyN!J`$gzT3Q@d%x{&O(A(px;&r7E{X+1`F4W-PWwCPz}@YYj{ioW|)E` z{3|~M01?maztMKR?dEqk-HpqGn#28rSF}KF#md-x4+5c^+?z`!XY|Vl-h+2tB~SAs zWU=`^b$Tld^j}q2n=SO2^N8jPZ~0ovzLxg~*O~XuZ?*Q6TYEOR`)!-8gPXpg6)S|v z8?Rn}_088y&e)d^JRy2a=u@}`jtPCG#DJ6A?q>9pU@!A?OE2@wr_Y>1zdq$2urt50 zSpX6jO`KvDaw(>T?AN}es-AftFw`@v^)mH83ulqxLr`X>tNyynACc(+W)F2O7+q$k zSXvoRF%-7L14~)@-i9xh5RBlHlGe*I^9?k#k_5VGx>avettp0{0sCNX`krhZdvaHe z$BgSvgIj?l!4Ml3@!sV89MgZe%4}%tmYph+E<>l`YfTFLlp$wYHID`o%CZmti)ZG@Y>LN_}+yIg5iBm N?>^%GL1()3{|1^{F`57X diff --git a/observability/local/log-sentinel/detectors/__pycache__/mcp_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/mcp_health.cpython-313.pyc deleted file mode 100644 index 24d884813870cf23973529773e8a97ec32bbe16a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4560 zcmbUlTWs6b^@^n4l10B|MUE-ij;$tAk|s{`UfZ~hA9b9DRoc|f2w9?SwKVBoQc3N$ ztI>}^#a;=TAPz7f4p1x#wjmy{j|>>Fd~Cor>_dUJh9OcQSkaIC*>l=;{o1*tXuFw> z4ke&-&%HeNyzb#S=V~Pw^dWej`RGsiU)Lh^8JXCRvx=<9fLumA!iab9)SzR4!W2hv zdXOGqFk|A(pmV^5T_)}vbPss2heKf$N4zV7c=ssNLtFf?m-meNc<-ohFT?vH=nV$@ zVHVg+@qTj_oCqX>v)#R-CT2Bm=-5e4Qxu6C&*Y^77F8}I=QveT&U4v}B&n%Kgl)oM zxQ{6XSraj!f#)--Xzf?X-2Gx+&gJC^Ktso7vXc06LBuo1GTBKnYbj;HRg3q+{4yFv z*a22kyaUrbg&Cg4PM#Tc@y=2AsD}`P1z;Um;N{)eH{nTmjoP8?bf1`!v`JfmN5sX3 zZz?-2OqwqYTA5Z2XO_q}Tv*f!Sk6+G3MPb;K*U=sb0Fk0+KFT;?KtIFM^Qz2M^ANC zrh$qXsrne9o+^dxgmqV%u19^pKuC6{-5c_uc)F*mf$T|pPT8JVCz5AuS>4su9PAiM zBD4ul=Q^I=5x31M?XmgDz7g;0>c)DxY+8Gi{b}&^A=s7)AoSYnN2ntdQI_{4g9xd# zN_D{uXuJns0~+vTnmOgSrL2u#aBOl~z(t#SphxX9s$nK%i=v3Vr zg^i^HX{H{X4yx2nq*8F=4G1ZpZgSqW_IaPAhMFvER7wu=bt#Ik=NtIO1FPr6q2Wt} zbKg%Pf@;2_6-|6|y5`$-sX==vKK%dDZQ_GXWZ0*@3GbG*SlxsovPQvgNzve;=z(=H 
zG<3o8VcG?L`Z4$^v@W(o8`}N9vCV#G-?pYVmO=5W=WP0RYnp{fYU6zXxjoKJe7Fg( zRUfjONL%lt*akj=_#t0S+G%HyO#gTOD&l-Q-!am-8v9OLJ7VX?cJZAX(eH9Rd6sFa z1HK;)esXXeCAOU~0$C|j$cX|eM5^H>gCJ+7L<1R4lGg>pJE&9$O+Z!ZSd=LAlJCbyUo8|D3zTfp9fHCW)LX;&b_|$SDOaGa(x8g8Zhe zoR<@n5v+<3vZS6GPSfUBW)HoSnVvopw;YkoiRY4%GBE)~HNJz(DpS*nEXvxEAH)gy zyWo>#>Z$m9+`HTxsv;*mhR0Gv!G=c_&xv5u46x2?63A8yQ&Sl{1Eyv)aY7-OcrKq4 z<*aDbh$`3)>)?Aqd)n}W)VCzzEmFV@=6nXrv%z(K0gp7Tb_}FHFx?~O4swQ1#8|=B z5&2XR`~WAE&n0L~k}mEfU>ksh8%GJ=O2AeE+6dqXh!M~ZU^c{)4upszQb25tjgxNW zO>WjXrE=XzXM-?klhC$IDpD?C3xmXL2{V|yJg&^LtDCF7u&f}gRc>s?J}t0;@cX|WHp=8@Cs^XTom$fsxCN8QO+5(B+8i7 zPlF-xY+7kBxqMbj)EbN`O5+&HEJWeB)j_Fv3z-H;(Ay1}(LiQT>Sg#yk7g|!jFgu} z)6WJ&J`^}}EB>(Qi3s887&jQg(qITH2$^rkd-5^FV+Tjmz(pNB4+c$+LPUgrMK*f z>aq5s@3V%MVy_;KUh`b_ENm%_Rl>WA{d#@$UVYcy`mU1uUj44S^}Ft~;fM9jSN31r zKR;NitJLo(_C9FZwlGs^+Fd;UAkx0jd!z4qU+HY6<4`4XxH!1vLTp31`MFByIi2mO zu*t>RyX@02R$rtlp_I->@3HN7S=jt{HbGu&nLquHP`l1X?y<4EY;2+96Six)3pGX_ z??5fl`Ijo;L@}uQ!}Gf<{-_>`E;uTon9hc;gf50YcI&ONYiF*W(ObLp=+4Dp>HXXJ za->gh+cB?{leuzKBm`%#o_(0ucfUVfiJksy^v-iPIo-F?h6#CtzU_v*2INgy&-yK_~P)#$Msm}jh5>zdaO(5lBL|O$(xgl z@7(FHq)wER!{x0f_4xkV4Sxv#E_`SEpJyru&Xo7RUgk!~uGZ_V4-?5-;hW*b?Ulq) z{i)Qg?wj3+3|N)*4VL7+Q9MHebe3fZIrVQ@0-3_1lK>zJbc_!Sb$`)+FhX9ly$z_Z+XJ`pPZ+ zdi2b4$kST$7{7qJ1BHML6}l< z1xW-vAV4C{NF;N>EF=iyd8}&WTbC6NDoM~kM7qCuu2&B=%=IlXBMxVHes~FyagkYk^*6zP zn>aphZ?y6|>D05N%GwV}nB=!|62N5yEyMb^RjPFtEXUgAD!qE`;wiJahYXsA>o7^A z9f)T-P{KKT=mjhN+R2+EZsNwVGR2V;BOI=#z@vapiQ1%+JIYOiCqyi#3`!6lksbg; zYmRUW0I1APbsEwaPMI?+n>wdSzE=q;$5{^BPIeF=!B0&9m_rW(jU-Cvdn$G(uD` zpAoXheD+dql!4tGYC{u_add=kL*tGFb;4|K60|{+QsjLFnx+>>Z8m(TGg86A;i^ub z%Sb8_S2b)0px6X}sN|YAhn6VXS^FSVcVW+u-Y$l|dJt&5G+Xw^SCGS5dq2c3(J+3k zk`nfTFNl7^a`ZI^N1sU47;ZtxDOm_xe^!ws$oG{m>*dVGScnYunpFf$fOI2)eB*4J|s!}Syq>(u7~fT9k^ zeU$nfwSA7dK1GpFQP*Ra;BbWJ1CJ3ImwIX1v2TH1LNJu*$7EOvFpk;_*)I?bri}jr DUMv!< diff --git a/observability/local/log-sentinel/detectors/__pycache__/plugin_lifecycle.cpython-313.pyc 
b/observability/local/log-sentinel/detectors/__pycache__/plugin_lifecycle.cpython-313.pyc deleted file mode 100644 index a6fe0db4f5004df2d7f15261b85ed4d525367132..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4987 zcmbtYU2NOd6~2@xiIObKie*>+kxk2TEG3nr)Nxw7&9B`$P12+_qvgly2wJ3Mwlb++ z(#fo5UG}ieMIYKA4dS2$8f+cV0(}r31G0QGtP$AyS~|et^8WNY@s9+PS2p z$c~C-EdgDgd+)jDe)sVFUEK`?dF=s>IzhKwL%~!ib|d#}VqV z13OqkJC8UI)0nns`iSeW8@pK)K{3R+x)JBjI>wx~4D8`NS%zb>Ue25HAhe6-eBI~; z8v6>~q`$m#O46mA&X$Uc1zBN>a$d?U<%$w3os|?_V+BQIv83q&*4d(}mb8&OL~fEY zTMi1EWUp2+^gU0>iYO}uAcK=c#B+-hUYZng^HR=+Vz>3P?SlSgltq|=^Eo&QJ2?lY zIVW~;bk@zevVfZ~17f#@*vokUHIww34?SzaJz7IsgK>ueezVO!3|~Kt=@d#O)0HDw zOgEPFMXcl;HXN%BLQC{@cLX-PjCLT!k)dX&297GpQDfCn#hGy?s-q&v$~fYPb0~mc zaly~cA;kkfCgW^J)6^jqA;p{VHkCf{D3Dn9CLt_^M|LtA#T z$wD)(nqLb00EfHQ6Ce8lsJ3#$-+`hBEHzQp(Y$qOk>m(B@T ztYxI?9GlpgP7gr}DnOblBu&qPq-jDyN;)tJ-Z%_RdGuw7^Lt_qQer=4&$BORssb+y z(4N>vII$#SGNP3ORctaEFkx9=GF`f?7bVl9EiNnwc*$gPf-V(QLP=+3QBrb}*(zx{ zp$Ox=uJZaLrXSF~RODYGdCv^VSjYiXKBp=QWLQZohvXS+h`l!ys||5eM{2_|pd}f1 zt|)8sux$5(oa(;aVrIBDCd%5q9mBO9H;newMmOOos+wLN|9=29ijA4=b66G&5^sG) zJ}<~cNd#Sjie*q;x&5FuZ5seZ;8+!Ja5T_8^#=Y#uWM zqEsrXOFVqm7bR@ARP6yvg1B^t*se)~{VC;+s&O_}KWw7R*ch~AIk?e!*+t^OtS*CP zvQSE$gK|hc$9{g{O}3PDRwyC!x`eAd<&H)ntbnnFv%Ga9TlM3x34I>aKd%(qNss9)NV@3;hnEx)(*VY#6=6ZbU4-=VB>`*Tn40Orpjk25V$zya%wv-D zOeSyFs+#Go7m0X?%!K9zjo)x=vyBWj!Zy8C29VBJpN8y17iC4VgV8WZ(psQl8y@q; z$%$-6ldzTr)6nO$qI5Rf5UL10zc`n&cgm`BzPNgR&epD#CJ7 zxT_$pimiT!AU00sI`1Fdh7EUdD{~e4N z?LEuKKkH{JOs~;9P+_`^9-t#eZ0oYu7#Lb+{uz!gPZ{CJl|vT~t(?3zw-!z=KlxcO zT!|#sf(fH7{vLI+Em>)c8*Py*!HdCnJw|lkozquO8__|ddz;ZaP5RGYecl*;_|K6K zA~%vB&8&?-do%i+F_{tu!xL?Gu(Y?dy+kYec_sEBlkCK(l$+ZKgD*I1Y zde79v-hawrgFR!(|GIw46%d~^;)zPz!0q0lYvNnSFL^%;b*_wj97-62!<9Dnc2E4; ziMRG$V!l|UP)FZ-nL;h>wViAJxX}`R{rIcL-<~$QqVIHF?J~OhjYz68axw7T8 z^Y`m<(CFR0dhWWmx^QD}rF+Kc8(p2gKDoN5vOizxE!0%mH~hPf_s(6X){+yI=;PM6 
zp0gAQxGi%b``!f$NAII!Nx#YPJc$rIZ~AzCK@}H^gl^#>V++OVOpxdEGS+nB%!xZiO4XoAf()lG`Ag@q(^n8N#uo!K!hx3hGUS$4CSBZo6TC(6&PJ zOJw+?7p9D0_`;!ex8EZ*f?9|YPhvh8H%hgpguHPzforDx7s)wK(i#6TF-3#PPxnBK?3lzs2?4t^JtH= zALXe&xbeb25%ghNaj@hjSie!!Bp}pKP1bjrAtl?UhikY6*KZ?m$DRpoVuUWyDS0fk zTWlJkdk71thA`GVW8o%CUnzfD)Zs0zC2 z`;kywl%B*GszUFHrIJnJgM=lJ$LfNtsd*JI0IL%W!t`KB+(J&m2MCAv$)Dt;k`9$z zlE&oAnNBR6vsXJrT_YMMslm36?a;%d`5FAQUqA!V*w^}c;?=|(kFWXqFH9O9-_Kt* zg6-=Lrz>zf7=D9$tqigFo7*j&Z+k2L*j+@q0=Jl!btk;PU8h~H@Tbg>5$O0NFnBXC z`0nI;&fgsRB);oreAg;>J+T&_TnkJsJB(oH_4BWuf9;hD6aD74KTO1gZ^1SoCj6}y z1teE|G4$)UG3OtsG3N*S4veD@$9)Ig&cC=QXu+pEJTIy_aHH0GJ{p;WKZYbXMlwmw+8<@Ab?0wx6q^i tLc^b;?oZM1mu{V+A}hr&5qYmG&HxpDV`?42YbA5-#MSJ72x&Rge*uC2;dcN4 diff --git a/observability/local/log-sentinel/detectors/__pycache__/resource_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/resource_health.cpython-313.pyc deleted file mode 100644 index 8f0b9589b03913415ad9afb07acbdc47eb1e18ce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3968 zcmd5?<0Q@x2|M|LU`Vp43kw@9ZAB=Xh7#D!4kT;S>BPQq9L6)| z&NwvL(xorR6;ibki!f59U8#LY)jp(edE})M4}CB)YQ3YZgt}^_yk#3!RDJ5XGako4 z)E}yqb|l|9bM86ke&^idbMD<~Y6>8DF5US<_7`Dp71VoU)2(EL? 
zq>H$CO0(zKNjGs@wELWA(o4KN>O}oW@N^@=n{*v#Z5zZV_>!E!jrb5c<`VqfXwgml z87>|u90Q3n*uW`+U(j@eCs@}CB!&4r(dHDK)A_&L{FooVC=T;EoYTniFrUuqS4X#~ zJpi0ra_`c3e52uS7k-^P>q`gTcT@_ z>bc?P5!qHWp(CVvMem+gTu0II+WM+5QQIl7;oDQ1b=Gv=D)%3C+>WgCR{bJ3*8*#} zr=9pWxF>Rs9FIgljm`NvkoD{->)(|G!W6#_LnCkF12@>f-NP>-Jjx0Yp+$(k)e!Fs23TvgJXK%W(>9^~ z8|P{4MNdUfLzaY|%My>1Q);LoOX}Q)ovQP!ENK!$wJgcMa_qc92Uu~kq_ai`#ip0C zq`vllnI)lyeAvyMrUveAmeg$vU3+B-tuKJA`~S|9ecw1=U(OToHQqC2hICzWO1W-& z=d)Nz>t-;OFGzU|jY>7lX1g;v-&+#=LrpG4vPL!~D=W>=y?Ezqj9+0#kIt`n5Ab;` zU$rMGh10xj@GHIp{2_TM!>+VRlO8*W1C;XzWsd3Ka(pE3lh{@@o z0xm3@9wTcg*!1azTuvs-;F)aT476gC!%NvTR#Vt)#yYqPe96!x<6CA>(K6Q*=^Aam zTNi0O+yF$i-Ng!C0@3sse|Dwiyw%kkg$&ag(Kc!%~#(@JV8H=%>Q~3_5KS{Bw`FI6jsX zbxic+^{i?vB-3~)*|39@UMS3s3@eVd=pjs2a8-a+Ho(w8ZS*%qDP9pzlCGf z)_uhbPhcuQ=L27~?JJ(Cws+l|S)C~@thp-fM~Y`Rqx(u{KbgEg`RR>H&+$t1&Ej~S zRUSG~={Z@6zEvE55{~}-!)m1a&yl`Ik-kp{9!CyV!_nJ~tUpB#e%XTBd!EHmD7M+r zT^hUdVR2$J)>S%uNBbSSHu>A|qu7b!J5RzLrT*JDs*%on;nnc3ebwH+PiF7WR(pr4 zJ%`s$u8*&cZg|Vx->L2&UYl7@txatVJs5g8@SykM;^Wz5`AViTyHK9Vmb2H&J%lP> zxqoHz@ayZ`MqlOd+kb3-c)dI`TlxO`k28zqtA$Et=~3_X>cCKyfB(xCe_!+~QP&}n5o^`%@q(;^j*7da?ZoFCUK3&~Uy>DIn>Bi*;mmdlb z&X?a!JWfjGA1zjrSIY^dtQh5v~v(W57>Z9MF zsd&)jB#G7rNiqYHl+)4$h0-AjN}H_IX2Oy*pC!6M|Ng2bN#qEqTh)ZnW3CBxK{u>I zLypt$c1h9=8JdG68wSbF6%4F{)=8>$f)4afLg?w$8t8RGKM2DuR1NmrJW~y~-JIBV z|A6symM&}~`d)XhU;cg5GmExOSlaY2Je6v^LeOewT{5PCxQj$YXESI&O4Ox9wl7^EL^W zG8XElPS@gXFR5RI4hi__M`3_+;|u)kgKD^S+r@g?Hp6Wn3Ab0uT-QG~L$P1_%fbE~ z#CY01gLxL-|J13Mw*oC_H8RNliWy{~_WL9$t)*b2n(KvdbPg_xg!*oJVIQ%g+oeBf z^(e`@ms#6o2mZStXyJNcTTJe4I}5%|CP0LGVxa~s4D%Tp`zspy9Cd$=hMsyaGE8SF Z`4rLj_Poo>w0(q2=~d+`1f;b~{{qDfp`-u+ diff --git a/observability/local/log-sentinel/detectors/__pycache__/sentinel_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/sentinel_health.cpython-313.pyc deleted file mode 100644 index 4e9563a2310999f06494df59d37ad2d2a322f25b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3486 
zcmbssOKcm*b;#W%DSkxB`k17oXrjuniAu69*|4qDc5L}8C-P^_CXNYgm&>8F@p6}) zS;`SlMh}IGAgF^_s0U`9+^H;I9Dn*lEV^SuYS&IrT0Ia(!Ei!NYht2P_Kre)*z ztULPHbAD$&JtEiEn))v)IR znV!c0-AxB7Ds$ecD_Aw=ws~49lvK2^FP1lRnA>!-1EwYPVTa*;=F@B_A8vegM$<^m z#wxDXZM9^80l9T>S=Ve_ua#AslyM1c))Hu}wtxv(0BhVJWAXU;Pio)*K)OQ~*kl~T zFjQtM-nmk)Q`OcjQ>lW%HOpi~tK0fKQ3Agygi?#Lk#7m@@tst#|8bqviNf>-0Ve!j zrf62RW`L{8@tH3}Zbb2<&WBx*JDny#J;qhX*!_5FvRGh*vf^FcwC9RtGGAnRb(GoU zu1d=so~xHeYes!mH;YyYQ0LW>Zs_(x(XcetfO&Q_&>zVCfMdnrQ_Brt88@aVI)tX8 zxP$Vp9vll0;O!vR2Gc#X(T?`w&k~!kL9V3n@Ny%R*mY{|a3v@4TsZ{u5;};?Z~@)m zR4)xvxXtmrQ4nqf;Te2_RSq2u`P*PM#W&J~1KeOWr8m+D0dBCGkvGza0dBCG(Sj&T zlMfJ5-$AGlz7gG~N(;QrZRriLiI<{+y}!y48Mw3~IW{54Z5|Y#5CE53^`#+)3gV%V zoH!8)B*3U3$w^Ow$?qURsvv;`?Qkz&(-Yi(yWJlyNUz)>gX0z-k9tc2$r4-!f8hOssT%#)}5xAo2MjhBr;zn|N#;qR*yzL*pEuDS} z_9|r{1H)By0hb6aQR@yd^*?^BV>As5?dG3j6~9AZuH+RU5T4%+)y_@%A=avI;K_0LN^?NPcopY$ zk`K!3vK#U3?hXWc?X3G_{&5c{{uLzdd#E`yyclbCbpImpd16ueYghkr;nA&!x19Z_ z*9P8Q>pHU-ThF|`vhPX$ao!oZyf!qwmbtQ+TJOj%?|YPgn0JOxtmP)xI^J27*87H6 z3Qul5zU3S|v$pT-THllt%WOnYvip8zEz#dhcK`h9&#wORVlzANDElzm%nmj)!z&-I zOh46}-tlH`V)fGMq;v9`lex}O1|AMHbK|R*ob1Veq@n}uFHkg<-sncj?DFB)vHSkh z_n)4AHsbVNZ1x>kKIr6CCsW#DcI?@JlfAf;S?B#@pPt@LwBE^P?&vC6{ovWT-}d~z z_p7&@YqIn1P3Ptd&pL;voGI#L*cR6jXY7=dJ-vX-}AceiiXZ?FGlGY-}fk9 zE2L|RYTHzYHvr9&@+I|K7P~y8GUx4KXU=m0|Fkgg5q_ zkTT0xHxMkV;_3&#PW_96J}-|A4@s@nN&tuue=S~1^#XS@`V7hHkA&bFHiHFBP}jc)vhs!{i0Xwrpw z)nNQO|5ed_0D9AP0N8$*pxFuwQs;Vl&;5x{KUqxwdp+6npy4EjHc?3G{5svSA;9_{ z#*6Y#;wki52NoWMurQO4yHQ0cTN?PPeJe?hmEaEpWT2uvVMDD*Y@=x=EMHz@ZF+W#VQAr#6i7hfR0 SZrliqq0al-KM^dRPyP!VxNBPg diff --git a/observability/local/log-sentinel/detectors/__pycache__/session_quality.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/session_quality.cpython-313.pyc deleted file mode 100644 index 96008a9e63d3cf78661823319671a42219c3f113..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3700 zcmcgvU2NOd6~3hYCEK(m+p%RUX8aRdOX7HO+|@?YWpQGs@)9p$xwx}BmX;`)4n;bb 
zR5Ghu8}wn&Pz+13262D_Nr3`Y-@L~G!{CoCXHgFtM4T^V_Twk|+8G^+2 zW63CDHAA~#9NG?)?A+e(f!;$WkxKK_RqB97jpeAR+NkQ`J!7@eEUd-T!-)69&^&eV zw+N|T-rLlko4G?=IN_}|K8u_P<@O?SDlkmm)$oP<|4em4RI}e0BGx^k85*e#*y16ePGw} ztPPQPKOf+Oe25Qo9pgSO%Eh?O>0o`oSj0nQGan!%o+X+5pOAHN-Fy%*9h&rTJr1T` zM~=S;)6y!}H%>Rf5~|mgcoEQqxPAxOxo?0h0jnHl5I4}k#@0rTybztt4K_s=c6TZZ zbvaL6+V}>jy7@5d*H8mfuOkn?5YZYp(hL!x_#HwK#5!j%dGxX!*7aJ;(rtfEmXxe+ zM-41yuEVgtu813gDi)+tYf-(fYPVF@nX?%~Gekv@)r_1ajSiE8YBOTSfc_}tL|G|f zNw>S!r>BJ4pduQQn%NKvy4||26!WqwNEmBax4p8O(@N1=n_@FsVO@cSJHwXf<7_F+ zHoTN#b)8z5&1o2vK(U>506kSr`fQ&A6?XtK93>=1NGFiF(y6zh&7B$4<-!TwkZy@M zJ9ymf*)Zs~%i6XK{*3)8yQXVuGH3_0&UZk=c2I`~kKr2tUSva&?0{Y@6hypX2Qv_7 zUL%}zlWa~#vfCs*Ba-+9LlcZsc0|$gHxy{O(DQ9Ccvnj;^~hGmED6WmG`7BzrAU2V zG{|&)kFxA7*;r*u(dP`t^kkC8-SyDreQ3p{-iva6wXRr8#|#T8I#cqDPvmU6pqKhs z0szLI(_^(9Tk0z4?3l3%nO)VC>NFI^_+mn|J zJ3#hDRt-Bun4F@ChV6$0Nop2*LC^M*8pM6XBw~AYNy*^|84u>%&!}z(+=_r=Qe;)a z49vRWH_~rMNHNyJcFI5}Yp_ordu?_m&Fd1@)3*T4YC0?3Odlu_!mk!rPPh@JwG~jk zDXz%mE|69ompSQYt?=Rh5+FY&BJ6*@hb5>{k ze(G+@9GKh^b~-PVFYF#3d6fHP?V~mG+vm2ww{vuE=kR=a@dcytp+|F{q&`ZSC+9w+ zc7{_s@rCljZhvjG^6oYap4;i4EnlkoQES()7Jsq$;k?zGc-Z@(*XkXxdcIYes*G+$ zw-(LG^V=!2d(n!Ys7RG-Tj#dkHD8)H=dYPPOT-}YAo1tKpf&mu`8)BlHFD}n#2=4V z+Ymcq_5a{Wf=RUPBc_$94x{#gM;$xNpw-@UpSjC?9Iz5c9=`G54J$EZ^^a8s%&}>6 z`ZY72vJNLJBb7tu#M$iu^XwJ#O2+KZ66H4^ylIUd|6}5cH8f_iuRrMs9f|IvP)F>a z-Kf29^X&?^GceI~`Khhz+e7Bm-1dqYf330p!eZLxJYc? 
z_!Gl6bud;*RG6)I%=lbm^|RZFZN^*_%>K6yu6*gh%3l)jPh<*p^;h4aPJz3qnE578MEj7QwmgkuwDnBM3Pe>jpVDswN0{1k9bYhRGd=94Osz zjv+oyhkrnB`ecoa^Z(po> z=O|y#=3*6*_m+3-+HYFFba;0O$C}iB=HSJWv;uO)+nWP&4;@C!jaeSmFDS$42wJXP z8xdM2HqLXJ4j>V%Su}w1R1Tf-44@p9q%S+4uB6BIkOma~p{Nw43*@_GhhE=UciB%V z+jCP?bmES6;~NAP6T-v4J__VEs?r``$1W4SGx`2c%FMrZTe?3CnUTRgMEN=%GwoFm zyuaEbVZ9e_MZaukJr60?^M4324=$Yt!t^=q6~7EJo+e}l$ic{l$RHtUGC}TFZe+1t zrt=Q>e#s2>J`-B(F%u=B#lFI?fb!*J3-og#t7V|J+8Qlzd*W8OU3R;{{TMb6yART#z%Fz diff --git a/observability/local/log-sentinel/detectors/__pycache__/silent_session.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/silent_session.cpython-313.pyc deleted file mode 100644 index c420ab6a6e40050772d1e7635805b953010082d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2058 zcmZuy-ESL35MSS&&lfw6TWY7ao6u95(70)m3R?98icmkJmc}Ait5mggx|}!8Rp-5Q z_neXfQXUX8PpClER6>Bn3%r#-f_I~UI2Ki^C=~V0Rf>3J_RbC|%$hAj7PMmW{4NQT}oL;eA z{8kgwmD85Ji0uecL^oqR4DuaRMKlI+N=A&zhC~%Zrg1~5sz$t;sA|Iauwtl1bX}o+ zMuH~mTB(ocj1amJMbrcFL<4I+5QSxS!{xHIw8Z1Kz`<3D{U#;06hR4T8mPFljSpf1 z&om-ZB{m2Ae;*@uq^l)zMIP@i*T7dr8byXg6!^ytnWz%r_>nFK zqE$3QJ@Ga|7NCILN?B+Up$2R~6R&7nPrEb?Cu)Xv;wD1k^h-yT(PXinX_67fc#ku*}Uru~2I zTB@h3$i4eBWcc+@37R$rj%z(VAcdPOWoEg`m}$B0xyw8rmK{fYr$fUUH8*RNXM3ue z!?tJ_J?5JfGp|W)Y%;5{DQS@N*dRqC_KtF2L7nyukg6<_z-o1H5(0!u5it^ zd|dYghnEF18}k9oY|90`>3gRCGEcc){f2Aa5Pe%xxm?FSSK(kxYE*&y;R>-Dm<|b6 zhS5<h_USPRJidk}cvNi9xa4}WagP4KVCmEy% zj&WhTn2#Q~dwz6GEv;}hTI569PMoP|wO?TT704@6OzwS-bMp{Cc+3I{Pq_ZRaP}GZVr1 z!5?;v1v>}Z#Y5{m4+Rs`?H%KPX0om3TCP2E?5D!9Kp*=g{d5<~?d{}Iar);%xqY6&{kZ&5`SbHZVff3!y+Tmf8w^f&Bvd#n3Dxku;onF1f0O*W9K3WO z7%M$ZD8&~#Nu-YkL&u&L(!&GKP&%FIyoh$>TbT_A4}FVfODV3IrYHo{A$8x(X8Pi;FDDy=kiD#O01h$8zL&Y%Rit8{xtSUMoJbcNhn0s7>aInjwa!-YwV1}QL5Os9^^O(5yT66`kQ<(Tjd?DB{ hkd7RS#ePAT|3Ld6pyC6x|Cu@&%dfrp7lKy^=^v6`Hy;21 diff --git a/observability/local/log-sentinel/detectors/__pycache__/stuck_user.cpython-313.pyc 
b/observability/local/log-sentinel/detectors/__pycache__/stuck_user.cpython-313.pyc deleted file mode 100644 index 41a8a4f8851b25d4596224a6a4a19fd68bc0edd5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4322 zcmcIneM}t36`#G``~KqN_%H#p23oi#5E4V&U^^`aY`_N8Th1oolGSpz;7yj@%j}+Q zE{RJjwK_LaV=J*tq)NqYs?h(46t#|2sZOP;QvW(c6E;&vjoKuV|2xZW6XmbI*<(5C zkAG5ibob`%&YRiy-n{v}H@Cy#0D{*2#-Ef6VTArkADrgdW^Vron9E2&7zvEvI?jx{ zu#2ZWdz>BTFlY1Jard|fdu-l)+&k{WJ{~oqHY9kOkl;;m{cL$X>=%5gfZ$IBg+R9# zp*~IsHlgP^91=pQa4P5qF(I6afJCIvBSh@6=t3YK%^iZFVcC?^CU2Oz^b6fNL&kho zGEEt42LF%cH+e%^lzAy_D!RsFIV(%1oZ+#QRWj;Q&rPa9oFT!CJTK)`Goz$Ucnlqp z47t3nj^WvRRM9euwg7B&Xeq7AKgh{=X-G<+meX_vr%_F5yZwD2mr)8~2At#)7|aST z%n2-Z3mo^{RucRcQ;3tN3+!TcAL4FsB-v9@7SRQW>yn5mG=%e@Oy-$ z7pVX5r~Pc5BbP*pdWWy{f=}=#z2M0}G5|CPQuomTMne0&LOAI?n+1+rr z-N%@Tw(U5BGZk-~gp+NEWw0`cXI_!j4D{s}g!vv}2H?Eo1%ik#jTKW#OKLu3g9J~( zV1UoN_;Y!VS2Uh5M&3JE#=U%?0)6=iZz_wj!Jk#k(;$~?0B@I}(gsgec|lf}%5WNI z0ZR16Ih{n)dQLM%MH9~g6zXS5pxldwoSx1<_7Z^5;Wk5A>^4mKtb{XdkMQytSu+o} zm6aE1aGc*ag|d z6v7CnDuzk?VphTitVq3`&B|JaxD``gGze$t*d#G=Q938yclXnKtnhU5)g)MMkI1-Ffmyfj|X9OS)Ioe7l?1ZoMQ~)-3b8O z_n?ESXfmz^Df|!>b%B}zdlOA*F>8=8Rc>c%3<_9$r4e~(AVqVCk%B!lPp2~SnbfYW zqWtOHTz6K@Eht(_p99e|(ww5ghNM&-d<}G=+ko8!HB^{R^Q{icRkBb|c8Pe(1Eleu zq}5X%p^#?$1(M1#vf4TdKC7nTO8ioM<-}%9yx{#JT5r`pRG7NsLEq_rZ+bI6Q1JXM z_E2His@rpA=F-f{={478-NS{WRzu5`{H6SA*V@o#LvOMEvGwZpCw@2Z?!YzqdSY|` zXz}r}Vr;xHa-+IwrSFZ_!VxQ4{n~k}s{T@VtEyw8s$(_rQB~KiAgXIFMNnJUJIz;{ z-`TqnYAK9Zq59XSE=>J3)bd$#?|Rc8#P`L`=83|@r_tsu{=f!*U^9Bas%pFvy%c@h zYqjkA#nW#+ZMAe-&0TACYu+`y-o4&>ox48$XYm8EIGijdPZpa_S$n^=Dqnqi?fGjh z*P_?Y7f(L3byD6qDHoIT#rao?&FASt&%X7n)%i&Axw)<97B}G6qFL>o7BAeY_V26t zH}Y54mSU);b*rs^qpknL>i&DRX#XFy_+bg5mO!;oge@WT$diGLJ(Di7hoJ?=m zL%<_M6}0Q9dU$&oLAKjHYKuVCTB)I!Btz#fxc2UO zY9ER@cQvP;u{Dl4gB2|oT#lA;Z~oB8ITHh%@e;4)00xzGIr&iB;=AOmo<7Y(2GO;Q z5$ETzz6gQ$QC-=kKz zZY$ir5pI8bXpQ~V=vGJHMn~VeaII^zV`wuxTyO!J9h#a*NnhuaM97&S$Hb9${itzoTzeoxF0|uY1|A)oj1c$+6^Z*$qc*+RW8I; 
z60S_$P)}>@17si~CO~eMPPN-iN0Oy$z&v<%;#qS|9N1N3vH4y>Rp@073Lsj^@Q^%t zJG1jTR3UuX1C=l}dhFP{NzJ155SFwBnXsy?5f-LGSR`P?b{FyQR0)u?<#`jNm4(yy z1PE0RQzF3M(19$YFM=^ET4hyLFOONl(DLw&>iRd0l_%dg|55d~3vMgYuo2n2y60-^ zMx=LrU^8;q3RPVkyf9W6F0m-uxO?!!$l=do4J-b&zN?4U_ieQA-;DMB)PKv1YC20k z6sjw7F&p0ERm3Zb89fa!9Zc)03RxZE+$h&Yk)1T(GY6MpOmBI_1Gp}$M!B#F+7*{* zUp32kP4SX&^bU>3fVIzn4dR4_!o#-sF0>}TL*-QQ6rO__)W^n2AP}((^D#Q|IconL zb$)`HK0%##Jbg^#O6U%PuB0(hCU$YSgrHkVu1>#|xUz1$u|2x#o+=E diff --git a/observability/local/log-sentinel/detectors/__pycache__/token_usage.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/token_usage.cpython-313.pyc deleted file mode 100644 index c7e805eb4c3c8dd84e14ff84794b533124ddb3e8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3801 zcmcgvO>7&-6`tiT|4C64X_=&cypko^Y(*)yYGw#zK?BP)Y4I;svLV_pfI>g#*V6WiK`6gY0FNJP!*blt`^!!#YSPG4s^Bgic6?Y%i%}=zHfBt)OTnU?bBjc~$;pMaC=BR6TFoXwxmwlhD71 zatJd3+$AuW6DeV&2L+yPu_k24*2)#y{bJliR%`@-#=yf`7N7iXI?+@yD99gH;d_Sn;b!44Z z^JhJRZ|W0-q**}Zy6F9PZ_WFiu`If1CBM_RTfr{`V3lA5O}KJ@XbVmdlq_MoC|8{2J&KEW=zeAUZS17N(9-~aZ|A>J%w*(mF3A)V$hjU%Rk zzz{8jV?Zud_x@U1Ts$^tW6N0BG7GYamAoiDvOA%T~4s1u_#og5Y-IOFKNm7!cq=%9|Aj;Z@464Sg8lNf4($b1^ zFI5BlBGwC)yjzPHhQc|>>ejP-Yr0a?b>6KG;$Q+=h zj3H|#n5=?2eSPS#oCU?cqsJCkh{v8GNh{zxF7+{cie3oDFCc`sQFCyp=4(c~KTUp= zTs_{1{6bN7kxq>iWJX zLr;t+srs}~@4ZAtuid})myx%cL;IWKN1J0)FZ}NQ?p6pvpPzlv(=pKX4eE%-TK%Z2 zdzJk-^*`QbV2*i_3=ed@L}A#3N1c7E%ZwSnWyDLbS4Mt-xdr?pSs`~hyMry2frkNW0GIbEzNNQF1D#EYj*5CPR_|Qgo z&+3`Sd%x_>JigiJK5&t8 z`g?y4e{1pf5|%bSAo=YNS6X4{!*g>E$UW4L=C@|E$ay#op#e0%^&p<7I@UMo@*)Ye zd9(+Wm?ApH?mk{54H1g(o#v?@1t!?c|_Cn!f9 zi{GWhGHGXq5#Lceq2I>mL4*dyI1B_#VVGy=?BCJ8uTko2wC|-yV3@>e?@RdgWD+ga Y#WAr}_e%s#tAk^rck|x>AGH$ipVwcF9smFU diff --git a/observability/local/log-sentinel/detectors/__pycache__/tool_patterns.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/tool_patterns.cpython-313.pyc deleted file mode 100644 index 
9f3a527a381ccad72115fba0ace5712c57e8af1d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6283 zcmbVQTW}NC89tI$(n_)}wk2CKFTrVmc15A@Bn(`ou9)etrt(xFZBz?&UN+Aw|Sf6hv_Yy|0a zXLR(R`|ba~|NGBltJRF)JN5HFMc;BF^aaTjKYc-XJP5*d#3PJ&4X-_>IjY54j>x)W zx}ywcsLUMGA2nbDmG#GrM_J5r$c>s1Z>U4OafIp7spDbOIO{c~4<8hgLO97Kd9 z2jVF)DPRyQ_J<@v9X(F+#=}uD5*5cmv>%uX$Apt90Z$zWg(rltx+FDS-tTW9e;th= ztbsjic@5U_TFme|tml~#1Fs)3jUnMZtC$TkX%Qbk*E!5hBU?Mm49B-Qz#X6&uO7uZovf5P~rYpMnGt| zv!^gOhoD9A21=R??_brzSW;CG*NkEMjuzTxHh7SWwL%RZXR?P`aX_!J>vL?m`_^(jZaNpsJ+HHS{JTQ7MU|qbZ`*Fc~8W zu`yf$t5(=CRZ-!73d@4^1cgh2n3N=PHXQksi32-E0+N8Gk;_prIWZCuE{#ahNuQJy zE{AZ0$P=kiUm}(okBTGlQD}N8G#ZUXlT#zHcsLY;blfMws8LafL3d(@aTsouqWBY2 zib1VY)}1P&U4FIF2J4dEfaEHYo0_Lt+2OijnlWYk3l8tJUUt>pI6ZSZb7jHRF>QXx zR@`SB^K9d6%U!lpCb_me+m6M~Gdp085FFHS?{4jGErkia&kE|919U%+i z(3xdp3FwB(m_>2tMH4J(&-^d#eK4lT23RL}D#cCj!HQOq@8z!)&8ea;Y#p&#_o7w?a+@-uuj@7!)Io*IV+eyE=J zxXW9UZayJkA2E|v9&0BDb|-StF>cS+o)*7inuy0QPykSnVxm?hh9-rl)0828S48?-925?4ox02#0MYmugoP4_T`=LZ)caw^TK7bYlcvA-# zj!!1yBH+gEx0=YVu1tm#KL4hsA8=Q=^HN;AWmMQAnkcLUz=Z*G0L-Gvn4lP?)Z}Cc zPbq8|y!?2aNW!IPL=eM*Vu4vM#)21#XCCyLFfnFK?nu!|0LIJ(VMb!A_eZ=ITtj*-BZOj;QsJ;5Nx?AT29jbc?zWI;VgP^&LvBl9$c7XX0bC+c z2!<0=Or(zr{zOSB0;>tGBePYJ1$a+Em$ly4U(SJ$KGJcx`K#!kP2OL6r)>|^zb~Ag z>4S20?Tx;fzRa;~)k1a0v-!VO*M917%l7Jb-(Ik{%8vC5j^?Z;@9^GpG|QE)oM+3u z$}OvlmnxB~?uiq*+!@<_cYEI5zToyPIQ_q?o;!EHr$67*ztD4ZVe7HH^VqcMGiN
TJq8H)i+b+xO<3d#4Y{c4tQOlQfyw)0X%6=Ja__&+Y2Gd)M^KvfZ8O{OMb=!+pa( zWB-*=u5bMC^v%<9{RWUShi98+PiLjs<8ym*b^GOp4Vkgo{_MHDZ(pus|9s>8q50mA zH|F?}+}SsBL!n$KoO>xkMmT%(?86qX+}0sCcjlY>9&~J-^WHwW(6M*Pq+egNSb-YP zX_h?J#;R|SwX*svD{`&J_2e=)TN{U z<}NMZ7hBp!l>q^DTQqzHZ!JZraof1fYft+J;=rJj6Csi7DPhn)EPz!>NlxVvoNo^o z702R!3Ng3=2F4sD>8cek=K@m^elT%MgI+zw4crSE ziWInyNCYUn1{EQQnotg*n8|eN1Q_lQ;oXo(;hyc+V#<`>y`$*PNVa4#9@$Bw1J$t+CsLO_0Ov8Ih}D(Tpl;!07hK70!2#UpxN65!qA! zq35PY_B6zDBIo zzNkYLb<{#u83%EY)Hu!o9+f9e)L7t7sj-w%?NtPc7>jP1!VqI&%91K(#o>yE;cd$_ z$FPbL!A%gF1LTG%;F_;UZorfW5lVr#OuQK|0v+k6#6(+;;G9_|9$;KF>Rml3&+;Zp zE6f3wkc}=qUlB0w(t>QICAKbod3rMZCU5s^fetzN%I$0kbvRL=4prXj^o}For4mKI zEpek$YBWoE!cI<*z&C?m1Sczsz{ept00S5Uo)Gb`SSbe3L5g`L!cdGU@q!q?EK*xl zs-EqQ3BgQP9aXPb=u6u!{HRhV6JHcmyAvdl2?n5lDYNN zcj69^y)5>_AKpw7V7drzB?+-K+y#kZD{cy0YB1S}yPczCSf>`K+whCPwYQXZOoBbvGtvCNfvDrx$8^-_^;Ebqfw|c3)({^%Wj(6KbM*tzde+z`JP+{CAT`} z&d1X{xtcL zpuFvu21^i7@eFYo)VYEd1q`c#C&29zbXXlQ9WJ87at~PdxS>2+@XK^wgaEA?y~@67 zl60Gqqy_wdFpT2IHEQE54J1rm%n9uPFmHlDqFH1i*!ll3#mrlXyB>mB)_wtV5h-kl zI{dN$%bL{!hN-I^zQ)`9T398pQnE@(Cz?dIB&^XDM<>`sk!kr9I5;UuTkJCi=ddDs ztJ2)d;Olq5!(77GL!aj;Sdq{HzzK+SDIPV#t^gWvtwu6K5_K;r0h`m}8KqU&Cyh@E?QeqS+jao*LhfH6k^T+IcfiZqAc4TmzI*n4e~!#YD8w|*p3FO1?>QPN#C%hM z9P8wYs)zQv`}Wqny><5Dg55jKeA?A}*WLHq?pxj2t(kq9uFU9c<*aVD^Zojdx~Hvq zcc08U-mzb^e_)hd9`cAIyVl87?Q%`yZ1byT&r zw~rlYeZNK`bp1}3^FXKJk7mb#KKCtf7Y>CD21%?J3@YYea55fA#fV%HglI7oE7aJ7 z!LcZok|d%NBEH0DG=(xs~cewcpc40;8~dlZ4F}=H zl)lpm5{x%W8UUw3s`p@tL=xg!q!~zlgk($I)q@Z%UF~0F%zAg`_#%S8IcDy)kE~CK zxai)Xug*4Sleac4A`s`9`Pc416^M&RHI;h%+^O5mAHBYaK>WBncjC3b?0!Pz#a5@@ zo{h|P-#WjDK%77LaqXRfCq!KA@aQeYDiGDGe<2Xn)si(1dd-S47>vZjaBh}xJQh<4 zhD1HFVk^e%l7AFl&M^aB6ayTz5R>SUDHa-+!|MS&iPH0=TVHb~3cE|aoIN-WBLE&F oiQvMbPNUI$ieC8=HGGNMK1X$*qqZl8%NmU(v+HX_{^?5p3jjahmH+?% diff --git a/observability/local/log-sentinel/detectors/__pycache__/websocket_health.cpython-313.pyc b/observability/local/log-sentinel/detectors/__pycache__/websocket_health.cpython-313.pyc deleted file mode 100644 index 
24d5c3e979c50c61c51065136f2bd99d1ffbc085..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2856 zcmcguO>7&-6&{kiFgiOPyfEVHoW%CeDKiR=2OiELH2<1y=>FkLLkF}2QW zSDsl)5StV}73!&Q8pASBzy^{-da81&3KXb&DC#2(BEWXg07ilqa4(hM^yk<&yOcyb zN)fa`2jJs-^M2-=H*enT(^#w>!5IA6?~QffJ{Q8)kekfY6TsX?7*T`+I5-nH6Qn^^ z@S&N|8Hq|BFU`nj!ZfU+B+4Qzrx6Ypf`>zX3>v9Ka!PIV6mdw&Q7@4qwo7jlM=jZw z1vAE?;Vi2Q)GnH2o{c{dUbz78IH5D*``8rZ$ZLjGHmnM;@srCXle|$Sbor!SnjpQmYj7;dnF-RAl4nzhL$@I)JQT zK5!w>V$o!A;Ba%+3gtulnzLo_$_HOWIA}@mlk=foH2pV(tZ+WOYm^X{4ma1gBKZiG zr!FI;Ulmah`Hfn!IMtsJOmwhEXdxbuI+AIDv|`~QN!=Q;eJc=&rg{1M! zBhGjBuOQ1!tvtJlP!y?Rrf=#uaCSfNe1lW(0}mdTFOEeK!u_6n9(4chxhC^+C&H<` z1T;M4NgkCpJiUk%WuN!{@6mYRc|;m9>PQTg=h01syC;e}}<)a%T);LpQ!H2Y;l*_^_ave&|F$`uBN z6#5pI9m6pRkFe_eyiS+7Qqmn#u?0sKjWV%Hgm)5F(oLAx99we^@~CN7ZkXB)(ZG0b zks9R+(U_xCN4usQCTw3RL2v_{YAw6;4OQPnb!^phsqH!81@NV&c1o>%>+9@um$kFJ zcW3v_7EQ`eL|fv0JDRe=c9f#0JQ0Ul>z_6%bLx&#rMhF->OLNr;(=VPt3}1sMW$A> z;I;JcpF6ZYKhU8 zKtuO>-lky@uNIl;k!o}Bw@*$L@{CYcSTZbUu23e6g_imh{9Ls-wqRB(hE=eOV7k~$ zOu@9FN`O|z7+fHTZ-TpcXl)Vhxrb_eVVQ@0H6Pl(ThG36Glq+oO|4x099ZAO7!zYX62NdNZy&gHNAT2ezB3rt$g=! 
zJW)^XZ^ZYzUCBG~Tk#JgZf5x2mAhBm%!r#l><%4!ApJr4o$@eMzxe&ecP`bBU9Jy; za`@g`ci(bH_y0Ee>*Ryn!wZcg=O1O>a7RX6^~z><+i=f6P+NEJMjz_wucr?*x(|Fg z{-~RN)g78#JN2vSkEhqKJPbAt&D1BqS09}HkKwO=RruB8w_@)pZg*cjeejd+gKpQr z>WxQT>U+`6qiE02#)$yx=wBUfL_Qt7G-im)DiGTK#ybo-7ON%-zgmb)P#j z{-;d#{wt40j;v2KGDkNfQo3iO9jPz5gVts$mg)Qg#bWV|A=H&zi9dyNL%%?0b5X8n znkZLI&q9Iy82gpl`W zqESI>(sak6MzQJ;243P@i%tsiO+nzL^NA>PHUi|w$c<)ho^s=fo6{T8D{^x6+y;Wb zHEHeA$FYA3*7p~#XW>>wG_7ox0B`4ZIgc0Ni;#+5!^5y0Vlw|yM!iO=d7ob%YJdG_ xF>&QpKLy{W#~^}eXDkC`BNPY({)oo@j7C32=}*z<7vW=p list[Finding]: - findings: list[Finding] = [] - actions = cache.get("ss_actions") - - if not actions: - return findings - - # --- Failure rate --- - total = len(actions) - failures = [a for a in actions if not _is_success(a)] - fail_count = len(failures) - - if total >= 5 and fail_count > 0: - rate = fail_count / total - if rate > 0.20: - severity = "critical" if rate > 0.50 else "warn" - findings.append(Finding( - detector=self.name, - severity=severity, - title=f"Action failure rate {rate:.0%} ({fail_count}/{total})", - summary=f"{fail_count} of {total} actions failed ({rate:.0%})", - category=self.category, - evidence={ - "total": total, - "failures": fail_count, - "rate": round(rate, 3), - }, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="action_result"} | json', - )) - - # --- Consecutive same-action failures --- - _check_consecutive(actions, findings, self) - - return findings - - -def _is_success(line: dict) -> bool: - fields = line.get("fields", {}) - val = fields.get("success") - if isinstance(val, bool): - return val - if isinstance(val, str): - return val.lower() == "true" - return True - - -def _check_consecutive(actions: list[dict], findings: list[Finding], detector: BaseDetector) -> None: - """Detect 3+ consecutive failures of the same action+arg combo.""" - streak_action = None - streak_count = 0 - - for line in actions: - fields = line.get("fields", {}) - combo = f"{fields.get('action', '?')}:{fields.get('arg', 
'')}" - - if not _is_success(line): - if combo == streak_action: - streak_count += 1 - else: - streak_action = combo - streak_count = 1 - else: - # Success resets the streak - if streak_count >= 3: - findings.append(Finding( - detector=detector.name, - severity="warn", - title=f"Consecutive failures: {streak_action} x{streak_count}", - summary=f"Action {streak_action!r} failed {streak_count} times consecutively", - category=detector.category, - evidence={"action_combo": streak_action, "consecutive": streak_count}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="action_result"} | json', - )) - streak_action = None - streak_count = 0 - - # Check trailing streak - if streak_count >= 3: - findings.append(Finding( - detector=detector.name, - severity="warn", - title=f"Consecutive failures: {streak_action} x{streak_count}", - summary=f"Action {streak_action!r} failed {streak_count} times consecutively", - category=detector.category, - evidence={"action_combo": streak_action, "consecutive": streak_count}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="action_result"} | json', - )) diff --git a/observability/local/log-sentinel/detectors/agent_loop.py b/observability/local/log-sentinel/detectors/agent_loop.py deleted file mode 100644 index 5110c5d..0000000 --- a/observability/local/log-sentinel/detectors/agent_loop.py +++ /dev/null @@ -1,133 +0,0 @@ -"""Detect repetitive tool use, retry loops, and agent nesting anomalies.""" - -from collections import Counter, defaultdict - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class AgentLoopDetector(BaseDetector): - name = "agent_loop" - category = "ops" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - tools = cache.get("claude_tools") - agents = cache.get("claude_agents") - - self._check_repetitive_tools(tools, findings) - self._check_retry_loops(tools, findings) - 
self._check_orphan_agents(agents, findings) - self._check_deep_nesting(agents, findings) - - return findings - - def _check_repetitive_tools(self, tools: list[dict], findings: list[Finding]): - """Same tool 15+ times in one session -> warn.""" - session_tool_counts: dict[tuple[str, str], int] = Counter() - for line in tools: - hook = line.get("hook_type", "") - if hook not in ("post-tool-use", "post-tool-use-failure"): - continue - sid = line.get("session_id", "unknown") - tool = line.get("tool_name", "unknown") - session_tool_counts[(sid, tool)] += 1 - - for (sid, tool), count in session_tool_counts.items(): - if count >= 15: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Repetitive tool use: {tool} {count}x", - summary=f"Session {sid} called {tool} {count} times — possible loop", - category=self.category, - evidence={ - "session_id": sid, - "tool_name": tool, - "call_count": count, - }, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - def _check_retry_loops(self, tools: list[dict], findings: list[Finding]): - """More than 3 is_retry=true events -> warn.""" - retry_count = 0 - for line in tools: - is_retry = line.get("is_retry") - if is_retry is True or is_retry == "true": - retry_count += 1 - - if retry_count > 3: - findings.append(Finding( - detector=self.name, - severity="warn", - title="Tool retry loop", - summary=f"{retry_count} tool retries detected — possible stuck loop", - category=self.category, - evidence={"retry_count": retry_count}, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - def _check_orphan_agents(self, agents: list[dict], findings: list[Finding]): - """subagent-start without matching subagent-stop -> warn.""" - started: dict[str, dict] = {} - stopped: set[str] = set() - - for line in agents: - hook = line.get("hook_type", "") - agent_id = line.get("agent_id", "") - if not agent_id: - continue - 
if hook == "subagent-start": - started[agent_id] = line - elif hook == "subagent-stop": - stopped.add(agent_id) - - for agent_id, line in started.items(): - if agent_id not in stopped: - findings.append(Finding( - detector=self.name, - severity="warn", - title="Long-running agent", - summary=f"Subagent {agent_id} started but has no matching stop event", - category=self.category, - evidence={ - "agent_id": agent_id, - "session_id": line.get("session_id", ""), - }, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component="agent"} | json', - )) - - def _check_deep_nesting(self, agents: list[dict], findings: list[Finding]): - """agent_depth >= 3 -> info.""" - seen_depths: set[tuple[str, int]] = set() - - for line in agents: - depth = _safe_int(line.get("agent_depth")) - agent_id = line.get("agent_id", "unknown") - if depth >= 3 and (agent_id, depth) not in seen_depths: - seen_depths.add((agent_id, depth)) - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Deep agent nesting (depth={depth})", - summary=f"Agent {agent_id} reached nesting depth {depth}", - category=self.category, - evidence={ - "agent_id": agent_id, - "agent_depth": depth, - "session_id": line.get("session_id", ""), - }, - logql_query='{app="claude-dev-logging", component="agent"} | json', - )) - - -def _safe_int(val) -> int: - try: - return int(val) - except (TypeError, ValueError): - return 0 diff --git a/observability/local/log-sentinel/detectors/base.py b/observability/local/log-sentinel/detectors/base.py deleted file mode 100644 index 92591e1..0000000 --- a/observability/local/log-sentinel/detectors/base.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Base detector interface for Tier 1.""" - -from abc import ABC, abstractmethod - -from models import Finding -from query_cache import CycleQueryCache - - -class BaseDetector(ABC): - name: str = "base" - category: str = "app" # "app" | "ops" - - @abstractmethod - def detect(self, cache: CycleQueryCache) -> list[Finding]: 
- """Run detection logic against cached query results. Return findings.""" - ... diff --git a/observability/local/log-sentinel/detectors/claude_session.py b/observability/local/log-sentinel/detectors/claude_session.py deleted file mode 100644 index 9994b2b..0000000 --- a/observability/local/log-sentinel/detectors/claude_session.py +++ /dev/null @@ -1,98 +0,0 @@ -"""Detect Claude Code session lifecycle events and anomalies.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class ClaudeSessionDetector(BaseDetector): - name = "claude_session" - category = "ops" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - lifecycle = cache.get("claude_lifecycle") - tools = cache.get("claude_tools") - - # Build set of session_ids that have post-tool-use events - sessions_with_tools: set[str] = set() - for line in tools: - hook = line.get("hook_type", "") - sid = line.get("session_id", "") - if hook == "post-tool-use" and sid: - sessions_with_tools.add(sid) - - # Track sessions that emitted stop hooks (session-end) - sessions_with_stop: set[str] = set() - - for line in lifecycle: - hook = line.get("hook_type", "") - session_id = line.get("session_id", "unknown") - - if hook == "session-start": - findings.append(Finding( - detector=self.name, - severity="info", - title=f"New Claude session: {session_id}", - summary=f"Claude Code session started: {session_id}", - category=self.category, - evidence={"session_id": session_id, "hook_type": hook}, - logql_query='{app="claude-dev-logging", component="lifecycle"} | json', - )) - - elif hook == "pre-compact": - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Context compaction in {session_id}", - summary=f"Claude Code context compaction triggered in session {session_id}", - category=self.category, - evidence={"session_id": session_id, "hook_type": hook}, - logql_query='{app="claude-dev-logging", 
component="lifecycle"} | json', - )) - - elif hook == "session-end": - sessions_with_stop.add(session_id) - duration_ms = _safe_int(line.get("session_duration_ms")) - if duration_ms > 7_200_000: - hours = round(duration_ms / 3_600_000, 1) - findings.append(Finding( - detector=self.name, - severity="warn", - title="Long session >2h", - summary=f"Session {session_id} lasted {hours}h ({duration_ms}ms)", - category=self.category, - evidence={ - "session_id": session_id, - "duration_ms": duration_ms, - }, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component="lifecycle"} | json', - )) - - elif hook == "stop": - sessions_with_stop.add(session_id) - - # Empty session: stop hook emitted but zero tool-use events - for sid in sessions_with_stop: - if sid and sid not in sessions_with_tools: - findings.append(Finding( - detector=self.name, - severity="warn", - title="Empty session", - summary=f"Session {sid} ended with stop hooks but 0 tool-use events", - category=self.category, - evidence={"session_id": sid}, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component="lifecycle"} | json', - )) - - return findings - - -def _safe_int(val) -> int: - """Convert a value to int, returning 0 on failure.""" - try: - return int(val) - except (TypeError, ValueError): - return 0 diff --git a/observability/local/log-sentinel/detectors/error_spike.py b/observability/local/log-sentinel/detectors/error_spike.py deleted file mode 100644 index 91d4cae..0000000 --- a/observability/local/log-sentinel/detectors/error_spike.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Detect error spikes in sim-steward logs.""" - -from collections import Counter - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class ErrorSpikeDetector(BaseDetector): - name = "error_spike" - category = "app" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - errors = cache.get("ss_errors") 
- count = len(errors) - - if count < 3: - return findings - - messages = [e.get("message", "unknown") for e in errors] - top_messages = Counter(messages).most_common(5) - - if count >= 10: - severity = "critical" - else: - severity = "warn" - - escalate = count >= 5 - - findings.append(Finding( - detector=self.name, - severity=severity, - title=f"Error spike: {count} errors", - summary=f"{count} errors detected in window. Top: {top_messages[0][0]!r} ({top_messages[0][1]}x)", - category=self.category, - evidence={ - "count": count, - "top_messages": [{"message": m, "count": c} for m, c in top_messages], - }, - escalate_to_t2=escalate, - logql_query='{app="sim-steward", level="ERROR"} | json', - )) - - return findings diff --git a/observability/local/log-sentinel/detectors/flow_gap.py b/observability/local/log-sentinel/detectors/flow_gap.py deleted file mode 100644 index aeef491..0000000 --- a/observability/local/log-sentinel/detectors/flow_gap.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Detect expected-flow gaps by delegating to the FlowEngine.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class FlowGapDetector(BaseDetector): - name = "flow_gap" - category = "app" - - def __init__(self, flow_engine): - self.flow_engine = flow_engine - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - all_events = cache.get("ss_all") - - # Filter out noise — host_resource_sample events don't participate in flows - meaningful = [e for e in all_events if e.get("event") != "host_resource_sample"] - - if not meaningful: - return findings - - evaluations = self.flow_engine.evaluate(meaningful) - - for evaluation in evaluations: - for gap in evaluation.gaps: - severity = evaluation.flow.gap_severity or "warn" - findings.append(Finding( - detector=self.name, - severity=severity, - title=f"Flow gap: {evaluation.flow.display_name} — missing {gap.step.label}", - 
summary=gap.description or f"Expected step {gap.step.label!r} not found in flow {evaluation.flow.display_name!r}", - category=self.category, - evidence={ - "flow": evaluation.flow.name, - "flow_display": evaluation.flow.display_name, - "missing_step": gap.step.id, - "missing_label": gap.step.label, - "matched_steps": list(evaluation.matched_steps.keys()), - }, - escalate_to_t2=severity in ("warn", "critical"), - flow_context=evaluation.flow.name, - logql_query='{app="sim-steward"} | json', - )) - - return findings diff --git a/observability/local/log-sentinel/detectors/incident_anomaly.py b/observability/local/log-sentinel/detectors/incident_anomaly.py deleted file mode 100644 index da4c62a..0000000 --- a/observability/local/log-sentinel/detectors/incident_anomaly.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Detect incident anomalies — bursts and per-driver accumulation.""" - -from collections import Counter, defaultdict - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class IncidentAnomalyDetector(BaseDetector): - name = "incident_anomaly" - category = "app" - - BURST_WINDOW_SEC = 60 - BURST_THRESHOLD = 5 - DRIVER_WARN_THRESHOLD = 15 - DRIVER_INFO_THRESHOLD = 10 - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - incidents = cache.get("ss_incidents") - - if not incidents: - return findings - - # --- Burst detection: 5+ incidents within 60s --- - timestamps = [] - for line in incidents: - ts = _parse_ts(line) - if ts is not None: - timestamps.append(ts) - - timestamps.sort() - max_burst = _max_count_in_window(timestamps, self.BURST_WINDOW_SEC) - - if max_burst >= self.BURST_THRESHOLD: - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Incident burst: {max_burst} in {self.BURST_WINDOW_SEC}s", - summary=f"{max_burst} incidents detected within a {self.BURST_WINDOW_SEC}s window", - category=self.category, - evidence={"burst_count": 
max_burst, "window_sec": self.BURST_WINDOW_SEC}, - escalate_to_t2=False, - logql_query='{app="sim-steward", event="incident_detected"} | json', - )) - - # --- Per-driver accumulation --- - driver_counts: Counter[str] = Counter() - for line in incidents: - fields = line.get("fields", {}) - driver = fields.get("display_name") or fields.get("unique_user_id") or "unknown" - driver_counts[str(driver)] += 1 - - for driver, count in driver_counts.items(): - if count >= self.DRIVER_WARN_THRESHOLD: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Driver {driver}: {count} incidents", - summary=f"Driver {driver!r} accumulated {count} incidents — exceeds warning threshold", - category=self.category, - evidence={"driver": driver, "incident_count": count}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="incident_detected"} | json', - )) - elif count >= self.DRIVER_INFO_THRESHOLD: - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Driver {driver}: {count} incidents", - summary=f"Driver {driver!r} accumulated {count} incidents", - category=self.category, - evidence={"driver": driver, "incident_count": count}, - escalate_to_t2=False, - logql_query='{app="sim-steward", event="incident_detected"} | json', - )) - - return findings - - -def _parse_ts(line: dict) -> float | None: - raw = line.get("timestamp") - if raw is None: - return None - try: - return float(raw) - except (ValueError, TypeError): - pass - try: - from datetime import datetime - dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00")) - return dt.timestamp() - except Exception: - return None - - -def _max_count_in_window(sorted_ts: list[float], window_sec: int) -> int: - if not sorted_ts: - return 0 - max_count = 0 - left = 0 - for right in range(len(sorted_ts)): - while sorted_ts[right] - sorted_ts[left] > window_sec: - left += 1 - max_count = max(max_count, right - left + 1) - return max_count diff --git 
a/observability/local/log-sentinel/detectors/mcp_health.py b/observability/local/log-sentinel/detectors/mcp_health.py deleted file mode 100644 index f8af698..0000000 --- a/observability/local/log-sentinel/detectors/mcp_health.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Detect MCP tool failures and slow calls.""" - -from collections import Counter - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class McpHealthDetector(BaseDetector): - name = "mcp_health" - category = "ops" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - tools = cache.get("claude_tools") - - mcp_failures: list[dict] = [] - mcp_calls_by_service: Counter = Counter() - slow_calls: list[dict] = [] - - for line in tools: - tool_name = line.get("tool_name", "") - if not tool_name.startswith("mcp__"): - continue - - hook = line.get("hook_type", "") - service = _extract_service(tool_name) - duration_ms = _safe_int(line.get("duration_ms")) - - # Count all MCP calls (post-tool-use and post-tool-use-failure) - if hook in ("post-tool-use", "post-tool-use-failure"): - mcp_calls_by_service[service] += 1 - - # Track failures - if hook == "post-tool-use-failure": - mcp_failures.append(line) - - # Slow call detection - if duration_ms > 30_000: - slow_calls.append(line) - - # MCP failure findings - failure_count = len(mcp_failures) - if failure_count > 0: - if failure_count >= 3: - findings.append(Finding( - detector=self.name, - severity="critical", - title=f"MCP failure storm: {failure_count} failures", - summary=f"{failure_count} MCP tool failures detected — possible service outage", - category=self.category, - evidence={ - "failure_count": failure_count, - "tools": [f.get("tool_name", "unknown") for f in mcp_failures], - }, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - else: - for fail in mcp_failures: - findings.append(Finding( - 
detector=self.name, - severity="warn", - title=f"MCP failure: {fail.get('tool_name', 'unknown')}", - summary=f"MCP tool call failed: {fail.get('tool_name', 'unknown')}", - category=self.category, - evidence={ - "tool_name": fail.get("tool_name"), - "error_type": fail.get("error_type", ""), - "session_id": fail.get("session_id", ""), - }, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - # Slow MCP call findings - for call in slow_calls: - duration = _safe_int(call.get("duration_ms")) - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Slow MCP call: {call.get('tool_name', 'unknown')}", - summary=f"MCP call took {duration}ms (>{30_000}ms threshold)", - category=self.category, - evidence={ - "tool_name": call.get("tool_name"), - "duration_ms": duration, - "session_id": call.get("session_id", ""), - }, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - # Info: MCP call count per service - if mcp_calls_by_service: - findings.append(Finding( - detector=self.name, - severity="info", - title="MCP call summary", - summary=f"MCP calls by service: {dict(mcp_calls_by_service)}", - category=self.category, - evidence={"calls_by_service": dict(mcp_calls_by_service)}, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - return findings - - -def _extract_service(tool_name: str) -> str: - """Extract service name from mcp____ pattern.""" - parts = tool_name.split("__") - return parts[1] if len(parts) >= 2 else "unknown" - - -def _safe_int(val) -> int: - try: - return int(val) - except (TypeError, ValueError): - return 0 diff --git a/observability/local/log-sentinel/detectors/plugin_lifecycle.py b/observability/local/log-sentinel/detectors/plugin_lifecycle.py deleted file mode 100644 index 8bd0bc1..0000000 --- a/observability/local/log-sentinel/detectors/plugin_lifecycle.py +++ /dev/null @@ -1,140 +0,0 @@ -"""Detect plugin lifecycle 
events and restart loops.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class PluginLifecycleDetector(BaseDetector): - name = "plugin_lifecycle" - category = "app" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - lifecycle = cache.get("ss_lifecycle") - - if not lifecycle: - return findings - - plugin_started_count = 0 - has_deploy_marker = False - has_plugin_ready = False - deploy_ts: float | None = None - ready_ts: float | None = None - - for line in lifecycle: - event = line.get("event", "") - - if event == "plugin_started": - plugin_started_count += 1 - findings.append(Finding( - detector=self.name, - severity="info", - title="Plugin started", - summary="sim-steward plugin started (100% logging)", - category=self.category, - evidence={"event": event, "message": line.get("message", "")}, - escalate_to_t2=False, - logql_query='{app="sim-steward"} | json | event="plugin_started"', - )) - - elif event == "iracing_connected": - findings.append(Finding( - detector=self.name, - severity="info", - title="iRacing connected", - summary="iRacing SDK connection established", - category=self.category, - evidence={"event": event}, - escalate_to_t2=False, - logql_query='{app="sim-steward"} | json | event="iracing_connected"', - )) - - elif event == "iracing_disconnected": - findings.append(Finding( - detector=self.name, - severity="info", - title="iRacing disconnected", - summary="iRacing SDK connection lost", - category=self.category, - evidence={"event": event}, - escalate_to_t2=False, - logql_query='{app="sim-steward"} | json | event="iracing_disconnected"', - )) - - elif event == "bridge_start_failed": - findings.append(Finding( - detector=self.name, - severity="critical", - title="Bridge start failed", - summary=f"WebSocket bridge failed to start: {line.get('message', '')}", - category=self.category, - evidence={"event": event, "message": 
line.get("message", "")}, - escalate_to_t2=True, - logql_query='{app="sim-steward"} | json | event="bridge_start_failed"', - )) - - elif event == "deploy_marker": - has_deploy_marker = True - deploy_ts = _parse_ts(line) - - elif event == "plugin_ready": - has_plugin_ready = True - ready_ts = _parse_ts(line) - - # Restart loop: 2+ starts in one window - if plugin_started_count >= 2: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Plugin restart loop ({plugin_started_count} starts)", - summary=f"Plugin started {plugin_started_count} times in window — possible crash loop", - category=self.category, - evidence={"plugin_started_count": plugin_started_count}, - escalate_to_t2=True, - logql_query='{app="sim-steward"} | json | event="plugin_started"', - )) - - # Deploy without ready within 60s - if has_deploy_marker and not has_plugin_ready: - findings.append(Finding( - detector=self.name, - severity="warn", - title="Deploy without plugin_ready", - summary="deploy_marker seen but plugin_ready not received within window", - category=self.category, - evidence={"deploy_marker": True, "plugin_ready": False}, - escalate_to_t2=False, - logql_query='{app="sim-steward"} | json | event=~"deploy_marker|plugin_ready"', - )) - elif has_deploy_marker and has_plugin_ready and deploy_ts and ready_ts: - gap_sec = ready_ts - deploy_ts - if gap_sec > 60: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Slow deploy: plugin_ready {gap_sec:.0f}s after deploy", - summary=f"plugin_ready arrived {gap_sec:.0f}s after deploy_marker (threshold: 60s)", - category=self.category, - evidence={"gap_sec": round(gap_sec, 1)}, - escalate_to_t2=False, - logql_query='{app="sim-steward"} | json | event=~"deploy_marker|plugin_ready"', - )) - - return findings - - -def _parse_ts(line: dict) -> float | None: - raw = line.get("timestamp") - if raw is None: - return None - try: - return float(raw) - except (ValueError, TypeError): - pass - try: - from 
datetime import datetime - dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00")) - return dt.timestamp() - except Exception: - return None diff --git a/observability/local/log-sentinel/detectors/resource_health.py b/observability/local/log-sentinel/detectors/resource_health.py deleted file mode 100644 index da6fb8a..0000000 --- a/observability/local/log-sentinel/detectors/resource_health.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Detect host resource problems — CPU, memory, disk.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class ResourceHealthDetector(BaseDetector): - name = "resource_health" - category = "app" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - samples = cache.get("ss_resources") - - if not samples: - return findings - - cpu_values: list[float] = [] - mem_values: list[float] = [] - - for line in samples: - fields = line.get("fields", {}) - cpu = _to_float(fields.get("cpu_percent")) - mem = _to_float(fields.get("memory_percent")) - - if cpu is not None: - cpu_values.append(cpu) - if mem is not None: - mem_values.append(mem) - - # CPU checks — use max observed value - if cpu_values: - peak_cpu = max(cpu_values) - avg_cpu = sum(cpu_values) / len(cpu_values) - - if peak_cpu > 95: - findings.append(Finding( - detector=self.name, - severity="critical", - title=f"CPU critical: {peak_cpu:.0f}% peak", - summary=f"CPU peaked at {peak_cpu:.0f}% (avg {avg_cpu:.0f}%) across {len(cpu_values)} samples", - category=self.category, - evidence={"peak_cpu": round(peak_cpu, 1), "avg_cpu": round(avg_cpu, 1), "samples": len(cpu_values)}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="host_resource_sample"} | json', - )) - elif peak_cpu > 80: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"CPU elevated: {peak_cpu:.0f}% peak", - summary=f"CPU peaked at {peak_cpu:.0f}% (avg {avg_cpu:.0f}%) across 
{len(cpu_values)} samples", - category=self.category, - evidence={"peak_cpu": round(peak_cpu, 1), "avg_cpu": round(avg_cpu, 1), "samples": len(cpu_values)}, - escalate_to_t2=False, - logql_query='{app="sim-steward", event="host_resource_sample"} | json', - )) - - # Memory checks - if mem_values: - peak_mem = max(mem_values) - avg_mem = sum(mem_values) / len(mem_values) - - if peak_mem > 95: - findings.append(Finding( - detector=self.name, - severity="critical", - title=f"Memory critical: {peak_mem:.0f}% peak", - summary=f"Memory peaked at {peak_mem:.0f}% (avg {avg_mem:.0f}%) across {len(mem_values)} samples", - category=self.category, - evidence={"peak_mem": round(peak_mem, 1), "avg_mem": round(avg_mem, 1), "samples": len(mem_values)}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="host_resource_sample"} | json', - )) - elif peak_mem > 85: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Memory elevated: {peak_mem:.0f}% peak", - summary=f"Memory peaked at {peak_mem:.0f}% (avg {avg_mem:.0f}%) across {len(mem_values)} samples", - category=self.category, - evidence={"peak_mem": round(peak_mem, 1), "avg_mem": round(avg_mem, 1), "samples": len(mem_values)}, - escalate_to_t2=False, - logql_query='{app="sim-steward", event="host_resource_sample"} | json', - )) - - return findings - - -def _to_float(val) -> float | None: - if val is None: - return None - try: - return float(val) - except (ValueError, TypeError): - return None diff --git a/observability/local/log-sentinel/detectors/sentinel_health.py b/observability/local/log-sentinel/detectors/sentinel_health.py deleted file mode 100644 index b895005..0000000 --- a/observability/local/log-sentinel/detectors/sentinel_health.py +++ /dev/null @@ -1,86 +0,0 @@ -"""Self-monitoring detector for the sentinel itself — uses in-memory stats, not Loki.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class 
SentinelHealthDetector(BaseDetector): - name = "sentinel_health" - category = "ops" - - def __init__(self, stats_ref: dict): - """Accept a mutable stats dict updated by sentinel.py each cycle. - - Expected keys: - last_cycle_duration_ms, consecutive_detector_errors, - last_t2_duration_ms, t2_queue_size, cycles_completed - """ - self._stats = stats_ref - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - stats = self._stats - - cycle_ms = _safe_int(stats.get("last_cycle_duration_ms")) - consec_errors = _safe_int(stats.get("consecutive_detector_errors")) - t2_ms = _safe_int(stats.get("last_t2_duration_ms")) - cycles_completed = _safe_int(stats.get("cycles_completed")) - - # Slow cycle - if cycle_ms > 30_000: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Slow cycle ({cycle_ms}ms)", - summary=f"Last sentinel cycle took {cycle_ms}ms (>30s threshold)", - category=self.category, - evidence={"last_cycle_duration_ms": cycle_ms}, - )) - - # Consecutive detector errors - if consec_errors > 2: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Detector failures: {consec_errors} consecutive", - summary=f"{consec_errors} consecutive detector errors — detectors may be broken", - category=self.category, - evidence={"consecutive_detector_errors": consec_errors}, - )) - - # Slow T2 investigation - if t2_ms > 300_000: - findings.append(Finding( - detector=self.name, - severity="warn", - title="T2 investigation very slow (>5min)", - summary=f"Last T2 investigation took {t2_ms}ms ({t2_ms / 60_000:.1f} min)", - category=self.category, - evidence={"last_t2_duration_ms": t2_ms}, - )) - - # Stalled polling: cycles have run before but none recently - # The caller is expected to set "last_cycle_epoch_ms" in stats - # when a cycle completes. If cycles_completed > 0 but no cycle - # has landed recently, the sentinel main loop itself detects this - # and sets "stalled" = True in the stats dict. 
- if cycles_completed > 0 and stats.get("stalled"): - findings.append(Finding( - detector=self.name, - severity="critical", - title="Sentinel polling stalled", - summary=f"Sentinel has completed {cycles_completed} cycles but appears stalled — no recent cycle", - category=self.category, - evidence={"cycles_completed": cycles_completed}, - escalate_to_t2=True, - )) - - return findings - - -def _safe_int(val) -> int: - try: - return int(val) - except (TypeError, ValueError): - return 0 diff --git a/observability/local/log-sentinel/detectors/session_quality.py b/observability/local/log-sentinel/detectors/session_quality.py deleted file mode 100644 index 7bb0caf..0000000 --- a/observability/local/log-sentinel/detectors/session_quality.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Detect session quality issues from session_digest events.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class SessionQualityDetector(BaseDetector): - name = "session_quality" - category = "app" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - digests = cache.get("ss_digests") - - if not digests: - return findings - - for line in digests: - fields = line.get("fields", {}) - track = fields.get("track_display_name", "unknown track") - total_incidents = _to_int(fields.get("total_incidents", 0)) - action_failures = _to_int(fields.get("action_failures", 0)) - p95_latency = _to_float(fields.get("p95_action_latency_ms", 0)) - plugin_errors = _to_int(fields.get("plugin_errors", 0)) - - # Always emit info for completed sessions - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Session complete: {track}, {total_incidents} incidents", - summary=f"Session digest for {track}: {total_incidents} total incidents", - category=self.category, - evidence={ - "track": track, - "total_incidents": total_incidents, - "action_failures": action_failures, - "p95_action_latency_ms": 
p95_latency, - "plugin_errors": plugin_errors, - }, - escalate_to_t2=False, - logql_query='{app="sim-steward", event="session_digest"} | json', - )) - - # Quality warnings - if action_failures > 0: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Session had {action_failures} action failures", - summary=f"Session at {track} completed with {action_failures} action failure(s)", - category=self.category, - evidence={"track": track, "action_failures": action_failures}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="session_digest"} | json', - )) - - if p95_latency and p95_latency > 500: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"High action latency: p95={p95_latency:.0f}ms", - summary=f"Session at {track} had p95 action latency of {p95_latency:.0f}ms (threshold: 500ms)", - category=self.category, - evidence={"track": track, "p95_action_latency_ms": p95_latency}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="session_digest"} | json', - )) - - if plugin_errors > 0: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Session had {plugin_errors} plugin errors", - summary=f"Session at {track} completed with {plugin_errors} plugin error(s)", - category=self.category, - evidence={"track": track, "plugin_errors": plugin_errors}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="session_digest"} | json', - )) - - return findings - - -def _to_int(val) -> int: - try: - return int(val) - except (ValueError, TypeError): - return 0 - - -def _to_float(val) -> float: - try: - return float(val) - except (ValueError, TypeError): - return 0.0 diff --git a/observability/local/log-sentinel/detectors/silent_session.py b/observability/local/log-sentinel/detectors/silent_session.py deleted file mode 100644 index 50a3a17..0000000 --- a/observability/local/log-sentinel/detectors/silent_session.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Detect sessions that go 
silent — iRacing connected but no meaningful events.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class SilentSessionDetector(BaseDetector): - name = "silent_session" - category = "app" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - all_events = cache.get("ss_all") - - if not all_events: - return findings - - has_iracing_connected = False - has_iracing_disconnected = False - resource_only = True - - for line in all_events: - event = line.get("event", "") - - if event == "iracing_connected": - has_iracing_connected = True - elif event == "iracing_disconnected": - has_iracing_disconnected = True - - if event and event != "host_resource_sample": - resource_only = False - - # Session active (connected without disconnect) but only resource samples - session_active = has_iracing_connected and not has_iracing_disconnected - - if session_active and resource_only: - findings.append(Finding( - detector=self.name, - severity="warn", - title="Silent session detected", - summary="iRacing connected but only host_resource_sample events seen — no actions, incidents, or lifecycle events", - category=self.category, - evidence={ - "total_events": len(all_events), - "session_active": True, - "resource_only": True, - }, - escalate_to_t2=True, - logql_query='{app="sim-steward"} | json', - )) - - return findings diff --git a/observability/local/log-sentinel/detectors/stuck_user.py b/observability/local/log-sentinel/detectors/stuck_user.py deleted file mode 100644 index 8f69e0c..0000000 --- a/observability/local/log-sentinel/detectors/stuck_user.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Detect stuck-user patterns — same action repeated rapidly.""" - -from collections import defaultdict - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class StuckUserDetector(BaseDetector): - name = "stuck_user" - category 
= "app" - - WINDOW_SEC = 30 - WARN_THRESHOLD = 4 - CRITICAL_THRESHOLD = 6 - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - actions = cache.get("ss_actions") - - if not actions: - return findings - - # Group by action+arg combo, collect timestamps - combos: dict[str, list[float]] = defaultdict(list) - for line in actions: - fields = line.get("fields", {}) - combo = f"{fields.get('action', '?')}:{fields.get('arg', '')}" - ts = _parse_ts(line) - if ts is not None: - combos[combo].append(ts) - - for combo, timestamps in combos.items(): - timestamps.sort() - # Sliding window: count events within WINDOW_SEC - max_in_window = _max_count_in_window(timestamps, self.WINDOW_SEC) - - if max_in_window >= self.CRITICAL_THRESHOLD: - findings.append(Finding( - detector=self.name, - severity="critical", - title=f"Stuck user: {combo} x{max_in_window} in {self.WINDOW_SEC}s", - summary=f"Action {combo!r} repeated {max_in_window} times within {self.WINDOW_SEC}s — user likely stuck", - category=self.category, - evidence={"combo": combo, "count_in_window": max_in_window, "window_sec": self.WINDOW_SEC}, - escalate_to_t2=True, - logql_query='{app="sim-steward", event="action_result"} | json', - )) - elif max_in_window >= self.WARN_THRESHOLD: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Stuck user: {combo} x{max_in_window} in {self.WINDOW_SEC}s", - summary=f"Action {combo!r} repeated {max_in_window} times within {self.WINDOW_SEC}s — possible stuck user", - category=self.category, - evidence={"combo": combo, "count_in_window": max_in_window, "window_sec": self.WINDOW_SEC}, - escalate_to_t2=False, - logql_query='{app="sim-steward", event="action_result"} | json', - )) - - return findings - - -def _parse_ts(line: dict) -> float | None: - """Extract a numeric timestamp (epoch seconds) from a log line.""" - raw = line.get("timestamp") - if raw is None: - return None - try: - return float(raw) - except (ValueError, 
TypeError): - pass - # Try ISO format - try: - from datetime import datetime, timezone - dt = datetime.fromisoformat(str(raw).replace("Z", "+00:00")) - return dt.timestamp() - except Exception: - return None - - -def _max_count_in_window(sorted_ts: list[float], window_sec: int) -> int: - """Sliding window max count over sorted timestamps.""" - if not sorted_ts: - return 0 - max_count = 0 - left = 0 - for right in range(len(sorted_ts)): - while sorted_ts[right] - sorted_ts[left] > window_sec: - left += 1 - max_count = max(max_count, right - left + 1) - return max_count diff --git a/observability/local/log-sentinel/detectors/token_usage.py b/observability/local/log-sentinel/detectors/token_usage.py deleted file mode 100644 index 59468df..0000000 --- a/observability/local/log-sentinel/detectors/token_usage.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Detect high token usage, expensive sessions, and low cache efficiency.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class TokenUsageDetector(BaseDetector): - name = "token_usage" - category = "ops" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - tokens = cache.get("claude_tokens") - - for line in tokens: - session_id = line.get("session_id", "unknown") - cost = _safe_float(line.get("cost_usd")) - output_tokens = _safe_int(line.get("total_output_tokens")) - input_tokens = _safe_int(line.get("total_input_tokens")) - cache_read = _safe_int(line.get("total_cache_read_tokens")) - - # Cost thresholds (check expensive first to avoid duplicate) - if cost > 5.0: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Expensive session: ${cost:.2f}", - summary=f"Session {session_id} cost ${cost:.2f} (>$5 threshold)", - category=self.category, - evidence={ - "session_id": session_id, - "cost_usd": cost, - "total_output_tokens": output_tokens, - "total_input_tokens": input_tokens, - }, - 
escalate_to_t2=True, - logql_query='{app="claude-token-metrics"} | json', - )) - elif cost > 1.0: - findings.append(Finding( - detector=self.name, - severity="info", - title=f"High-cost session: ${cost:.2f}", - summary=f"Session {session_id} cost ${cost:.2f} (>$1 threshold)", - category=self.category, - evidence={ - "session_id": session_id, - "cost_usd": cost, - "total_output_tokens": output_tokens, - "total_input_tokens": input_tokens, - }, - logql_query='{app="claude-token-metrics"} | json', - )) - - # Token-heavy session - if output_tokens > 100_000: - findings.append(Finding( - detector=self.name, - severity="warn", - title="Token-heavy session", - summary=f"Session {session_id} produced {output_tokens:,} output tokens", - category=self.category, - evidence={ - "session_id": session_id, - "total_output_tokens": output_tokens, - }, - logql_query='{app="claude-token-metrics"} | json', - )) - - # Cache efficiency - denominator = max(input_tokens, 1) - cache_ratio = cache_read / denominator - if cache_ratio < 0.3 and input_tokens > 0: - pct = round(cache_ratio * 100, 1) - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Low cache hit rate ({pct}%)", - summary=f"Session {session_id}: cache read {cache_read:,} / input {input_tokens:,} = {pct}%", - category=self.category, - evidence={ - "session_id": session_id, - "total_cache_read_tokens": cache_read, - "total_input_tokens": input_tokens, - "cache_hit_pct": pct, - }, - logql_query='{app="claude-token-metrics"} | json', - )) - - return findings - - -def _safe_float(val) -> float: - try: - return float(val) - except (TypeError, ValueError): - return 0.0 - - -def _safe_int(val) -> int: - try: - return int(val) - except (TypeError, ValueError): - return 0 diff --git a/observability/local/log-sentinel/detectors/tool_patterns.py b/observability/local/log-sentinel/detectors/tool_patterns.py deleted file mode 100644 index 7128f36..0000000 --- 
a/observability/local/log-sentinel/detectors/tool_patterns.py +++ /dev/null @@ -1,130 +0,0 @@ -"""Detect tool failure rates, permission friction, and error type spikes.""" - -from collections import Counter - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class ToolPatternsDetector(BaseDetector): - name = "tool_patterns" - category = "ops" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - tools = cache.get("claude_tools") - all_events = cache.get("claude_all") - - self._check_failure_rates(tools, findings) - self._check_permission_friction(all_events, findings) - self._check_error_type_spikes(tools, findings) - self._check_tool_distribution(tools, findings) - - return findings - - def _check_failure_rates(self, tools: list[dict], findings: list[Finding]): - """Per-tool failure rate; warn if >15%.""" - success_counts: Counter = Counter() - failure_counts: Counter = Counter() - - for line in tools: - hook = line.get("hook_type", "") - tool = line.get("tool_name", "") - if not tool: - continue - if hook == "post-tool-use": - success_counts[tool] += 1 - elif hook == "post-tool-use-failure": - failure_counts[tool] += 1 - - all_tools = set(success_counts.keys()) | set(failure_counts.keys()) - for tool in all_tools: - total = success_counts[tool] + failure_counts[tool] - if total == 0: - continue - fail_rate = (failure_counts[tool] / total) * 100 - if fail_rate > 15: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"{tool}: {fail_rate:.0f}% failure rate", - summary=f"{tool} failed {failure_counts[tool]}/{total} calls ({fail_rate:.0f}%)", - category=self.category, - evidence={ - "tool_name": tool, - "total_calls": total, - "failures": failure_counts[tool], - "failure_rate_pct": round(fail_rate, 1), - }, - escalate_to_t2=True, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - def 
_check_permission_friction(self, all_events: list[dict], findings: list[Finding]): - """More than 5 permission-request events -> info.""" - perm_count = 0 - for line in all_events: - hook = line.get("hook_type", "") - if hook == "permission-request": - perm_count += 1 - - if perm_count > 5: - findings.append(Finding( - detector=self.name, - severity="info", - title=f"Permission friction: {perm_count} requests", - summary=f"{perm_count} permission requests detected — may slow development flow", - category=self.category, - evidence={"permission_request_count": perm_count}, - logql_query='{app="claude-dev-logging"} | json', - )) - - def _check_error_type_spikes(self, tools: list[dict], findings: list[Finding]): - """Group failures by error_type; escalate connection_refused.""" - error_types: Counter = Counter() - for line in tools: - hook = line.get("hook_type", "") - if hook == "post-tool-use-failure": - err = line.get("error_type", "unknown") - error_types[err] += 1 - - for err_type, count in error_types.most_common(): - escalate = err_type == "connection_refused" - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"Error type spike: {err_type} ({count}x)", - summary=f"Tool error type {err_type!r} occurred {count} times", - category=self.category, - evidence={"error_type": err_type, "count": count}, - escalate_to_t2=escalate, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) - - def _check_tool_distribution(self, tools: list[dict], findings: list[Finding]): - """Info: top-5 tools by call count.""" - call_counts: Counter = Counter() - for line in tools: - hook = line.get("hook_type", "") - tool = line.get("tool_name", "") - if hook in ("post-tool-use", "post-tool-use-failure") and tool: - call_counts[tool] += 1 - - if not call_counts: - return - - top5 = call_counts.most_common(5) - summary_parts = [f"{t} ({c}x)" for t, c in top5] - findings.append(Finding( - detector=self.name, - severity="info", - title="Tool 
usage distribution", - summary=f"Top tools: {', '.join(summary_parts)}", - category=self.category, - evidence={ - "top_tools": [{"tool": t, "count": c} for t, c in top5], - "total_unique_tools": len(call_counts), - }, - logql_query='{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - )) diff --git a/observability/local/log-sentinel/detectors/websocket_health.py b/observability/local/log-sentinel/detectors/websocket_health.py deleted file mode 100644 index 6512976..0000000 --- a/observability/local/log-sentinel/detectors/websocket_health.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Detect WebSocket connectivity problems.""" - -from detectors.base import BaseDetector -from models import Finding -from query_cache import CycleQueryCache - - -class WebSocketHealthDetector(BaseDetector): - name = "websocket_health" - category = "app" - - def detect(self, cache: CycleQueryCache) -> list[Finding]: - findings: list[Finding] = [] - ws_events = cache.get("ss_ws") - - if not ws_events: - return findings - - connects = 0 - disconnects = 0 - - for line in ws_events: - event = line.get("event", "") - - if event == "ws_client_rejected": - findings.append(Finding( - detector=self.name, - severity="warn", - title="WebSocket client rejected", - summary=f"A WebSocket client was rejected: {line.get('message', '')}", - category=self.category, - evidence={"event": event, "message": line.get("message", "")}, - escalate_to_t2=True, - logql_query='{app="sim-steward"} | json | event="ws_client_rejected"', - )) - - elif event == "bridge_start_failed": - findings.append(Finding( - detector=self.name, - severity="critical", - title="WebSocket bridge failed to start", - summary=f"Bridge start failed: {line.get('message', '')}", - category=self.category, - evidence={"event": event, "message": line.get("message", "")}, - escalate_to_t2=True, - logql_query='{app="sim-steward"} | json | event="bridge_start_failed"', - )) - - elif event == "ws_client_connected": - connects += 1 - elif event == 
"ws_client_disconnected": - disconnects += 1 - - # Disconnect:connect ratio check - if disconnects >= 3 and connects > 0 and disconnects / connects > 2: - findings.append(Finding( - detector=self.name, - severity="warn", - title=f"High disconnect ratio ({disconnects}:{connects})", - summary=f"{disconnects} disconnects vs {connects} connects — possible instability", - category=self.category, - evidence={ - "connects": connects, - "disconnects": disconnects, - "ratio": round(disconnects / connects, 2), - }, - escalate_to_t2=False, - logql_query='{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected"', - )) - - return findings diff --git a/observability/local/log-sentinel/evidence.py b/observability/local/log-sentinel/evidence.py new file mode 100644 index 0000000..1a9199a --- /dev/null +++ b/observability/local/log-sentinel/evidence.py @@ -0,0 +1,235 @@ +"""Evidence packet model — pre-assembles log context for T2 consumption. + +T1 identifies an anomaly, then EvidenceBuilder: + 1. Finds which feature invocations contain the anomalous signal + 2. Builds a targeted LogQL query + 3. Pre-fetches up to 50 related log lines from Loki + 4. Packages everything into an EvidencePacket ready for T2 + +T2 receives EvidencePackets — it reasons over pre-assembled evidence, +not raw Loki queries. This dramatically improves T2 output quality. 
+""" + +import logging +import time +import uuid +from dataclasses import dataclass, field + +from loki_client import LokiClient +from trace import FeatureInvocation + +logger = logging.getLogger("sentinel.evidence") + +_MAX_LOG_LINES = 50 + + +@dataclass +class EvidencePacket: + anomaly_id: str + anomaly_description: str + severity: str # "info" | "warn" | "critical" + detector_stream: str # which stream flagged it + invocations: list[FeatureInvocation] # invocations containing the anomaly + related_log_lines: list[dict] # pre-fetched raw log lines (capped at 50) + suggested_logql: str # T1's suggested query for T2 to refine + t1_hypothesis: str # T1's one-sentence best-guess root cause + t1_confidence: float # 0.0 to 1.0 + assembled_at_ns: int + logql_used: str # the actual query used to fetch related_log_lines + + def to_loki_dict(self) -> dict: + """Serializable dict for push to Loki as sentinel_evidence_packet event.""" + return { + "event": "sentinel_evidence_packet", + "component": "log-sentinel", + "domain": "system", + "level": "WARN" if self.severity in ("warn", "critical") else "INFO", + "message": f"[{self.severity.upper()}] {self.anomaly_description[:120]}", + "anomaly_id": self.anomaly_id, + "anomaly_description": self.anomaly_description, + "severity": self.severity, + "detector_stream": self.detector_stream, + "t1_hypothesis": self.t1_hypothesis, + "t1_confidence": self.t1_confidence, + "suggested_logql": self.suggested_logql, + "logql_used": self.logql_used, + "related_lines_count": len(self.related_log_lines), + "invocation_count": len(self.invocations), + "invocation_ids": [inv.invocation_id for inv in self.invocations], + "action_types": list({inv.action_type for inv in self.invocations}), + "assembled_at_ns": self.assembled_at_ns, + } + + def to_prompt_text(self) -> str: + """Format evidence packet as text block for LLM (T2) consumption.""" + lines = [ + f"=== EVIDENCE PACKET {self.anomaly_id} ===", + f"Severity: {self.severity.upper()}", + 
f"Stream: {self.detector_stream}", + f"Anomaly: {self.anomaly_description}", + f"T1 hypothesis: {self.t1_hypothesis or '(none)'}", + f"T1 confidence: {self.t1_confidence:.0%}", + "", + ] + + if self.invocations: + lines.append(f"Affected invocations ({len(self.invocations)}):") + for inv in self.invocations[:5]: + status = "FAILED" if inv.success is False else ("OK" if inv.success else "?") + lines.append( + f" [{status}] {inv.action_type} via {inv.correlation_method} " + f"({inv.duration_ms}ms, {len(inv.events)} events)" + ) + if inv.error: + lines.append(f" error: {inv.error}") + lines.append("") + + if self.related_log_lines: + lines.append(f"Related log lines ({len(self.related_log_lines)}, capped at {_MAX_LOG_LINES}):") + for log in self.related_log_lines[:_MAX_LOG_LINES]: + ts = log.get("timestamp", "")[:19] + evt = log.get("event", log.get("message", ""))[:60] + lvl = log.get("level", "") + err = log.get("error", "") + suffix = f" error={err[:60]}" if err else "" + lines.append(f" {ts} [{lvl}] {evt}{suffix}") + lines.append("") + + lines.append(f"Suggested LogQL for deeper investigation: {self.suggested_logql}") + return "\n".join(lines) + + +class EvidenceBuilder: + """Assembles EvidencePackets from T1 anomaly signals + feature invocations.""" + + def __init__(self, loki: LokiClient): + self.loki = loki + + def build( + self, + anomaly: dict, + invocations: list[FeatureInvocation], + start_ns: int, + end_ns: int, + ) -> EvidencePacket: + """ + Build an EvidencePacket for a single T1 anomaly. 
+ + anomaly dict shape (from T1 LLM output): + id, description, severity, stream, event_type, + hypothesis, confidence, suggested_logql, trace_id + """ + anomaly_id = anomaly.get("id") or str(uuid.uuid4())[:8] + stream = anomaly.get("stream", "sim-steward") + event_type = anomaly.get("event_type", "") + + relevant = self._find_relevant_invocations(anomaly, invocations) + logql = self._build_logql(anomaly, relevant, stream, event_type) + + try: + lines = self.loki.query_lines(logql, start_ns, end_ns, limit=_MAX_LOG_LINES) + except Exception as e: + logger.warning("EvidenceBuilder Loki query failed: %s", e) + lines = [] + + suggested = anomaly.get("suggested_logql") or logql + + return EvidencePacket( + anomaly_id=anomaly_id, + anomaly_description=anomaly.get("description", anomaly.get("title", "")), + severity=anomaly.get("severity", "warn"), + detector_stream=stream, + invocations=relevant, + related_log_lines=lines, + suggested_logql=suggested, + t1_hypothesis=anomaly.get("hypothesis", ""), + t1_confidence=float(anomaly.get("confidence", 0.5)), + assembled_at_ns=int(time.time() * 1e9), + logql_used=logql, + ) + + def build_many( + self, + anomalies: list[dict], + invocations: list[FeatureInvocation], + start_ns: int, + end_ns: int, + ) -> list[EvidencePacket]: + """Build evidence packets for all anomalies. 
Skips on error.""" + packets = [] + for anomaly in anomalies: + try: + packet = self.build(anomaly, invocations, start_ns, end_ns) + packets.append(packet) + except Exception as e: + logger.warning("Failed to build evidence for anomaly %s: %s", anomaly.get("id", "?"), e) + return packets + + # ── Private ─────────────────────────────────────────────────────────── + + def _find_relevant_invocations( + self, anomaly: dict, invocations: list[FeatureInvocation] + ) -> list[FeatureInvocation]: + """Find invocations that contain signals matching this anomaly.""" + # Tier 1: exact trace_id match + trace_id = anomaly.get("trace_id") + if trace_id: + matches = [inv for inv in invocations if inv.invocation_id == trace_id] + if matches: + return matches + + # Tier 2: invocations containing an event of the matching type/stream + anomaly_event = anomaly.get("event_type", "") + anomaly_stream = anomaly.get("stream", "") + anomaly_severity = anomaly.get("severity", "") + + relevant = [] + for inv in invocations: + for ev in inv.events: + stream_match = anomaly_stream and ev.stream == anomaly_stream + event_match = anomaly_event and ev.event_type == anomaly_event + error_match = anomaly_severity == "critical" and ( + ev.raw.get("level", "").upper() == "ERROR" or ev.raw.get("error") + ) + if stream_match or event_match or error_match: + relevant.append(inv) + break + + if relevant: + return relevant + + # Tier 3: failed invocations (best-effort for error anomalies) + failed = [inv for inv in invocations if inv.success is False] + if failed: + return failed[:3] + + # Fallback: first 3 invocations + return invocations[:3] + + def _build_logql( + self, + anomaly: dict, + invocations: list[FeatureInvocation], + stream: str, + event_type: str, + ) -> str: + """Build a targeted LogQL query for fetching related log lines.""" + # Prefer trace_id query if available + trace_ids = [ + inv.invocation_id + for inv in invocations + if inv.correlation_method == "trace_id" + ] + if len(trace_ids) 
== 1: + return f'{{app="{stream}"}} | json | trace_id="{trace_ids[0]}"' + + # Event-type query + if event_type: + return f'{{app="{stream}"}} | json | event="{event_type}"' + + # Severity-based fallback + severity = anomaly.get("severity", "warn") + if severity == "critical": + return f'{{app="{stream}"}} | json | level="ERROR"' + + return f'{{app="{stream}"}} | json' diff --git a/observability/local/log-sentinel/flows/__init__.py b/observability/local/log-sentinel/flows/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/observability/local/log-sentinel/flows/__pycache__/__init__.cpython-313.pyc b/observability/local/log-sentinel/flows/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 512dee39459b5c85f6d38c5a3ecf89128ad19567..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 192 zcmey&%ge<81m2U6W`gL)AOZ#$p^VQgK*m&tbOudEzm*I{OhDdekkqYsXRDad;?$zz znDWfLl8l&?)Uuf3%v{~#lGO6Vq7)E2qclmkAg44vGcP7T38=IzF)1@Av!pU6CqFqc z2S}#t7N_QwWag#j#H8iqmlwyx$7kkcmc+;F6;$5hu*uC&Da}c>D`ExO2Xap_i1Cq` Lk&&^88OQqYCMSMGr~YJ4{TI zsb@OVv8U}AQd2}H9poqDr;xtXX*v^Ul1%$(MoCewo=zsoOeb$Lka69oetXABgvw3d zx-Q=`e0Bz$q@h9IY- zapC0?11|^(SPGa?;k_tPTdMPn@H zHb(F)7+FLvY!xkGem^T(^%?+zQYQb1?H;A+<}H; zIm!pBH9NVJA&swzDzutS0*J(uIJO}`SVM@}_^s^+WEOR!5R%Ef-a;ry*0;0}b$~La z1T>0nQ-c9o;}USFqPZeO_K{5o!Wyecasrd9DCA^8(0+KTCmd2Ftb`|%DfMDFE?o*M z$?*-ZJy5wE*3S-0x&d@% zE^B-wLKGf}XzsEsI?c%{auE-JE2z-D_bU5Kdx3YX@H>|I9oe6L%6F~768e^mK<2uf z*8T|Zo<+liKySg#k~HUtVz#uh#M(Ai;=pNiGbhWIEil}L%KO~t*>di#!BJ59QBf1%cNHB;X-N-@LLCPi?YiIsb}GhmNg9+#)M&dbqpIpEOfw4$+@sCrT3k_tG7W|fnQN)TR3#g)kG98W`vnuTcq&PF&O&9G%oy-B5|9 z1-)S+KuqMR`8ONtRv#kVlyy_eufK8ajcjD4renFLqu{8$?{d$1-}Gi* z{Hv?|>WPB8Zmwsh=iZL46?gZtyZf_kjrU#kH%8`0XGZ7b89DcKzTx|yxDMXgd%wXu zmwGdm3+5ZzrkRHfsy+0%+cS4$=15k_yLVshEwr><@4wbRt(_j)xYY z`_y&tUkb!`^ug1fQx5J!hy9e-hIf&-Sg@o{;MBKVh&+LWlC{r4HM3yYLFl(Ok?JZL ziyNJ%6}k&T2AKXuV*10SP!ys?ns^!XjL}!*#+GvhxJ{+=EZwE^9z$UjdDDC!Ltzu` 
z-$7ATewFArY<-Cy7&DAE#lWjd;UYwdPW?5NWm9dgmrkopSF`EWl_|DuqF^4Q3D>4~ zSLRYHy3L!2b%(7jpeX@m5#otjRa{bHIH@z$>`Hn9$D~L+9n(D0YZ)n~O7Tc6J)V&zaw@ro<`|7;0J$V# zNH!V^=_m!4Agu8o0`?NHj{p*WFv*(u0Du6aF{6@-iFadSsR7oA0=i33icYSO0*JW2 z;rTmYq`5YGz8*@vFs*z8;0h`>poXTop_!rVP;TUt`oLO!mCL?bhZ>%orqr_9EhK_D8EtaFRkKID4Y&)Kl!BX3MD(uq5zPQ zm`lcMod934zfv`ex*$b`XfqK+1%xgLK_J=Mj=)6J1u+CwS`F#(rHe|jFYewHcPi2b zOCDT4gIEM6(#fN#aE-5HOfU2@aAj9algAK!^zSLWY4{;&~dm7D|a$gOlJgijE46Io9 zaUy@1fo$>8Mz*lQ4Vj>?yx+uzx0~37^C6pPJ6tNk1hBb*V;fRnN->&;7;PUkqUuwy zSFD@>mi5r1o-au#q)R3tiF43Q3D}O1Jt3Okb|IJ;> z{6h5qLf`W&LdPgmLGe=%Tv3_FIp+U$r%Py-6VSkK48f052N_a*Vj}86k|-1vWAHfy zIWavc;Q*)6o!uJsV~x6GC-QHCT3LLx1=BRZJZ5MKuTlSmw#XXB7d0|i9zjwudXxV=# z@9+6j?C#F@67S3J%I~F?{VzO3)bc5}n_@FWrMYLhxhLOz z^dsi(i4QG*c6{LYuzI<9aGfC=zGyr8NDt>fJLx~!OikHOZzNrzb&6>f+NP=kLMp8a ziS$G&4su~h9>=@k!g`twFgmU|$D?ZOBBW}gq+lUKD}kDgf}2$9nuUCtOGb`03I-xw zLzJXo(3q$!LxHbK}VOj2{eVdQLx(g>3Xt68z6OvtLDS=A_p z{H_>RvTj@npCOco2mn*uI8j#~acWTBMtkNNLx;Snt#5sqnvJ$+ZQWJt z->d4fO}EBwj^&%W3m)Ix)XY>au1PkhT+i}~m($=%w*7Hf*(KRbf zKSZqHeV_w7+Wx&lYx{KHLf?E}zOH4>hqkvqV31cRwj=M31@CXYYfiqddewovEz>6o zp2oSU*{L-r>#8j}P;1+*_|5oI>+%za^{E?Hs8tG0+Y7te7tg*ubbDy=xh3(A>wV9= zo;$X?zI(#Jx)pXlumiKA6FFT_OVm{tYmu|=`rx&}w~jBW_s)ivedkvkLk|w3dVjG8 zZS!2MezfWVI;9q5{*kZ#Os(ak{nY?Jwmbf;|!$Psb-@ zf;%FSS0|$K#!5{jl1O4jmEkXmR5}vDPs3hJ@}cpC$HtQ}Rj&*&sr&GA1Q4UcX8~yS z5iswlnv6xFs)~~%6RM;{BDWDHAIwI%$PwYD^pMQ!Gsdyx zuz*doMo7&JPl8$lVLt%P0#cA<#dweY3f-gh>Q}$#%Y?Laca1-4i0wEI!vaYgiU6R< gP!#nU8u${~zC>03K)rd?`_S?{MR`^cfjWnO1ENL-Gynhq diff --git a/observability/local/log-sentinel/flows/definitions/capture_incident.yml b/observability/local/log-sentinel/flows/definitions/capture_incident.yml deleted file mode 100644 index 3bd6d68..0000000 --- a/observability/local/log-sentinel/flows/definitions/capture_incident.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: capture_incident -display_name: Capture Incident -description: User triggers capture_incident, plugin records the incident and reports success. 
-source_doc: docs/RULES-ActionCoverage.md -steps: - - id: capture_dispatched - event: action_dispatched - label: Capture incident dispatched - filters: - action: capture_incident - timeout_sec: 0 - optional: false - next: - - capture_result - - id: capture_result - event: action_result - label: Capture incident succeeded - filters: - action: capture_incident - success: "true" - timeout_sec: 5 - optional: false - next: [] -expected_completion_sec: 10 -gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/review_incident.yml b/observability/local/log-sentinel/flows/definitions/review_incident.yml deleted file mode 100644 index f99e866..0000000 --- a/observability/local/log-sentinel/flows/definitions/review_incident.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: review_incident -display_name: Review Incident -description: User clicks an incident row, dashboard dispatches seek, action succeeds. -source_doc: docs/RULES-ActionCoverage.md -steps: - - id: ui_click - event: dashboard_ui_event - label: Incident row clicked - filters: - event_type: click - timeout_sec: 0 - optional: false - next: - - seek_dispatched - - id: seek_dispatched - event: action_dispatched - label: Seek to incident dispatched - filters: - action: seek_to_incident - timeout_sec: 5 - optional: false - next: - - seek_result - - id: seek_result - event: action_result - label: Seek to incident succeeded - filters: - action: seek_to_incident - success: "true" - timeout_sec: 5 - optional: false - next: [] -expected_completion_sec: 15 -gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/session_health.yml b/observability/local/log-sentinel/flows/definitions/session_health.yml deleted file mode 100644 index b633549..0000000 --- a/observability/local/log-sentinel/flows/definitions/session_health.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: session_health -display_name: Session Health -description: Dashboard opens, WebSocket connects, plugin reports ready. 
-source_doc: docs/RULES-ActionCoverage.md -steps: - - id: dashboard_opened - event: dashboard_opened - label: Dashboard opened - filters: {} - timeout_sec: 0 - optional: false - next: - - ws_connected - - id: ws_connected - event: ws_client_connected - label: WebSocket client connected - filters: {} - timeout_sec: 10 - optional: false - next: - - plugin_ready - - id: plugin_ready - event: plugin_ready - label: Plugin ready - filters: {} - timeout_sec: 5 - optional: true - next: [] -expected_completion_sec: 20 -gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/transport_controls.yml b/observability/local/log-sentinel/flows/definitions/transport_controls.yml deleted file mode 100644 index 23bed78..0000000 --- a/observability/local/log-sentinel/flows/definitions/transport_controls.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: transport_controls -display_name: Transport Controls -description: User dispatches a transport action (play, pause, rewind, fast-forward), plugin reports result. -source_doc: docs/RULES-ActionCoverage.md -steps: - - id: transport_dispatched - event: action_dispatched - label: Transport action dispatched - filters: - domain: action - timeout_sec: 0 - optional: false - next: - - transport_result - - id: transport_result - event: action_result - label: Transport action result - filters: - domain: action - timeout_sec: 5 - optional: false - next: [] -expected_completion_sec: 10 -gap_severity: info diff --git a/observability/local/log-sentinel/flows/definitions/walk_driver.yml b/observability/local/log-sentinel/flows/definitions/walk_driver.yml deleted file mode 100644 index e3e5ce7..0000000 --- a/observability/local/log-sentinel/flows/definitions/walk_driver.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: walk_driver -display_name: Walk Driver Incidents -description: User triggers find-driver-incidents, plugin seeks to each, results returned. 
-source_doc: docs/RULES-ActionCoverage.md -steps: - - id: trigger - event: action_dispatched - label: Find driver incidents triggered - filters: - action: find_driver_incidents - timeout_sec: 0 - optional: false - next: - - seek_dispatched - - id: seek_dispatched - event: action_dispatched - label: Seek to incident dispatched - filters: - action: seek_to_incident - timeout_sec: 10 - optional: false - next: - - results - - id: results - event: action_result - label: Driver walk results - filters: - action: find_driver_incidents - timeout_sec: 30 - optional: false - next: [] -expected_completion_sec: 60 -gap_severity: warn diff --git a/observability/local/log-sentinel/flows/definitions/walk_session.yml b/observability/local/log-sentinel/flows/definitions/walk_session.yml deleted file mode 100644 index 92bc239..0000000 --- a/observability/local/log-sentinel/flows/definitions/walk_session.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: walk_session -display_name: Walk Session Incidents -description: User triggers find-all-incidents, plugin seeks to each, results returned. 
-source_doc: docs/RULES-ActionCoverage.md -steps: - - id: trigger - event: action_dispatched - label: Find all incidents triggered - filters: - action: find_all_incidents - timeout_sec: 0 - optional: false - next: - - seek_dispatched - - id: seek_dispatched - event: action_dispatched - label: Seek to incident dispatched - filters: - action: seek_to_incident - timeout_sec: 10 - optional: false - next: - - results - - id: results - event: action_result - label: Session walk results - filters: - action: find_all_incidents - timeout_sec: 60 - optional: false - next: [] -expected_completion_sec: 120 -gap_severity: warn diff --git a/observability/local/log-sentinel/flows/engine.py b/observability/local/log-sentinel/flows/engine.py deleted file mode 100644 index 8fe322d..0000000 --- a/observability/local/log-sentinel/flows/engine.py +++ /dev/null @@ -1,85 +0,0 @@ -"""FlowEngine — load YAML flow definitions and evaluate event sequences.""" - -import os - -import yaml - -from models import FlowDefinition, FlowEvaluation, FlowGap, FlowStep - - -class FlowEngine: - def __init__(self, definitions_dir: str): - self.flows: dict[str, FlowDefinition] = {} - self._load_definitions(definitions_dir) - - def _load_definitions(self, definitions_dir: str): - if not os.path.isdir(definitions_dir): - return - for f in os.listdir(definitions_dir): - if not f.endswith((".yml", ".yaml")): - continue - with open(os.path.join(definitions_dir, f)) as fh: - raw = yaml.safe_load(fh) - flow = self._parse(raw) - self.flows[flow.name] = flow - - def _parse(self, raw: dict) -> FlowDefinition: - steps = [ - FlowStep( - id=s["id"], - event=s["event"], - label=s.get("label", ""), - filters=s.get("filters", {}), - timeout_sec=s.get("timeout_sec", 0), - optional=s.get("optional", False), - next_steps=s.get("next", []), - ) - for s in raw.get("steps", []) - ] - return FlowDefinition( - name=raw["name"], - display_name=raw.get("display_name", raw["name"]), - description=raw.get("description", ""), - 
source_doc=raw.get("source_doc", ""), - steps=steps, - expected_completion_sec=raw.get("expected_completion_sec", 0), - gap_severity=raw.get("gap_severity", "warn"), - ) - - def evaluate(self, events: list[dict], flow_name: str | None = None) -> list[FlowEvaluation]: - results = [] - if flow_name and flow_name in self.flows: - targets = [self.flows[flow_name]] - else: - targets = list(self.flows.values()) - - for flow in targets: - matched: dict[str, dict] = {} - for event in events: - for step in flow.steps: - if step.id in matched: - continue - if event.get("event", "") != step.event: - continue - if step.filters: - fields = event.get("fields", {}) - if not all( - str(fields.get(k, "")).lower() == str(v).lower() - or str(event.get(k, "")).lower() == str(v).lower() - for k, v in step.filters.items() - ): - continue - matched[step.id] = event - - gaps = [ - FlowGap( - step=s, - flow=flow, - description=f"Expected '{s.label}' ({s.event}) not found", - ) - for s in flow.steps - if not s.optional and s.id not in matched - ] - results.append(FlowEvaluation(flow=flow, matched_steps=matched, gaps=gaps)) - - return results diff --git a/observability/local/log-sentinel/grafana_client.py b/observability/local/log-sentinel/grafana_client.py index a4f9162..cc065c5 100644 --- a/observability/local/log-sentinel/grafana_client.py +++ b/observability/local/log-sentinel/grafana_client.py @@ -47,3 +47,18 @@ def annotate_investigation(self, investigation): ) except Exception as e: logger.debug("Grafana investigation annotation error: %s", e) + + def annotate_raw(self, title: str, text: str, tags: list[str]): + try: + requests.post( + f"{self.base_url}/api/annotations", + auth=self.auth, + json={ + "time": int(time.time() * 1000), + "tags": ["log-sentinel"] + [t for t in tags if t], + "text": f"{title}
{text}", + }, + timeout=5, + ) + except Exception as e: + logger.debug("Grafana annotate_raw error: %s", e) diff --git a/observability/local/log-sentinel/investigator/__init__.py b/observability/local/log-sentinel/investigator/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/observability/local/log-sentinel/investigator/__pycache__/__init__.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 4a6ee28de73aaeb1da93b852acb5a79ce69a2354..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 199 zcmXwzK?=e!5JeNKAVLq~q8>oRjeB>3i-C~Z7zfg%B-56jLp+2B@ey;; z8~(tbDbH6d>U@7;n(t(P@gGd{2oI*&njfFc&*C8r%Iwk#UcYP8_Wc-qMl4blL{U8ISwi=Zr_RR+*@DiCT(&VvwWk%ZVZ!?@hlu4;|i R`$eoxZ-oS(jC1a@NMD%+ITHW? diff --git a/observability/local/log-sentinel/investigator/__pycache__/chain.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/chain.cpython-313.pyc deleted file mode 100644 index 56f57f601ac2a5d008918fa34475267934c80841..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12150 zcmb_iYj7LabzZ#R1V{qlQ{;*diJ(YGA|*?No-{?tq$rZY5{@LogF@gEf(0(%}Lb6nW_GCs8Y?t{nK;q zVgZnXEj80C@!rLKoV|PR*>k@0-QBQQj1;7wzV)w>J?#|r8~hU)T87|(Y7>cJD z1*1Hz;1#4)@=8*wcoiwtyqc66UIV4-w02a->u4OKKCK@$@P<(%ZyYu8CQ_$4Z63Ao z7ILjUT{UXut)n*HHd@VB)0AK7?U1KYD%&VVw_hRs=4AYJw2zFxJj_guK;_89c%heA zH#s;pHS8S^42}RrVhN2Oci-R!TN$G~7Wz(ER<5Xmh{Tj5zt|lD< z^o?+A)Loixj1NgqE#>;ckB$F?i?=C11)D_S2Epbi{c1)9e`@&C_%-|VjJA<_Rn2P| zonOc3{rY`s#z1NejL~oO>og#vP;VmjCdLe7E$~+be|oLh~P}V*r$<<5^>0$NT3VqZtsut_^7pBiwLz~_I)L3#)HRIUSw}x@@wc$Efy;!qp z?Z|#Va)Lh=ObHPA@kBY{**bEyD%m#(Hd_BXm^P)Nm`w_w!%1H$m__r|y+1r#) z;Vo#L+yP5QA?u=#iWb|?(b0l7Qq;LUQItZ`x#Lm!eWJpYOF1qocDpp9mKWlDWS*~r zPP`Q}8)nszL91F|qqCLT1e`66Nem5n{62x@1^?9u7r*46VXydw$Xu@wXRikN8N9xf zxY#=%O@t$yKXwtiUI|`|L?iKQ{%9-|j6xpnl@-iiu3-KUzBumrYob07h;Wg3ARsn9 zz0z*Eu?{9Bv_p2C$~ShbHtxzc?phwoHSS5N|H=wAa%QjpYr_U~qeNZdZG&}S;|ZP% 
z71s`0aV5wuXyIw!rlxTONnZJ)tZbuX72=dW%3ECbQWsxwzc|%&akrmWs!!PqmXcBQ z!bo}FIrX$omd_{~DWA$~kbjp-d0w@}85YNDrit~%zFu=_gnZS8nunUPB|$B&&6WhU zHCw9SC)C<2)KXsDtSzauLa>4jTN>e~PbJTsQ4MH0?enf3rvrIS735Lg=BI0?TjV;Q zc7I_XIsKooW4a1EW`LP&m*tmApI&}4&Imm!%!)Jlj9%33Er|h>D*GQs<2grB!5_d% zQI-5xWyzkaFy`+lYI=t(+Ut_9N+qMMEK|NW{rvBT86;KSjS4=u;%->}gxxS#*o`Wf zNu}Mel7ZP-N%mAQehE-aD8|Lvug806FyT=eof=L2 zTMrGjd@vNJ!LuYnU{%=}nrDSXH14K{1?U}&vvfR0qwkZ3&@qmV#;&5s*~kpbg;@8a z|A03yRP<`FzUaOB;cw$0WbG7#Zb(ZGkP;WYQxJ`O zEEW%hf(e05w)(i>#b{wW;w(Q0Adkj&*i~qEi>6SF=ULb?fU#dRjmKb*ioXRKXBUsm zM%Wp*sEy3c!>)f}9U4X8qtDIY`;@MDaZjR+F zHW0ry&x*RjVA04HW*_73SolU*KE4ODUd(o5wimN*%=#b`b%18VVU`!oeDG=j)NVe; z39P6gswp}m+$_tZf(2$0Jef{lP7rGVrh`gvs&f`rJP?H?5du;VcE;rt)o}pEqOMRS zssQzfD%dhndnFi6umX?1UDVFT__<)5@4%^{X7FfU`JLEi3rLND;{ICY8hM}1utCvi zKthiaFhf@aK&bF`iK;LgCtyv~0)WCtR>RZ9ctij$EAPO&;ev_g!qW-yARiYko1RNF z6dGX_Oz~Je7?qw^)Xsw*&BsL(S+RgLRt0M+>ZKYOpq&r$0y`sPSWIewqeP$=(%+%(}mBNZ`yv_^NuI8 zi;Fh2BDpN9@| z!RWOlcv_;iMaqW;^G%2Nm>~3q5^*40!t{JF4sMwf+@iv`2@au|CvfO*;Gzr;9YQ$7 zVAv&X#HPFov>#fE8eM9H7A3S)&~Ru`L5sH3SH7yE`t=2nb)*2Y)C6Q{lB!;MI;nhx zPAWZg0(FD1U*j&d71zaxBKTVV&ozT|uC@K2O6jS2%M7kAwcmFE&)I+2K-8l2c2_Ea0+7@f~j~^GtmHmK+<3y z2?du$RKFUFa6GOJKZqH+W&SW`Com&q651yIC}zW$p$Q}ag+B>bE`x+pAZ7*mrDZ#V zy~Z({z-$t;vyh3}AQ!q6;|0|HB7%_cgS`wt6qQ2U`JCE=?~2d^F~oK1p}l@-WO3v% z>dU`7IyN*=Zcr+RF=cpSq-tEN)w}Ok?_R5M+>GUFcI4~o^DR3c>(s>QoBto3{_nI@ z*MXa@>FWE{?LPu9yfg1;%GY${?Y)l;nj;7?swmsGyuGpLD>Hk%xjN?g-R^j!04V~gwMF&Y!K!Zo1#=6Kw>kQy>MA-#k;u<=V++_$Nga+08e z6#~ME`Vt$Y=m-1cQ~H2SW>iRexRs37twBEnepc;MSF%uyfsh-Z^kp8gf|q5CmB&ME z1@Ft47=xOU{Vs%-X6T3Nz9q@To44qfOC1o1*oL3(lpfybDhROmn4utgSXH;LxL4Nf0+cv#AyaKVY5`uDrTu8s#Xd@Yg_W* z6=(>t3r%} z_z;4(SR(%DV;GQhN>hW0Ao&YCMdyqMLgaA~0xfxR$@XHW*C4u!(0jDpBGK~&GjU~UNAL=yy5@q`cv!HEEW3Tt#gmUCczA-u#0FNV*9 zMiN5cuZLLaQA8b{$-tdO!?0ZG(n>}Y-@~$1P~-GP1xtLFWGjiGB(_;>tzZ|+1I|AO zVS)%5I8JrF!MRr3v~=XnBl+68rDKc7GBxdh>TP=;Zfnc8bddk;4|jIwckX(uQPIxF zCWEtP-A*+$KW?ztt5PS{n<<-p)!Lf1wx;`Y)*Wj$=Tg;TRi^d8UHj_5NOoWZqL+bB z1~R_$nU~LheC&a3I$zg-Hycn)!Lo4c5hHh 
zm>&c^<2S}LO~>zbJ}|%ZX{}?;TEDraf9320^MSQm$N#KrV8jEgWWG$sE>ME zA^#W4zM&q~k5vZD&Ca2nsvqyvz%_9~n*s{t+=)^9XJXX$P#igjl*|%|cf zie{t8M|*+!+?wR4IM;#j?Mzq#e?Da;BZxtOY(6R(LRl7Y^g70L2-YkJGh>XL?i|k; zQ2v+5EejaFQm)>oXR3;Fk;*R zLyw%992}bJp}oUH6KBp0kDn;i4^51p962#OJ_G|s7{)gom^wc>Eb(u$g>xsrR+8)S z3*BArUB_Hq$By{9u4A6E+bl?TS=fA&h0;A1O1E%%7CKzXv9i%mN&Y`eJ1&iQO3?Gq zXJB*JrzCyFqqLUV$S|Na7r?|u5_1ceBH>G}9)BBYP{| zoSfKZvKv$S^%`nh=k4)##(&Y=b7L$uv}SiMjl4N> zEByTz-ygX%@+0MkmLFQOJ;OQs$<*+#toBqQ-_m~j@;jH8x4+A+w)ACN`f@G%Zff5$ zJ=Rg%de_aAvw7*l;)S#sI4#G%Rfi|*@Z=nak@0e5w!M%uy^wEc0sgAhm0HN^n~@Te z2Sjp?gR73CS;x_w<9K1fRL(S&Z)htGcxZK`EQBoDWXjQC5>x&yV#>=76u}5>P2@OK zy|tF2c2lx}gQH(%(ZIF=>MWY;N(Zwf7Ox;rmPS*1s1Eo8XQBj$kpQfwe&&I#y3&Dd zk-0&(qQcQ_MZQN}33d83-XeZ1tpwEKxyP0Ss2B<8Es0SABcweSg4PPhoW!BPfg~Q; z_*7t^Rms!d1V@*D@j?Th;vv2!bO{jma=^S(fHNiOG^M8i>F8+iA{(W9kJ5z3cPCpR zR+cDlT9{`;k_>qiKtDJg(nKS3kvLH3xHSZWNul>C_$g{;Sg-*+#xLAbq=Q?Q1X9EWRgj`bY+~qhD?rs z1Dn-&EovpI0cQv25rPg{3L&tB>wtlw#KuavPI}xApn^*0wtlhQT@qYK6?v>de#6F1KyYg?}{OFq*|DUb;XS4p< zjNO&7hf~A(?K^JnLq<4fZUGzMXk0f^HmCfKqG6U3zC+|4{V81FpN0b$hAvBj)??Za_JP5e-SRl z_DZ@$d~LjVZk2(K0s|Su30leqz`Pay=WBPZD^&*98X$Pjo1Rov5Y|9?qU0?JXi_Co zqWvm)YvGK!9T2P9i`?Lr#Aq7JRYZl+0Z<2p`BXS=P=DoJ_0vdwkp#qlK?=+UPUwM* z)yg$6qo`y;8Dphcl@2Pvz?zc#3aTg>3Q)Vk5h1D~gxVM3#B>Jirw|utfIN~(bth@m z%HYgQ1f%R=v^YqX;u00pN%Jcnx@(VQd|mtnnE6w?M@Vegf^rhuRoIalB(mX~Cul{2 z`kC%oIbC$8@cP0&^{Wd6H2^#E6@W}oE8)cdI*3gw-88%b--F9>z<(s{UWOn19mt@9 zhC)6!d?a*R zrw=UmWatAAYzIHJ)x$~U(zV5FS(<;~XEU>xGtpRPE|!^{f8c*DNAtNl9u6SQ_BSSO zOxy~lHMg#&L*H*-X8*B!xq0QnJ^kHerg`)i=F^W&)V98Lph}%@eB;J9ZXL{7J2LtX z0tA6lM`BL;;L(a#NX?A9?D(}I7o20^YeQoo031&uir3~q;5GPGq0nLtz-c3VJc)Y2 z#R37MFg$t#9uK?9sP32HC+ZKF;4rqFAit5kl1Oplxy?ujMM86I{8DU&7+{_(wj+Rt z>Ch5@lL9_+F%gH45dwE8@&wYRAVbMxhQLht9b|t&tt-`D}Z$-5-b+_Y#3oF*_uH#RzZoQ4t z)~qXa8drMfIt9gYXakE4m0S-&2KKOMI@9sxp%wee%mXbeG2V^es{n5YpHkp^E|wiB zK%?|Mgdl-JwDjcULp0&HyMYk-n40W9M34BDuTZ7{MwDiN&o#!#cN&E6BOAtlgmUl% zo9v{hKP)NRQT`LCL-Q_-K?eR%q4<;<`3+_IjH>#KGJHmvenVM*OYQl+Qms%vrXc%o 
sBc*8n6}3N0?SE*ntr{Bd8yYiBeK|w_b?xu9UX`Nrzu==(g|tZj3r*^e%m4rY diff --git a/observability/local/log-sentinel/investigator/__pycache__/knowledge.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/knowledge.cpython-313.pyc deleted file mode 100644 index 67ac45e53ee80b1ded72bde8d8235740a4f2e50d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2602 zcmb7G(Qex|6m{AT?TiJ({@?<8NmkowyRI9&wSen7jaxejEEhq600J%1HfKr{NXm|j zybRbM4EuU{z=aL3y zMuAMdN~I>1E~WEoCcLpNaec2=duwtMR+6AtiCJp!HX@M<4X^?uP)XqBO4zu8c~T=0 zsdNUUNX39{TDeqmMcx?Qs@FTC?oof->yF=ydhL3Xx?d9jj`9g*I-h~=UhB9wrpK)x z_Q^^SyDgxbgGNk=OpIOAQVHshf)}p?OlQ)zfOT3T$cxN4=}1IgnbeV4N(=gjT1E>x zlas(i3+ZWM6$fSSL96)RXN2e0tQ@|Mg_}v@~)tD7BEytYfLAZNf87`GEttKw|V4qKE|uysaPn) zXmIk9g6>go*x?8b#6)V>CXr>d=Ty=}5;@wJkrsI@o3UIr0fgAi_8TPAWrMU_O3kdQ z?J>Osiw4DoB$!?)4u&nF)Sz57NZBHHjY=sk>TVG5VVR#s6*pjQVubzFR7UGaOGbVz z*X4Fw@Gw>`6F!u9_2J;;ENIssw&1UV2hF9>e%`)0HJMC8qRnS?7Tl-^!^#a6 z=A#ykN*ir#)h2`pdWK+mHPZ{d2{*4{)=kLybs6ne{o59u3B91$Dwv{8Qm-ptx53aRpO_x55!x{~wU=ATN=z8@8L^Q02@P=M(36(xR~ysmqM@95!za zTZA)pkcr&M11ncJ+b5QPkBle{fgx^1V%pm(v(*(%a!;v2y%8y_HO+OUYGk-O=)8H+ zqwWcQ4rtgtEh<@IM@YmhD!aqcFzwU-`jl46&u{GcbnBl>u9BBYlr_J(6f+QHLmdH; z{5q33VZ|3Upb+$C$U;VSF&V^YWs{8$pJ4#KHW{;tm@pQ&7L^!**4mMW;TL5+_k+(3QTY@VX~7?5TpV5zQiOMJ44xIvM4jL&oc1C>Dy(Q5}$V z3-aEsnaRYOg-ojo%m^Qwpej25_Q1A6Q;Pa*(X_ZLg9}Jo`EyZm8LVe5CXwX@@1MII zG}q8Xrp+23&OnQ6F|XX*5jv-R@+y%gFLcnuwPa1*(nniomWJYkZ!F1D<)hIam`>G< zAw>>HDd%#DMqcmm{p6iQy7>-*9MW@Ke=-@Vsfti{l$YFmD>>lWg7ym@a8;7I*75$W ztGi(snM<>J6@gKEozH(j7y6y8f2(CLvnep>;G~x3PdNV)I^GUM| z=((Bjy+KTrR(^f14a|q%S<}JceC;~_@Y;30y|A^i_0_$f(3Ns6pV_dQ Wy|c6PSMBBdJB_dI{IY95!`weayMgNf diff --git a/observability/local/log-sentinel/investigator/__pycache__/prompts.cpython-313.pyc b/observability/local/log-sentinel/investigator/__pycache__/prompts.cpython-313.pyc deleted file mode 100644 index 27e697b3c056656712183f21fad37283611055d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 
HcmV?d00001 literal 5749 zcmcIo-E-4c7MJA@94DlOBuyYKtD4dhHV(8vOKD*?4F(K}V=O}n7=$cc+iGM?kaCB-o@C61p4MEYT}332a`Y*Ks; zWUq&M$D>k2+{d*2l|h94;uL64huTbUMtoxkVV14Ti3f)K9TeyLwF87V`(-SKx7f<4 zEDnjk0`0*VMr9!|goEPSLov*V>3)AqZxq7eAi8&k5RQoNj)>vCepHslQHJ1a4_0zg z9MJCbG}wy;wsK7T^-v5mB8{u3&GZ(&3vL`|y88_~<1FJN{_I zXZ{;)yr-NjkCjv9SRD58DV|=-T+wvR-YV;+BI|IiWe75Ch)XZ`x20^cwvr`l3)8}|3%wx%Tz<>_JU;o& zm7myUZvN5g>ifp6+54KZc_C~gtD{X*~p!*0zlpwsmwzgbkGN;jyF^E#x8@UVF{7Uw0>H)g1 zsmM?OGIeRWbU9nhUJzE+^QG*~609nwVWY2XSX?$_eXE5S`>L9uYDTS(r$`k7)g_^b zh-CtUo0?r0vNxAkN~;^xj35Ep&v<+`l`5{Um!y^D>tgn-@TqB_j3r~ahGnZRVAHe( zMRveIre29g&Zkz^i^c5faw)f-2Lp`gPEiHO(y)mDyTXDFBMe)_uFJMS9Lq8>>?znQ zswUSAlY!*c3J?z112)c7Wrb#;H7r@NSu%393K0Ts)5M!q-E4y6Vs>SHZ7rL>5c&?c zqySd1DdmcegsS${BCSAGs|iY7Hfm^*24z6)1mKcI$&d_#r?wV`+pZE4#vHVw0BoKHEIff^8BHeV6~hKRyZ zXqsc#+b|;pm<6`*0I02+O{t+7+X7tZCRl*p23?d0qlJdZV3(o#frdYJ6l@ZbQ5+kn zUh9>6Q6O88U_(;`jSvSBmNz1>HDKR?qK%QElBLuyU_}qPj}Tf_0I~?^D~;H`ue4!8 z%4}(CFAt^@t zs;ub_M!=08pX6E%BbdfWpv>w~iq%Yzb-m|n1fM7aV3=ntteLdGluUxGs^EdJ3Ic~4 zCNwf^$Nz^+-lU(q6r|UChBzGECaw1$$d+|3-8UZ7i~a^YmkvyQ)%3Q|7QP^+f$72c z=@Ynk`vMJBBd}AII#OGwUyiq<-NW<}2xAkI*1{-r9R`AaE}bnF*NZ)qrytM=#+jW& zENv5cNto^i6qmG)z5wR)*ARxm z(heRmlGUy7f!L;H!J3LJ-Q1F3MsFgV_9ls@!*qjpCh)*CycF`P}$e;(K#29_SMrFFS@k&lK%%*Lhi%>G)puaw8FGXwzP7QQV zd1H9Qk}{o$+1x(hQtzGH_ObV@wUU zEeuR}M?=fcrF(?lsZ-oGO`Wtpc_n~{X{z0BC{||aSP!Fh;swSdcd0iO#82URLug-5 zxQPs@;$Xu|*e2YQ*5LsUOqgjPIOZv&7lxORYxIQBIyIpD++r~89qZ6L6H9Q4wY>d7 zK>gEXiXIihx`)&%`X2ko`mwLH?m?W2gY#JlM2Q4wL;08 zEi9Kx*z#q|}98)#L##8K8hl2s0rf~F>?;oS*=t}Za|EI-yd>$U* zX9-9C0B*a{NF?HM7yro}?It*W{NH>mGSlVY@+=CEUG}(tdM@U2)7=mD&cpX?_gEpa z#Qz+d{#$a&oqorCcf-AX*S)87k}8wD=PulHE6A;V(MfKCd2;E68zQ2E6>?BL{`HCxCd9vbOD|M3B>GRbt$E82x;d+6;M89(URUUr9jIHtj zH=XFd#^C0-_zYEFb<^j6i2eX*xt?`k1_m6L=L^jG8qb#9x7#56Iubaz$ksrg1$uxX zr@8n(&>c?p@m2Tjf$VqZI+f@iW)O>9e6|hG%EfkZdM@XiFuLIAhjJG9=NWR)6Ra^Je;mL*qKJ4qBE6x`x<_w$WT@|K^~#|+}L z2p2#2aucf2^Fl!ZG6OYRLQ)}2oBE~O2$ekYAOi+TG?yWnn zROuuYh#nZR?ZaH;{Z{-U7^vhWo^`YUFmJv}G?2rgME9C+brY-(y`q(n=x#8pM-$(w 
dk57Id`~LQia|`a&!jF^3{>2p{5uOdM{{VmL{dfQX diff --git a/observability/local/log-sentinel/investigator/chain.py b/observability/local/log-sentinel/investigator/chain.py deleted file mode 100644 index d293de8..0000000 --- a/observability/local/log-sentinel/investigator/chain.py +++ /dev/null @@ -1,232 +0,0 @@ -"""InvestigationChain — Tier 2 LLM-driven investigation of findings.""" - -import json -import logging -import re -import time - -import requests - -from investigator.knowledge import SYSTEM_PROMPT -from investigator.prompts import ( - GATHER_SPECS, - INVESTIGATION_PROMPT, - PATTERN_ANALYSIS_PROMPT, -) -from loki_client import LokiClient -from models import Finding, Investigation, TimeWindow - -logger = logging.getLogger("sentinel.investigator") - - -class InvestigationChain: - def __init__(self, ollama_url: str, model_fast: str, model_deep: str, loki: LokiClient): - self.ollama_url = ollama_url.rstrip("/") - self.model_fast = model_fast - self.model_deep = model_deep - self.loki = loki - - # ── Public API ── - - def investigate(self, finding: Finding) -> Investigation: - """Gather context, call fast model, extract structured result. 
Escalate to deep model on low confidence.""" - gather_start = time.time() - context_lines = self._gather(finding) - gather_ms = int((time.time() - gather_start) * 1000) - - total_lines = sum(len(v) for v in context_lines.values()) - - prompt = INVESTIGATION_PROMPT.format( - title=finding.title, - detector=finding.detector, - severity=finding.severity, - summary=finding.summary, - evidence=self._format_evidence(finding), - context=self._format_context(context_lines), - ) - - infer_start = time.time() - raw = self._call_ollama(self.model_fast, SYSTEM_PROMPT, prompt) - infer_ms = int((time.time() - infer_start) * 1000) - - parsed = self._extract(raw) - - # Escalate to deep model on low confidence - model_used = self.model_fast - if parsed.get("confidence", "low") == "low" and self.model_deep != self.model_fast: - logger.info("Low confidence from fast model, escalating to %s", self.model_deep) - deep_start = time.time() - raw_deep = self._call_ollama(self.model_deep, SYSTEM_PROMPT, prompt) - deep_ms = int((time.time() - deep_start) * 1000) - parsed = self._extract(raw_deep) - raw = raw_deep - infer_ms += deep_ms - model_used = self.model_deep - - return Investigation( - finding=finding, - root_cause=parsed.get("root_cause", "Unable to determine root cause."), - correlation=parsed.get("correlation", "No correlations identified."), - impact=parsed.get("impact", "Impact unknown."), - recommendation=parsed.get("recommendation", "Investigate manually."), - confidence=parsed.get("confidence", "low"), - issue_type=parsed.get("issue_type", "unknown"), - trigger="escalation", - raw_response=raw, - model=model_used, - inference_duration_ms=infer_ms, - gather_duration_ms=gather_ms, - context_lines_gathered=total_lines, - ) - - def investigate_patterns(self, recent_findings: list[Finding]) -> Investigation: - """Proactive T2: analyze recent findings for cross-cutting patterns.""" - summaries = "\n".join( - f"- [{f.severity}] {f.detector}: {f.title} — {f.summary}" - for f in 
recent_findings - ) - - prompt = PATTERN_ANALYSIS_PROMPT.format( - count=len(recent_findings), - window_min=5, - finding_summaries=summaries, - ) - - infer_start = time.time() - raw = self._call_ollama(self.model_fast, SYSTEM_PROMPT, prompt) - infer_ms = int((time.time() - infer_start) * 1000) - - parsed = self._extract(raw) - - # Use first finding as the anchor - anchor = recent_findings[0] if recent_findings else Finding( - detector="pattern_analysis", - severity="info", - title="Pattern analysis", - summary="No findings to analyze.", - ) - - return Investigation( - finding=anchor, - root_cause=parsed.get("root_cause", "No common root cause identified."), - correlation=parsed.get("correlation", "No correlations identified."), - impact=parsed.get("impact", "Impact unknown."), - recommendation=parsed.get("recommendation", "Continue monitoring."), - confidence=parsed.get("confidence", "low"), - issue_type=parsed.get("issue_type", "unknown"), - trigger="proactive", - raw_response=raw, - model=self.model_fast, - inference_duration_ms=infer_ms, - gather_duration_ms=0, - context_lines_gathered=0, - ) - - # ── Ollama call ── - - def _call_ollama(self, model: str, system: str, prompt: str) -> str: - """POST /api/generate to Ollama. 
Returns raw text response.""" - try: - resp = requests.post( - f"{self.ollama_url}/api/generate", - json={ - "model": model, - "system": system, - "prompt": prompt, - "stream": False, - "options": { - "temperature": 0.3, - "num_predict": 1024, - }, - }, - timeout=600, - ) - if resp.status_code != 200: - logger.warning("Ollama returned %d: %s", resp.status_code, resp.text[:200]) - return "" - return resp.json().get("response", "") - except requests.exceptions.Timeout: - logger.warning("Ollama request timed out (model=%s)", model) - return "" - except Exception as e: - logger.warning("Ollama call failed: %s", e) - return "" - - # ── Extract structured fields from raw LLM output ── - - def _extract(self, raw: str) -> dict: - """Parse ROOT_CAUSE, CORRELATION, IMPACT, RECOMMENDATION, CONFIDENCE, ISSUE_TYPE from raw text.""" - result = {} - - patterns = { - "root_cause": r"ROOT_CAUSE:\s*(.+?)(?=\n(?:CORRELATION|IMPACT|RECOMMENDATION|CONFIDENCE|ISSUE_TYPE):|$)", - "correlation": r"CORRELATION:\s*(.+?)(?=\n(?:IMPACT|RECOMMENDATION|CONFIDENCE|ISSUE_TYPE):|$)", - "impact": r"IMPACT:\s*(.+?)(?=\n(?:RECOMMENDATION|CONFIDENCE|ISSUE_TYPE):|$)", - "recommendation": r"RECOMMENDATION:\s*(.+?)(?=\n(?:CONFIDENCE|ISSUE_TYPE):|$)", - "confidence": r"CONFIDENCE:\s*(low|medium|high)", - "issue_type": r"ISSUE_TYPE:\s*(bug|config|performance|security|unknown)", - } - - for key, pattern in patterns.items(): - match = re.search(pattern, raw, re.DOTALL | re.IGNORECASE) - if match: - result[key] = match.group(1).strip() - - # Normalize confidence - confidence = result.get("confidence", "low").lower() - if confidence not in ("low", "medium", "high"): - confidence = "low" - result["confidence"] = confidence - - # Normalize issue_type - issue_type = result.get("issue_type", "unknown").lower() - if issue_type not in ("bug", "config", "performance", "security", "unknown"): - issue_type = "unknown" - result["issue_type"] = issue_type - - return result - - # ── Gather context from Loki ── - - def 
_gather(self, finding: Finding) -> dict[str, list[dict]]: - """Run GATHER_SPECS queries for the finding's detector, return label -> lines.""" - specs = GATHER_SPECS.get(finding.detector, []) - if not specs: - logger.debug("No gather specs for detector %s", finding.detector) - return {} - - result: dict[str, list[dict]] = {} - for spec in specs: - window = TimeWindow.from_now(spec.lookback_sec) - lines = self.loki.query_lines(spec.logql, window.start_ns, window.end_ns, limit=spec.limit) - result[spec.label] = lines - - return result - - # ── Format helpers ── - - @staticmethod - def _format_evidence(finding: Finding) -> str: - """Format finding evidence as indented JSON.""" - if not finding.evidence: - return "(no evidence attached)" - try: - return json.dumps(finding.evidence, indent=2, default=str) - except (TypeError, ValueError): - return str(finding.evidence) - - @staticmethod - def _format_context(context_lines: dict[str, list[dict]]) -> str: - """Format gathered context as numbered lists per label.""" - if not context_lines: - return "(no additional context gathered)" - - sections = [] - for label, lines in context_lines.items(): - if not lines: - sections.append(f"[{label}]: (0 lines)") - continue - numbered = "\n".join(f" {i+1}. {json.dumps(line, default=str)}" for i, line in enumerate(lines[:50])) - sections.append(f"[{label}] ({len(lines)} lines):\n{numbered}") - - return "\n\n".join(sections) diff --git a/observability/local/log-sentinel/investigator/knowledge.py b/observability/local/log-sentinel/investigator/knowledge.py deleted file mode 100644 index cc4b62c..0000000 --- a/observability/local/log-sentinel/investigator/knowledge.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Domain knowledge system prompt for the Log Sentinel investigator.""" - -SYSTEM_PROMPT = """\ -You are a diagnostic analyst for SimSteward, an iRacing incident-review tool. - -ARCHITECTURE: -- C# SimHub plugin (.NET 4.8) reads iRacing shared memory via IRSDKSharper. 
-- Plugin exposes actions over a Fleck WebSocket bridge (0.0.0.0). -- Browser dashboard (HTML/JS ES6+) served by SimHub HTTP, connects via WS. -- All components emit structured JSON logs shipped to Loki. - -LOG SCHEMA: -- Labels: app (sim-steward|claude-dev-logging), env, level, component, event, domain. -- Domains: action, ui, iracing, system. Components: plugin, bridge, dashboard, lifecycle. -- Key events: action_dispatched, action_result, dashboard_ui_event, ws_client_connected, - ws_client_disconnected, incident_detected, iracing_session_start, iracing_session_end, - iracing_mode_change, iracing_replay_seek, host_resource_sample, plugin_ready. - -USER WORKFLOWS: -1. Session health: dashboard opens -> WS connects -> plugin ready. -2. Review incident: click row -> seek_to_incident dispatched -> result. -3. Walk driver: find_driver_incidents -> seek per incident -> results. -4. Walk session: find_all_incidents -> seek per incident -> results. -5. Capture incident: capture_incident dispatched -> result. -6. Transport controls: play/pause/rewind dispatched -> result. -7. Silent session: iRacing connected but no meaningful events. - -CLAUDE CODE / MCP: -- Claude hooks emit to app=claude-dev-logging with component=lifecycle|tool|mcp-*|agent. -- Hook types: session-start, session-end, stop, pre-compact, post-tool-use. -- MCP tool calls tracked by tool_name, session_id, duration_ms. - -iRACING SPECIFICS: -- Incident deltas: 1x off-track, 2x wall/spin, 4x heavy contact. -- Admin limitation: live races show 0 incidents for non-admin drivers. -- Replay at 16x batches YAML incident events; cross-ref CarIdxGForce + CarIdxTrackSurface. -- replayFrameNum/replayFrameNumEnd are inverted vs SDK naming in plugin code. - -COMMON FAILURES: -- WS bridge_start_failed: port conflict or firewall. -- Action consecutive failures: stuck user retrying broken action. -- Silent session: plugin connected but dashboard never loads or WS rejected. 
-- Error spikes: usually deploy regression or iRacing API timeout. -- Empty Claude session: hooks fire but no tool use (config or auth issue). - -Analyze evidence. Be specific. Cite log events and timestamps.\ -""" diff --git a/observability/local/log-sentinel/investigator/prompts.py b/observability/local/log-sentinel/investigator/prompts.py deleted file mode 100644 index 2976c9e..0000000 --- a/observability/local/log-sentinel/investigator/prompts.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Prompt templates and gather specifications for the investigator.""" - -from dataclasses import dataclass, field - - -@dataclass -class GatherQuery: - label: str - logql: str - lookback_sec: int = 300 - limit: int = 100 - - -# ── Investigation prompt ── - -INVESTIGATION_PROMPT = """\ -FINDING: {title} -DETECTOR: {detector} -SEVERITY: {severity} -SUMMARY: {summary} - -EVIDENCE: -{evidence} - -GATHERED CONTEXT: -{context} - -Analyze the finding and gathered context. Respond with EXACTLY these sections: - -ROOT_CAUSE: -CORRELATION: -IMPACT: -RECOMMENDATION: -CONFIDENCE: -ISSUE_TYPE: -""" - - -# ── Pattern analysis prompt (proactive T2) ── - -PATTERN_ANALYSIS_PROMPT = """\ -RECENT FINDINGS ({count} in last {window_min} minutes): -{finding_summaries} - -Analyze these findings for cross-cutting patterns, systemic issues, or escalating trends. -Respond with EXACTLY these sections: - -ROOT_CAUSE: -CORRELATION: -IMPACT: -RECOMMENDATION: -CONFIDENCE: -ISSUE_TYPE: -""" - - -# ── Gather specifications per detector ── -# Each detector maps to a list of GatherQuery objects whose results provide -# context for the LLM investigation. Queries are aligned to the cache keys -# defined in query_cache.QUERIES so results are already warm when possible. 
- -GATHER_SPECS: dict[str, list[GatherQuery]] = { - # ── app detectors ── - "action_failure": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 200), - GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 50), - ], - "error_spike": [ - GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 200), - GatherQuery("warnings", '{app="sim-steward", level="WARN"} | json', 300, 100), - GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|plugin_stopped|deploy_marker"', 300, 50), - ], - "silent_session": [ - GatherQuery("all_events", '{app="sim-steward"} | json', 300, 200), - GatherQuery("ws_events", '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected"', 300, 50), - GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"iracing_connected|iracing_disconnected|plugin_ready"', 300, 50), - ], - "stuck_user": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - GatherQuery("ui_events", '{app="sim-steward", event="dashboard_ui_event"} | json', 300, 100), - GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 50), - ], - "websocket_health": [ - GatherQuery("ws_events", '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected|bridge_start_failed"', 300, 200), - GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|bridge_starting"', 300, 50), - GatherQuery("errors", '{app="sim-steward", level="ERROR"} | json', 300, 50), - ], - # ── ops detectors ── - "claude_session": [ - GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 200), - GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 100), - 
GatherQuery("errors", '{app="claude-dev-logging", level="ERROR"} | json', 300, 50), - ], - "claude_tool_failure": [ - GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 200), - GatherQuery("errors", '{app="claude-dev-logging", level="ERROR"} | json', 300, 50), - GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), - ], - "claude_token_burn": [ - GatherQuery("tokens", '{app="claude-token-metrics"} | json', 300, 200), - GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), - GatherQuery("agents", '{app="claude-dev-logging", component="agent"} | json', 300, 50), - ], - "claude_agent_loop": [ - GatherQuery("agents", '{app="claude-dev-logging", component="agent"} | json', 300, 200), - GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 100), - GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), - ], - "claude_error_spike": [ - GatherQuery("errors", '{app="claude-dev-logging", level="ERROR"} | json', 300, 200), - GatherQuery("lifecycle", '{app="claude-dev-logging", component="lifecycle"} | json', 300, 50), - GatherQuery("tools", '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', 300, 100), - ], - # ── flow-based detectors ── - "flow_session_health": [ - GatherQuery("ws_events", '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected"', 300, 100), - GatherQuery("lifecycle", '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|dashboard_opened"', 300, 50), - ], - "flow_review_incident": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - GatherQuery("ui_events", '{app="sim-steward", event="dashboard_ui_event"} | json', 300, 100), - ], - "flow_walk_driver": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - 
GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 200), - GatherQuery("incidents", '{app="sim-steward", event="incident_detected"} | json', 300, 100), - ], - "flow_walk_session": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 200), - GatherQuery("incidents", '{app="sim-steward", event="incident_detected"} | json', 300, 100), - ], - "flow_capture_incident": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 100), - ], - "flow_transport_controls": [ - GatherQuery("action_results", '{app="sim-steward", event="action_result"} | json', 300, 200), - GatherQuery("action_dispatched", '{app="sim-steward", event="action_dispatched"} | json', 300, 100), - ], -} diff --git a/observability/local/log-sentinel/loki_client.py b/observability/local/log-sentinel/loki_client.py index ebbc357..7de2c9e 100644 --- a/observability/local/log-sentinel/loki_client.py +++ b/observability/local/log-sentinel/loki_client.py @@ -132,9 +132,10 @@ def push_investigation(self, investigation, env: str = "local"): self.push(entry, env) def push_cycle(self, cycle_data: dict, env: str = "local"): + anomaly_count = cycle_data.get("anomaly_count", cycle_data.get("finding_count", 0)) entry = { "level": "INFO", - "message": f"Cycle #{cycle_data['cycle_num']}: {cycle_data['finding_count']} findings, {cycle_data['escalated_count']} escalated", + "message": f"Cycle #{cycle_data['cycle_num']}: {anomaly_count} anomalies", "component": "log-sentinel", "event": "sentinel_cycle", "domain": "system", @@ -164,6 +165,56 @@ def push_t2_run(self, t2_data: dict, env: str = "local"): } self.push(entry, env) + def push_analyst_run(self, run_data: dict, env: str = "local"): + tier = 
run_data.get("tier", "t1") + entry = { + "level": "INFO", + "message": f"Analyst {tier}: model={run_data.get('model','?')} anomalies={run_data.get('anomaly_count', run_data.get('logql_queries_generated', '?'))} duration={run_data.get('duration_ms','?')}ms", + "component": "log-sentinel", + "event": "sentinel_analyst_run", + "domain": "system", + **run_data, + } + self.push(entry, env) + + def push_timeline(self, timeline_data: dict, env: str = "local"): + entry = { + "level": "INFO", + "message": f"Timeline: {timeline_data.get('event_count', 0)} events, {timeline_data.get('session_count', 0)} sessions", + "component": "log-sentinel", + "event": "sentinel_timeline_built", + "domain": "system", + **timeline_data, + } + self.push(entry, env) + + def push_investigation_v2(self, t2_result, anomalies: list, env: str = "local"): + from analyst import T2Result + entry = { + "level": "INFO", + "message": f"Investigation [{t2_result.confidence}]: {t2_result.root_cause[:120]}", + "component": "log-sentinel", + "event": "sentinel_investigation", + "domain": "system", + "anomaly_ids": [a.get("id", "") for a in anomalies if a.get("needs_t2")], + "root_cause": t2_result.root_cause, + "issue_type": t2_result.issue_type, + "confidence": t2_result.confidence, + "correlation": t2_result.correlation, + "impact": t2_result.impact, + "recommendation": t2_result.recommendation, + "logql_queries_used": t2_result.logql_queries_used, + "logql_gather_duration_ms": t2_result.logql_gather_duration_ms, + "inference_duration_ms": t2_result.inference_duration_ms, + "sentry_worthy": t2_result.sentry_worthy, + "model": t2_result.model, + } + self.push(entry, env) + + def annotate_raw(self, *args, **kwargs): + """Stub — annotate_raw is called on grafana_client, not loki_client.""" + pass + def push_sentry_event(self, sentry_data: dict, env: str = "local"): entry = { "level": "INFO", @@ -174,3 +225,106 @@ def push_sentry_event(self, sentry_data: dict, env: str = "local"): **sentry_data, } 
self.push(entry, env) + + # ── v3 push helpers ────────────────────────────────────────────────────── + + def push_evidence_packet(self, packet, env: str = "local"): + """Push sentinel_evidence_packet — T1's pre-assembled anomaly context.""" + entry = packet.to_loki_dict() + self.push(entry, env) + + def push_t2_investigation(self, t2_result, packet_dicts: list, env: str = "local"): + """Push sentinel_t2_investigation — T2's investigation result.""" + entry = { + "level": "INFO", + "message": f"T2 investigation [{t2_result.confidence}]: {t2_result.root_cause[:120]}", + "component": "log-sentinel", + "event": "sentinel_t2_investigation", + "domain": "system", + "root_cause": t2_result.root_cause, + "issue_type": t2_result.issue_type, + "confidence": t2_result.confidence, + "correlation": t2_result.correlation, + "impact": t2_result.impact, + "recommendation": t2_result.recommendation, + "sentry_worthy": t2_result.sentry_worthy, + "sentry_fingerprint": t2_result.sentry_fingerprint, + "sentry_event_id": t2_result.sentry_event_id or "", + "evidence_packet_count": t2_result.evidence_packet_count, + "anomaly_ids": [p.get("anomaly_id", "") for p in packet_dicts], + "logql_queries_used": t2_result.logql_queries_used, + "logql_gather_duration_ms": t2_result.logql_gather_duration_ms, + "inference_duration_ms": t2_result.inference_duration_ms, + "model": t2_result.model, + } + self.push(entry, env) + + def push_synthesis(self, t3_result, trigger: str = "scheduled", env: str = "local"): + """Push sentinel_synthesis — T3's period synthesis summary.""" + entry = { + "level": "INFO", + "message": f"T3 synthesis [{trigger}]: {t3_result.sessions_analyzed} sessions, " + f"{len(t3_result.recurring_patterns)} patterns", + "component": "log-sentinel", + "event": "sentinel_synthesis", + "domain": "system", + "trigger": trigger, + "period_summary": t3_result.period_summary[:500], + "sessions_analyzed": t3_result.sessions_analyzed, + "features_worked": t3_result.features_worked, + 
"features_failed": t3_result.features_failed, + "recurring_pattern_count": len(t3_result.recurring_patterns), + "regression_detected": t3_result.regression_detected, + "regression_detail": t3_result.regression_detail[:200], + "action_items": t3_result.action_items[:5], + "baselines_updated": t3_result.baselines_updated, + "threshold_recommendation_count": len(t3_result.threshold_recommendations), + "model": t3_result.model, + "inference_duration_ms": t3_result.inference_duration_ms, + } + self.push(entry, env) + + def push_narrative(self, narrative_dict: dict, env: str = "local"): + """Push sentinel_narrative — T3's per-session story.""" + entry = { + "level": "INFO", + "message": f"Session narrative: {narrative_dict.get('session_id', '?')[:12]}", + "component": "log-sentinel", + "event": "sentinel_narrative", + "domain": "system", + "session_id": narrative_dict.get("session_id", ""), + "narrative_text": narrative_dict.get("narrative_text", "")[:1000], + "features_worked": narrative_dict.get("features_worked", []), + "features_failed": narrative_dict.get("features_failed", []), + "invocation_count": narrative_dict.get("invocation_count", 0), + } + self.push(entry, env) + + def push_threshold_recommendation(self, rec: dict, env: str = "local"): + """Push sentinel_threshold_recommendation — T3's threshold calibration advice.""" + entry = { + "level": "INFO", + "message": ( + f"Threshold recommendation: {rec.get('alert', '?')} " + f"current={rec.get('current_threshold')} → suggested={rec.get('suggested_threshold')} " + f"({rec.get('direction', '?')})" + ), + "component": "log-sentinel", + "event": "sentinel_threshold_recommendation", + "domain": "system", + **rec, + } + self.push(entry, env) + + def push_trigger(self, alert_data: dict, env: str = "local"): + """Push sentinel_trigger — per T0 webhook alert received.""" + entry = { + "level": "INFO", + "message": f"Trigger: {alert_data.get('alertname', '?')} [{alert_data.get('trigger_tier', '?')}]", + "component": 
"log-sentinel", + "event": "sentinel_trigger", + "domain": "system", + "trigger_source": "grafana_alert", + **alert_data, + } + self.push(entry, env) diff --git a/observability/local/log-sentinel/models.py b/observability/local/log-sentinel/models.py deleted file mode 100644 index e815079..0000000 --- a/observability/local/log-sentinel/models.py +++ /dev/null @@ -1,100 +0,0 @@ -"""Data models for Log Sentinel findings and investigations.""" - -import hashlib -import uuid -from dataclasses import dataclass, field -from datetime import datetime, timezone - - -@dataclass -class TimeWindow: - start_ns: int - end_ns: int - duration_sec: int - - @classmethod - def from_now(cls, lookback_sec: int) -> "TimeWindow": - now_ms = int(datetime.now(timezone.utc).timestamp() * 1000) - end_ns = now_ms * 1_000_000 - start_ns = (now_ms - lookback_sec * 1000) * 1_000_000 - return cls(start_ns=start_ns, end_ns=end_ns, duration_sec=lookback_sec) - - -@dataclass -class Finding: - detector: str - severity: str # "info" | "warn" | "critical" - title: str - summary: str - category: str = "app" # "app" | "ops" - evidence: dict = field(default_factory=dict) - timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) - finding_id: str = field(default_factory=lambda: str(uuid.uuid4())) - escalate_to_t2: bool = False - flow_context: str = "" - logql_query: str = "" - - @property - def fingerprint(self) -> str: - """Stable hash for deduplication: same detector + title = same fingerprint.""" - raw = f"{self.detector}:{self.title}" - return hashlib.sha256(raw.encode()).hexdigest()[:16] - - -@dataclass -class Investigation: - finding: Finding - root_cause: str - correlation: str - impact: str - recommendation: str - confidence: str # "low" | "medium" | "high" - issue_type: str = "unknown" # "bug" | "config" | "performance" | "security" | "unknown" - trigger: str = "escalation" # "escalation" | "proactive" - raw_response: str = "" - model: str = "" - investigation_id: 
str = field(default_factory=lambda: str(uuid.uuid4())) - timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) - inference_duration_ms: int = 0 - gather_duration_ms: int = 0 - context_lines_gathered: int = 0 - - -@dataclass -class FlowStep: - id: str - event: str - label: str - filters: dict = field(default_factory=dict) - timeout_sec: int = 0 - optional: bool = False - next_steps: list = field(default_factory=list) - - -@dataclass -class FlowDefinition: - name: str - display_name: str - description: str - source_doc: str - steps: list - expected_completion_sec: int = 0 - gap_severity: str = "warn" - - -@dataclass -class FlowGap: - step: FlowStep - flow: FlowDefinition - description: str = "" - - -@dataclass -class FlowEvaluation: - flow: FlowDefinition - matched_steps: dict = field(default_factory=dict) - gaps: list = field(default_factory=list) - - @property - def complete(self) -> bool: - return len(self.gaps) == 0 diff --git a/observability/local/log-sentinel/narrative.py b/observability/local/log-sentinel/narrative.py new file mode 100644 index 0000000..72e3a9b --- /dev/null +++ b/observability/local/log-sentinel/narrative.py @@ -0,0 +1,214 @@ +"""Session narrative builder — used by T3 synthesis. + +Turns a set of FeatureInvocations + T1/T2 findings into a human-readable +per-session story that answers: "What was the user trying to do, did it work?" 
+ +Output shape (returned as text block): + NARRATIVE: [] + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + <2-3 sentence prose of what happened> + + WORKED: · + FAILED: (error) + PATTERNS: + ACTION: +""" + +import logging +from datetime import datetime, timezone + +from trace import FeatureInvocation + +logger = logging.getLogger("sentinel.narrative") + + +class NarrativeBuilder: + def build( + self, + session_id: str, + invocations: list[FeatureInvocation], + anomaly_dicts: list[dict], + t2_investigation_dicts: list[dict], + pattern_matches: list[str] | None = None, + ) -> str: + """Build a narrative text block for a single session.""" + if not invocations: + return f"NARRATIVE: session={session_id}\n (no feature invocations recorded)" + + # Time range + start_ns = min(inv.start_ts_ns for inv in invocations) + end_ns = max(inv.end_ts_ns for inv in invocations) + start_dt = datetime.fromtimestamp(start_ns / 1e9, tz=timezone.utc) + end_dt = datetime.fromtimestamp(end_ns / 1e9, tz=timezone.utc) + date_str = start_dt.strftime("%Y-%m-%d") + time_range = f"{start_dt.strftime('%H:%M')}–{end_dt.strftime('%H:%M')}" + + # Classify worked vs failed + worked = [] + failed = [] + for inv in invocations: + if inv.success is False: + failed.append(inv) + elif inv.success is True: + worked.append(inv) + # success=None (unknown) counted as neither + + # Build prose summary + prose = self._build_prose(invocations, worked, failed, anomaly_dicts) + + # Recommendation from T2 investigations or anomalies + action = self._extract_action(t2_investigation_dicts, anomaly_dicts) + + # Pattern summary + patterns_text = "" + if pattern_matches: + patterns_text = " · ".join(pattern_matches[:3]) + elif _has_recurring_issue(anomaly_dicts): + patterns_text = f"{sum(1 for a in anomaly_dicts if a.get('severity') in ('warn', 'critical'))} anomalies flagged" + + # Assemble + sep = "━" * 48 + lines = [ + f"NARRATIVE: {date_str} {time_range} [{session_id[:12]}]", + sep, + "", + prose, + "", + ] + + 
if worked: + worked_str = " · ".join(_action_label(inv) for inv in _dedupe_by_type(worked)) + lines.append(f"WORKED: {worked_str}") + if failed: + failed_str = " · ".join( + f"{_action_label(inv)} ({(inv.error or 'error')[:40]})" + for inv in _dedupe_by_type(failed) + ) + lines.append(f"FAILED: {failed_str}") + if patterns_text: + lines.append(f"PATTERNS: {patterns_text}") + if action: + lines.append(f"ACTION: {action[:200]}") + + return "\n".join(lines) + + def build_all( + self, + invocations: list[FeatureInvocation], + anomaly_dicts: list[dict], + t2_investigation_dicts: list[dict], + ) -> list[dict]: + """Group invocations by session_id, build a narrative per session. + + Returns list of dicts with keys: session_id, narrative_text, features_worked, + features_failed, invocation_count. + """ + # Group invocations by session_id + sessions: dict[str, list[FeatureInvocation]] = {} + for inv in invocations: + sid = (inv.trigger_event.session_id if inv.trigger_event else None) or "no_session" + sessions.setdefault(sid, []).append(inv) + + results = [] + for sid, session_invocations in sessions.items(): + # Filter anomalies + investigations for this session + session_anomalies = _filter_for_session(anomaly_dicts, sid) + session_t2 = _filter_for_session(t2_investigation_dicts, sid) + + text = self.build( + session_id=sid, + invocations=session_invocations, + anomaly_dicts=session_anomalies, + t2_investigation_dicts=session_t2, + ) + worked = [inv.action_type for inv in session_invocations if inv.success is True] + failed = [inv.action_type for inv in session_invocations if inv.success is False] + + results.append({ + "session_id": sid, + "narrative_text": text, + "features_worked": list(dict.fromkeys(worked)), # dedupe, order-preserving + "features_failed": list(dict.fromkeys(failed)), + "invocation_count": len(session_invocations), + }) + + return results + + # ── Private ─────────────────────────────────────────────────────────────── + + def _build_prose( + self, + 
all_invocations: list[FeatureInvocation], + worked: list[FeatureInvocation], + failed: list[FeatureInvocation], + anomaly_dicts: list[dict], + ) -> str: + total = len(all_invocations) + worked_count = len(worked) + failed_count = len(failed) + + # Action type distribution + type_counts: dict[str, int] = {} + for inv in all_invocations: + type_counts[inv.action_type] = type_counts.get(inv.action_type, 0) + 1 + + top_types = sorted(type_counts.items(), key=lambda x: x[1], reverse=True)[:3] + type_str = ", ".join(f"{name} (×{n})" for name, n in top_types) + + health_str = ( + "All recorded actions completed successfully." + if failed_count == 0 + else f"{failed_count} of {total} action(s) failed." + ) + + anomaly_count = sum(1 for a in anomaly_dicts if a.get("severity") in ("warn", "critical")) + anomaly_str = f" {anomaly_count} anomaly flags were raised." if anomaly_count else "" + + return ( + f"{total} feature invocation(s) recorded: {type_str}. " + f"{health_str}{anomaly_str}" + ) + + def _extract_action( + self, + t2_dicts: list[dict], + anomaly_dicts: list[dict], + ) -> str: + # Prefer T2 recommendation if available + for t2 in t2_dicts: + rec = t2.get("recommendation", "") + if rec and rec not in ("Investigate manually.", ""): + return rec[:200] + # Fall back to critical anomaly hypothesis + for a in anomaly_dicts: + if a.get("severity") == "critical" and a.get("hypothesis"): + return a["hypothesis"][:200] + return "" + + +# ── Helpers ─────────────────────────────────────────────────────────────────── + +def _action_label(inv: FeatureInvocation) -> str: + return inv.action_type.replace("_", " ").replace("-", " ").lower() + + +def _dedupe_by_type(invocations: list[FeatureInvocation]) -> list[FeatureInvocation]: + seen: dict[str, FeatureInvocation] = {} + for inv in invocations: + seen.setdefault(inv.action_type, inv) + return list(seen.values()) + + +def _has_recurring_issue(anomaly_dicts: list[dict]) -> bool: + return any(a.get("severity") in ("warn", 
"critical") for a in anomaly_dicts) + + +def _filter_for_session(items: list[dict], session_id: str) -> list[dict]: + """Return items that mention this session_id, or all items if session is no_session.""" + if session_id == "no_session": + return items + return [ + item for item in items + if not item.get("session_id") or item.get("session_id") == session_id + ] diff --git a/observability/local/log-sentinel/ollama_client.py b/observability/local/log-sentinel/ollama_client.py new file mode 100644 index 0000000..4448a44 --- /dev/null +++ b/observability/local/log-sentinel/ollama_client.py @@ -0,0 +1,64 @@ +"""Ollama HTTP client with qwen3 /think and /no_think mode support.""" + +import re +import time +import logging + +import requests + +logger = logging.getLogger("sentinel.ollama") + +_THINK_STRIP = re.compile(r".*?", re.DOTALL) + + +class OllamaClient: + def __init__(self, base_url: str, timeout: int = 300): + self.base_url = base_url.rstrip("/") + self.timeout = timeout + + def generate( + self, + model: str, + prompt: str, + think: bool = False, + temperature: float = 0.1, + ) -> tuple[str, int]: + """ + Call Ollama /api/generate. Returns (response_text, duration_ms). + Prepends /think or /no_think for qwen3 models. + Strips ... blocks from output before returning. + Raises on failure so callers can handle via circuit breaker. 
+ """ + mode_prefix = "/think\n" if think else "/no_think\n" + full_prompt = mode_prefix + prompt + + start = time.time() + resp = requests.post( + f"{self.base_url}/api/generate", + json={ + "model": model, + "prompt": full_prompt, + "stream": False, + "options": { + "temperature": temperature, + "num_predict": 2048, + }, + }, + timeout=self.timeout, + ) + duration_ms = int((time.time() - start) * 1000) + + if resp.status_code != 200: + raise RuntimeError(f"Ollama {resp.status_code}: {resp.text[:200]}") + + raw = resp.json().get("response", "") + cleaned = _THINK_STRIP.sub("", raw).strip() + return cleaned, duration_ms + + def is_available(self) -> bool: + """Quick availability check — HEAD /api/tags.""" + try: + resp = requests.get(f"{self.base_url}/api/tags", timeout=5) + return resp.status_code == 200 + except Exception: + return False diff --git a/observability/local/log-sentinel/prompts.py b/observability/local/log-sentinel/prompts.py new file mode 100644 index 0000000..08bfdb6 --- /dev/null +++ b/observability/local/log-sentinel/prompts.py @@ -0,0 +1,396 @@ +"""Prompt templates and structured output schemas for Log Sentinel v2/v3.""" + +# ── Stream descriptions injected into every prompt ────────────────────────── + +STREAM_DESCRIPTIONS = { + "sim-steward": ( + "SimHub plugin logs: iRacing session events, user actions (button clicks, " + "replay controls), WebSocket messages, incident detection, plugin lifecycle. " + "Key fields: event, domain, component, session_id, subsession_id." + ), + "claude-dev-logging": ( + "Claude Code AI agent logs: tool calls (Read, Write, Bash, etc.), " + "session lifecycle, subagent activity, MCP service calls, token snapshots. " + "Key fields: event, hook_type, tool_name, service, session_id, duration_ms." + ), + "claude-token-metrics": ( + "Claude Code session summaries: one entry per completed AI session. 
" + "Fields: total_input_tokens, total_output_tokens, cost_usd, model, effort, " + "assistant_turns, tool_use_count, session_id." + ), +} + +# ── T1 prompts ─────────────────────────────────────────────────────────────── + +T1_SYSTEM = """\ +You are a log analyst for a SimHub iRacing plugin system that integrates with an AI coding assistant. +You analyze structured JSON logs from three streams to identify what happened and what looks wrong. + +Stream guide: +{stream_guide} + +Always respond with valid JSON only. No markdown, no explanation outside the JSON object.\ +""" + +T1_SUMMARY_PROMPT = """\ +Analyze the following log activity from the past {window_minutes} minutes. + +LOG COUNTS (total lines per stream): +{counts} + +RECENT LOGS — sim-steward ({sim_steward_count} lines shown): +{sim_steward_sample} + +RECENT LOGS — claude-dev-logging ({claude_dev_count} lines shown): +{claude_dev_sample} + +RECENT LOGS — claude-token-metrics ({claude_token_count} lines shown): +{claude_token_sample} + +Respond with this JSON schema exactly: +{{ + "summary": "<2-3 sentence narrative of what happened this window>", + "cycle_notes": "" +}} +""" + +T1_ANOMALY_PROMPT = """\ +You have already summarized this window: +{summary} + +Now analyze the same logs for anomalies. Look for: +- Error spikes or unexpected ERROR/WARN levels +- Gaps in expected activity (e.g. session started but no actions followed) +- Unusual token costs or AI session patterns +- WebSocket disconnects, action failures, plugin crashes +- Anything that deviates from normal healthy operation + +LOG COUNTS: +{counts} + +RECENT LOGS — sim-steward: +{sim_steward_sample} + +RECENT LOGS — claude-dev-logging: +{claude_dev_sample} + +RECENT LOGS — claude-token-metrics: +{claude_token_sample} + +Respond with this JSON schema exactly: +{{ + "anomalies": [ + {{ + "id": "", + "stream": "", + "description": "", + "severity": "", + "needs_t2": , + "suggested_logql": "
" + }} + ] +}} + +Return an empty anomalies array if nothing looks wrong. Do not invent anomalies. +""" + +# ── T2 prompts ─────────────────────────────────────────────────────────────── + +T2_SYSTEM = """\ +You are a senior site reliability engineer investigating anomalies in a SimHub iRacing plugin system. +You have been given anomaly flags, a chronological event timeline, and raw log evidence from targeted queries. +Your job: determine root cause, identify cross-stream correlations, and provide concrete actionable recommendations. + +Stream guide: +{stream_guide} + +Always respond with valid JSON only. No markdown, no explanation outside the JSON object.\ +""" + +T2_INVESTIGATION_PROMPT = """\ +ANOMALIES TO INVESTIGATE: +{anomaly_descriptions} + +EVENT TIMELINE (past {window_minutes} minutes, chronological across all streams): +{timeline_text} + +TARGETED LOG QUERIES AND RESULTS: +{logql_results} + +Based on all of the above, respond with this JSON schema exactly: +{{ + "root_cause": "", + "issue_type": "", + "confidence": "", + "correlation": "", + "impact": "", + "recommendation": "", + "logql_queries_used": {logql_queries_list}, + "sentry_worthy": +}} +""" + +# ── LogQL generation prompt ────────────────────────────────────────────────── + +LOGQL_GEN_SYSTEM = """\ +You are a Loki LogQL expert. Generate precise LogQL queries to investigate anomalies. +Always respond with a valid JSON array of strings only. No explanation.\ +""" + +LOGQL_GEN_PROMPT = """\ +Generate up to 5 LogQL queries to investigate these anomalies: +{anomaly_descriptions} + +Available streams (use exact app label values): +- {{app="sim-steward"}} — plugin actions, iRacing events +- {{app="claude-dev-logging"}} — AI agent tool calls, lifecycle +- {{app="claude-token-metrics"}} — AI session token summaries + +Time window: past {window_minutes} minutes. 
+ +Rules: +- Every query must start with {{ and contain at least one | +- Use | json to parse JSON log lines +- Use | level = "ERROR" or | event = "..." to filter +- Keep queries focused and specific to the anomalies + +Respond with a JSON array of strings: +["", "", ...] +""" + + +# ── Helper: build formatted stream guide ──────────────────────────────────── + +def build_stream_guide() -> str: + return "\n".join( + f" {app}: {desc}" for app, desc in STREAM_DESCRIPTIONS.items() + ) + + +# ── Helper: format log sample for prompt ──────────────────────────────────── + +def format_log_sample(lines: list[dict], max_lines: int = 30) -> str: + import json + if not lines: + return " (no logs in this window)" + shown = lines[-max_lines:] # most recent + return "\n".join(f" {json.dumps(line, default=str)}" for line in shown) + + +# ── Helper: format LogQL results for T2 prompt ────────────────────────────── + +def format_logql_results(results: dict[str, list[dict]]) -> str: + import json + if not results: + return " (no additional queries executed)" + sections = [] + for query, lines in results.items(): + if not lines: + sections.append(f"=== {query} ===\n (0 results)") + else: + formatted = "\n".join( + f" {json.dumps(line, default=str)}" for line in lines[:50] + ) + sections.append(f"=== {query} ===\n{formatted}") + return "\n\n".join(sections) + + +# ── v3: Feature invocation formatter ──────────────────────────────────────── + +def format_invocations(invocations, max_invocations: int = 15) -> str: + """Format FeatureInvocation list for injection into T1 prompt.""" + if not invocations: + return " (no feature invocations detected this window)" + + shown = invocations[:max_invocations] + lines = [] + for inv in shown: + status = "FAILED" if inv.success is False else ("OK" if inv.success else "?") + err = f" error={inv.error[:60]}" if inv.error else "" + lines.append( + f" [{status}] {inv.action_type} via {inv.correlation_method} " + f"({inv.duration_ms}ms, 
{len(inv.events)} events){err}" + ) + if len(invocations) > max_invocations: + lines.append(f" [... {len(invocations) - max_invocations} more invocations not shown]") + return "\n".join(lines) + + +def format_evidence_packets_for_t2(packet_dicts: list[dict]) -> str: + """Format Loki-serialized evidence packet metadata for T2 prompt.""" + if not packet_dicts: + return " (no evidence packets available)" + lines = [] + for p in packet_dicts: + lines.append( + f" [{p.get('severity', '?').upper()}] anomaly_id={p.get('anomaly_id', '?')} " + f"stream={p.get('detector_stream', '?')}" + ) + lines.append(f" {p.get('anomaly_description', '')[:120]}") + if p.get("t1_hypothesis"): + lines.append(f" T1 hypothesis: {p['t1_hypothesis'][:120]}") + lines.append( + f" confidence={p.get('t1_confidence', 0):.0%} " + f"related_lines={p.get('related_lines_count', 0)} " + f"invocations={p.get('invocation_count', 0)}" + ) + if p.get("suggested_logql"): + lines.append(f" suggested_logql: {p['suggested_logql'][:120]}") + lines.append("") + return "\n".join(lines) + + +# ── v3: T1 anomaly prompt with invocations + baseline context ──────────────── + +T1_ANOMALY_PROMPT_V3 = """\ +You have already summarized this window: +{summary} + +FEATURE INVOCATIONS (user actions traced end-to-end this window): +{invocations_text} + +BASELINE CONTEXT (historical normal values — use to judge what is anomalous): +{baseline_context} + +Now analyze the logs for anomalies. Look for: +- Error spikes or unexpected ERROR/WARN levels +- Failed feature invocations (action_type FAILED) +- Gaps in expected activity (e.g. 
session started but no actions followed) +- Unusual token costs or AI session patterns +- WebSocket disconnects, action failures, plugin crashes +- Metrics exceeding baselines by 3x or more +- Anything deviating from historical normal operation + +LOG COUNTS: +{counts} + +RECENT LOGS — sim-steward: +{sim_steward_sample} + +RECENT LOGS — claude-dev-logging: +{claude_dev_sample} + +RECENT LOGS — claude-token-metrics: +{claude_token_sample} + +Respond with this JSON schema exactly: +{{ + "anomalies": [ + {{ + "id": "", + "stream": "", + "event_type": "", + "description": "", + "severity": "", + "needs_t2": , + "hypothesis": "", + "confidence": <0.0 to 1.0>, + "trace_id": "", + "suggested_logql": "" + }} + ] +}} + +Return an empty anomalies array if nothing looks wrong. Do not invent anomalies. +""" + + +# ── v3: T2 evidence-packet prompts ────────────────────────────────────────── + +T2_EVIDENCE_SYSTEM = """\ +You are a senior site reliability engineer investigating anomalies in a SimHub iRacing plugin system. +You have been given pre-assembled evidence packets from T1 fast triage, plus relevant Sentry history. +Your job: validate T1 hypotheses, determine root cause, identify cross-stream correlations, and provide +concrete actionable recommendations. + +Stream guide: +{stream_guide} + +Always respond with valid JSON only. 
No markdown, no explanation outside the JSON object.\ +""" + +T2_EVIDENCE_PROMPT = """\ +EVIDENCE PACKETS FROM T1 TRIAGE: +{evidence_text} + +SENTRY HISTORY (existing issues matching these anomaly signatures): +{sentry_context} + +ADDITIONAL LOG EVIDENCE (from targeted LogQL queries): +{logql_results} + +Based on all of the above, respond with this JSON schema exactly: +{{ + "root_cause": "", + "issue_type": "", + "confidence": "", + "correlation": "", + "impact": "", + "recommendation": "", + "sentry_worthy": , + "sentry_fingerprint": "", + "logql_queries_used": [] +}} +""" + + +# ── v3: T3 synthesis prompts ───────────────────────────────────────────────── + +T3_SYSTEM = """\ +You are a systems analyst synthesizing log data, anomaly findings, and Sentry history +for a SimHub iRacing plugin with an integrated AI coding assistant. + +Your goal: answer "What was the user trying to do, and did it work?" +Produce a human-readable synthesis covering sessions, patterns, costs, regressions, and health. + +Stream guide: +{stream_guide} + +Always respond with valid JSON only. 
No markdown, no explanation outside the JSON object.\ +""" + +T3_SYNTHESIS_PROMPT = """\ +SYNTHESIS WINDOW: {window_description} +MODE: {mode} + +T1 EVIDENCE PACKETS (anomalies found this period): +{evidence_summary} + +T2 INVESTIGATIONS (deep findings this period): +{investigation_summary} + +OPEN SENTRY ISSUES: +{sentry_issues} + +RECENT RELEASES: +{recent_releases} + +SESSION NARRATIVES: +{session_narratives} + +Respond with this JSON schema exactly: +{{ + "period_summary": "<2-3 sentence overview of the period>", + "sessions_analyzed": , + "features_worked": ["", ...], + "features_failed": ["", ...], + "recurring_patterns": [ + {{ + "pattern": "", + "occurrences": , + "first_seen": "", + "recommendation": "" + }} + ], + "cost_summary": {{ + "sessions": , + "total_usd": , + "mean_per_session_usd": , + "trend": "" + }}, + "regression_detected": , + "regression_detail": "", + "action_items": ["", ""], + "baselines_need_update": +}} +""" diff --git a/observability/local/log-sentinel/query_cache.py b/observability/local/log-sentinel/query_cache.py deleted file mode 100644 index 2a750e4..0000000 --- a/observability/local/log-sentinel/query_cache.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Shared Loki query cache — run common queries once per cycle, share results across detectors.""" - -import logging -import time - -from loki_client import LokiClient -from models import TimeWindow - -logger = logging.getLogger("sentinel.cache") - -# Predefined query keys. Each maps to a LogQL query and the app stream it targets. 
-QUERIES = { - # sim-steward (app detectors) - "ss_all": '{app="sim-steward"} | json', - "ss_errors": '{app="sim-steward", level="ERROR"} | json', - "ss_actions": '{app="sim-steward", event="action_result"} | json', - "ss_lifecycle": '{app="sim-steward"} | json | event=~"plugin_started|plugin_ready|plugin_stopped|iracing_connected|iracing_disconnected|bridge_starting|bridge_start_failed|deploy_marker"', - "ss_ws": '{app="sim-steward"} | json | event=~"ws_client_connected|ws_client_disconnected|ws_client_rejected|bridge_start_failed"', - "ss_incidents": '{app="sim-steward", event="incident_detected"} | json', - "ss_digests": '{app="sim-steward", event="session_digest"} | json', - "ss_resources": '{app="sim-steward", event="host_resource_sample"} | json', - # claude-dev-logging (ops detectors) - "claude_all": '{app="claude-dev-logging"} | json', - "claude_lifecycle": '{app="claude-dev-logging", component="lifecycle"} | json', - "claude_tools": '{app="claude-dev-logging", component=~"tool|mcp-.*"} | json', - "claude_agents": '{app="claude-dev-logging", component="agent"} | json', - "claude_errors": '{app="claude-dev-logging", level="ERROR"} | json', - "claude_tokens": '{app="claude-token-metrics"} | json', - # sentinel self-monitoring - "sentinel_findings": '{app="sim-steward", component="log-sentinel", event="sentinel_finding"} | json', - "sentinel_cycles": '{app="sim-steward", component="log-sentinel", event="sentinel_cycle"} | json', - "sentinel_t2": '{app="sim-steward", component="log-sentinel", event="sentinel_t2_run"} | json', -} - - -class CycleQueryCache: - """Runs all predefined queries once, caches results for detector access.""" - - def __init__(self, loki: LokiClient): - self.loki = loki - self._cache: dict[str, list[dict]] = {} - self._durations: dict[str, int] = {} - - def populate(self, window: TimeWindow, keys: list[str] | None = None): - """Run queries and cache results. 
If keys=None, run all.""" - target_keys = keys or list(QUERIES.keys()) - self._cache.clear() - self._durations.clear() - - for key in target_keys: - logql = QUERIES.get(key) - if not logql: - continue - start = time.time() - try: - lines = self.loki.query_lines(logql, window.start_ns, window.end_ns, limit=1000) - self._cache[key] = lines - except Exception as e: - logger.warning("Cache query '%s' failed: %s", key, e) - self._cache[key] = [] - self._durations[key] = int((time.time() - start) * 1000) - - total = sum(len(v) for v in self._cache.values()) - logger.info( - "Cache populated: %d queries, %d total lines, %dms", - len(self._cache), total, sum(self._durations.values()), - ) - - def get(self, key: str) -> list[dict]: - """Get cached results for a query key. Returns empty list if not cached.""" - return self._cache.get(key, []) - - def get_by_severity(self, key: str) -> dict[str, list[dict]]: - """Get cached results grouped by level: errors first, then warnings, then info.""" - lines = self.get(key) - grouped = {"ERROR": [], "WARN": [], "INFO": [], "DEBUG": []} - for line in lines: - level = (line.get("level") or "INFO").upper() - grouped.setdefault(level, []).append(line) - return grouped - - def filter(self, key: str, **field_filters) -> list[dict]: - """Filter cached results by field values.""" - lines = self.get(key) - results = [] - for line in lines: - fields = line.get("fields", {}) - match = all( - fields.get(k) == v or line.get(k) == v - for k, v in field_filters.items() - ) - if match: - results.append(line) - return results - - @property - def stats(self) -> dict: - return { - "queries": len(self._cache), - "total_lines": sum(len(v) for v in self._cache.values()), - "durations": self._durations, - } diff --git a/observability/local/log-sentinel/requirements.txt b/observability/local/log-sentinel/requirements.txt index c5a68ee..e8f6ece 100644 --- a/observability/local/log-sentinel/requirements.txt +++ 
b/observability/local/log-sentinel/requirements.txt @@ -1,6 +1,5 @@ flask>=3.0.0 requests>=2.31.0 -pyyaml>=6.0.1 schedule>=1.2.0 sentry-sdk>=2.0.0 pytest>=8.0.0 diff --git a/observability/local/log-sentinel/sentinel.py b/observability/local/log-sentinel/sentinel.py index 6fe0c78..55df187 100644 --- a/observability/local/log-sentinel/sentinel.py +++ b/observability/local/log-sentinel/sentinel.py @@ -1,377 +1,326 @@ -"""Log Sentinel main loop — parallel detectors, async T2, dedup, circuit breakers, 100% logging.""" +"""Log Sentinel v3 — main cycle orchestrator.""" import logging -import queue -import threading import time import uuid -from concurrent.futures import ThreadPoolExecutor, as_completed +from dataclasses import dataclass import schedule +from baseline import BaselineManager from circuit_breaker import CircuitBreaker from config import Config +from evidence import EvidenceBuilder from grafana_client import GrafanaClient from loki_client import LokiClient -from models import Finding, TimeWindow -from query_cache import CycleQueryCache +from ollama_client import OllamaClient from sentry_client import SentryClient - -from detectors.error_spike import ErrorSpikeDetector -from detectors.action_failure import ActionFailureDetector -from detectors.websocket_health import WebSocketHealthDetector -from detectors.silent_session import SilentSessionDetector -from detectors.flow_gap import FlowGapDetector -from detectors.stuck_user import StuckUserDetector -from detectors.incident_anomaly import IncidentAnomalyDetector -from detectors.plugin_lifecycle import PluginLifecycleDetector -from detectors.resource_health import ResourceHealthDetector -from detectors.session_quality import SessionQualityDetector -from detectors.claude_session import ClaudeSessionDetector -from detectors.mcp_health import McpHealthDetector -from detectors.agent_loop import AgentLoopDetector -from detectors.tool_patterns import ToolPatternsDetector -from detectors.token_usage import 
TokenUsageDetector -from detectors.sentinel_health import SentinelHealthDetector - -from flows.engine import FlowEngine -from investigator.chain import InvestigationChain +from t1_agent import T1Agent, T1Result +from t2_agent import T2Agent, T2Result +from t3_agent import T3Agent +from timeline import TimelineBuilder +from trace import InvocationBuilder logger = logging.getLogger("sentinel") +@dataclass +class CycleResult: + cycle_id: str + cycle_num: int + window_minutes: int + t1: T1Result | None + timeline_event_count: int + anomaly_count: int + duration_ms: int + error: str | None = None + + class Sentinel: def __init__(self, config: Config): self.config = config + self.loki = LokiClient(config.loki_url) + self.ollama = OllamaClient(config.ollama_url) self.grafana = GrafanaClient(config.grafana_url, config.grafana_user, config.grafana_password) self.sentry = SentryClient(config.sentry_dsn, config.env_label) - self.cache = CycleQueryCache(self.loki) - self.flow_engine = FlowEngine("flows/definitions") - # Circuit breakers self.loki_breaker = CircuitBreaker("loki", failure_threshold=3, backoff_sec=60) self.ollama_breaker = CircuitBreaker("ollama", failure_threshold=3, backoff_sec=120) - # In-memory stats for sentinel_health detector (avoids circular Loki query) + self.baseline = BaselineManager(self.loki, config.baseline_path) + self.evidence_builder = EvidenceBuilder(self.loki) + self.invocation_builder = InvocationBuilder() + self.timeline_builder = TimelineBuilder(self.loki, self.loki_breaker) + + self.t1_agent = T1Agent( + self.ollama, self.loki, self.ollama_breaker, config, + self.baseline, self.evidence_builder, + ) + self.t2_agent = T2Agent( + self.ollama, self.loki, self.grafana, self.sentry, + self.ollama_breaker, config, + ) + self.t3_agent = T3Agent( + self.ollama, self.loki, self.grafana, self.sentry, + self.baseline, self.ollama_breaker, config, + ) + + self._cycle_num = 0 + self._trigger_dedup: dict[str, float] = {} # alertname → last trigger 
time.time() self._stats = { - "last_cycle_duration_ms": 0, - "consecutive_detector_errors": 0, - "last_t2_duration_ms": 0, - "t2_queue_size": 0, "cycles_completed": 0, + "total_anomalies": 0, + "last_cycle_duration_ms": 0, + "last_t1_duration_ms": 0, + "last_t2_run_ts": 0, + "last_t3_run_ts": 0, } - # Detectors — app category - self.detectors = [ - ErrorSpikeDetector(), - ActionFailureDetector(), - WebSocketHealthDetector(), - SilentSessionDetector(), - FlowGapDetector(self.flow_engine), - StuckUserDetector(), - IncidentAnomalyDetector(), - PluginLifecycleDetector(), - ResourceHealthDetector(), - SessionQualityDetector(), - # ops category - ClaudeSessionDetector(), - McpHealthDetector(), - AgentLoopDetector(), - ToolPatternsDetector(), - TokenUsageDetector(), - SentinelHealthDetector(self._stats), - ] - - # T2 investigator - self.investigator = None - self._t2_queue: queue.Queue = queue.Queue() - if config.t2_enabled: - self.investigator = InvestigationChain( - ollama_url=config.ollama_url, - model_fast=config.ollama_model_fast, - model_deep=config.ollama_model_deep, - loki=self.loki, - ) + # ── Public ─────────────────────────────────────────────────────────────── - # Dedup: fingerprint → last_seen_timestamp - self._seen_fingerprints: dict[str, float] = {} - # T2 dedup: fingerprint → last_investigated_time - self._investigated_fingerprints: dict[str, float] = {} - self._proactive_hash: str = "" - - self._cycle_count = 0 - - # ── T1 Cycle ── + def start(self): + """Blocking schedule loop.""" + logger.info( + "Sentinel v3 started: mode=%s t1=%ds t2=%ds t3=%ds fast=%s deep=%s", + self.config.sentinel_mode, + self.config.t1_interval_sec, + self.config.t2_interval_sec, + self.config.t3_interval_sec, + self.config.ollama_model_fast, + self.config.ollama_model_deep, + ) + self.run_cycle() + schedule.every(self.config.t1_interval_sec).seconds.do(self.run_cycle) + schedule.every(self.config.t2_interval_sec).seconds.do(self.run_t2_cycle) + 
schedule.every(self.config.t3_interval_sec).seconds.do(self.run_t3_cycle) + while True: + schedule.run_pending() + time.sleep(1) - def run_cycle(self): - """Single T1 detection cycle with parallel execution and 100% logging.""" + def run_cycle(self) -> CycleResult: + """T1 analysis cycle. Always returns CycleResult.""" + self._cycle_num += 1 cycle_id = str(uuid.uuid4())[:8] - self._cycle_count += 1 cycle_start = time.time() - window = TimeWindow.from_now(self.config.lookback_sec) + end_ns = self.loki.now_ns() + start_ns = end_ns - int(self.config.lookback_sec * 1e9) + window_minutes = max(1, self.config.lookback_sec // 60) - # Populate shared query cache (one Loki call per query key) - if not self.loki_breaker.allow_request(): - logger.warning("Cycle #%d skipped: Loki circuit open", self._cycle_count) - return + logger.info("Cycle #%d [%s] start: window=%dmin", self._cycle_num, cycle_id, window_minutes) + + t1 = None + timeline_events = [] + error = None try: - self.cache.populate(window) - self.loki_breaker.record_success() - except Exception as e: - self.loki_breaker.record_failure() - logger.error("Cache populate failed: %s", e) - return + # 1. Gather + counts, samples = self._gather(start_ns, end_ns) + + # 2. 
Build timeline + invocations + timeline_events = self.timeline_builder.build(start_ns, end_ns) + tl_stats = self.timeline_builder.get_stats(timeline_events) + self.loki.push_timeline({ + **tl_stats, + "cycle_id": cycle_id, + "truncated": tl_stats["event_count"] > 60, + }, self.config.env_label) - # Run all detectors in parallel - all_findings: list[Finding] = [] - detector_errors = 0 - - with ThreadPoolExecutor(max_workers=4) as pool: - futures = { - pool.submit(self._run_detector, det, cycle_id): det - for det in self.detectors - } - for future in as_completed(futures): - det = futures[future] - try: - findings = future.result() - all_findings.extend(findings) - except Exception as e: - detector_errors += 1 - logger.error("Detector %s failed: %s", det.name, e) - - # Update stats for sentinel_health - self._stats["consecutive_detector_errors"] = ( - self._stats["consecutive_detector_errors"] + detector_errors - if detector_errors > 0 else 0 - ) + invocations = self.invocation_builder.build(timeline_events) + + # 3. 
T1 analysis + if not self.ollama_breaker.allow_request(): + logger.warning("T1 skipped: Ollama circuit open") + else: + t1 = self.t1_agent.run( + start_ns, end_ns, counts, + samples["sim-steward"], + samples["claude-dev-logging"], + samples["claude-token-metrics"], + invocations=invocations, + trigger_source="scheduled", + ) + self.loki.push_analyst_run({ + "cycle_id": cycle_id, + "tier": "t1", + "model": t1.model, + "think_mode": True, + "duration_ms": t1.total_duration_ms, + "summary_duration_ms": t1.summary_duration_ms, + "anomaly_duration_ms": t1.anomaly_duration_ms, + "anomaly_count": len(t1.anomalies), + "needs_t2_count": sum(1 for a in t1.anomalies if a.get("needs_t2")), + "evidence_packet_count": len(t1.evidence_packets), + "invocation_count": len(t1.invocations), + "window_minutes": window_minutes, + "trigger_source": t1.trigger_source, + }, self.config.env_label) - # Dedup and process findings — priority order: critical, warn, info - all_findings.sort(key=lambda f: {"critical": 0, "warn": 1, "info": 2}.get(f.severity, 3)) - - escalated = 0 - suppressed = 0 - for finding in all_findings: - if self._is_duplicate(finding): - suppressed += 1 - continue - - self.loki.push_finding(finding, self.config.env_label) - - if finding.severity in ("warn", "critical"): - self.grafana.annotate(finding) - - # Critical findings → Sentry immediately - if finding.severity == "critical": - event_id = self.sentry.create_issue(finding) - if event_id: - self.loki.push_sentry_event({ - "finding_id": finding.finding_id, - "sentry_event_id": event_id, - "title": finding.title, - "level": "error", - }, self.config.env_label) - - # Escalate to T2 (non-blocking, with dedup) - if finding.escalate_to_t2 and self.investigator: - fp = finding.fingerprint - last_inv = self._investigated_fingerprints.get(fp, 0) - if time.time() - last_inv < 900: # 15 min T2 dedup window - logger.debug("T2 dedup: skipping %s (investigated %ds ago)", fp[:8], int(time.time() - last_inv)) - else: - 
self._investigated_fingerprints[fp] = time.time() - escalated += 1 - self._t2_queue.put(("escalation", finding)) - - # Emit cycle metrics - cycle_duration_ms = int((time.time() - cycle_start) * 1000) - self._stats["last_cycle_duration_ms"] = cycle_duration_ms - self._stats["cycles_completed"] = self._cycle_count - self._stats["t2_queue_size"] = self._t2_queue.qsize() - - app_findings = sum(1 for f in all_findings if f.category == "app" and not self._is_duplicate(f)) - ops_findings = sum(1 for f in all_findings if f.category == "ops" and not self._is_duplicate(f)) + except Exception as e: + error = str(e) + logger.error("Cycle #%d error: %s", self._cycle_num, e) + + duration_ms = int((time.time() - cycle_start) * 1000) + result = CycleResult( + cycle_id=cycle_id, + cycle_num=self._cycle_num, + window_minutes=window_minutes, + t1=t1, + timeline_event_count=len(timeline_events), + anomaly_count=len(t1.anomalies) if t1 else 0, + duration_ms=duration_ms, + error=error, + ) self.loki.push_cycle({ "cycle_id": cycle_id, - "cycle_num": self._cycle_count, - "duration_ms": cycle_duration_ms, - "finding_count": len(all_findings) - suppressed, - "suppressed_count": suppressed, - "escalated_count": escalated, - "error_count": detector_errors, - "app_findings": app_findings, - "ops_findings": ops_findings, - "cache_queries": self.cache.stats["queries"], - "cache_lines": self.cache.stats["total_lines"], + "cycle_num": self._cycle_num, + "window_minutes": window_minutes, + "t1_duration_ms": t1.total_duration_ms if t1 else 0, + "anomaly_count": result.anomaly_count, + "evidence_packet_count": len(t1.evidence_packets) if t1 else 0, + "timeline_event_count": len(timeline_events), + "total_duration_ms": duration_ms, + "error": error, }, self.config.env_label) + self._stats["cycles_completed"] = self._cycle_num + self._stats["last_cycle_duration_ms"] = duration_ms + self._stats["last_t1_duration_ms"] = t1.total_duration_ms if t1 else 0 + if t1: + self._stats["total_anomalies"] += 
result.anomaly_count + logger.info( - "Cycle #%d: %d findings (%d suppressed), %d escalated, %d errors, %dms", - self._cycle_count, len(all_findings) - suppressed, suppressed, - escalated, detector_errors, cycle_duration_ms, + "Cycle #%d complete: %d anomalies %d evidence_packets %dms", + self._cycle_num, result.anomaly_count, + len(t1.evidence_packets) if t1 else 0, duration_ms, ) + return result - def _run_detector(self, detector, cycle_id: str) -> list[Finding]: - """Run a single detector with timing and logging.""" - start = time.time() - error_msg = None - findings = [] + def run_t2_cycle(self) -> None: + """Independent T2 investigation cycle — pulls evidence packets from Loki.""" + if not self.ollama_breaker.allow_request(): + logger.warning("T2 cycle skipped: Ollama circuit open") + return + logger.info("T2 cycle starting") try: - findings = detector.detect(self.cache) + result = self.t2_agent.run() + self._stats["last_t2_run_ts"] = int(time.time()) + if result: + logger.info( + "T2 cycle complete: confidence=%s sentry=%s %dms", + result.confidence, result.sentry_worthy, result.total_duration_ms, + ) except Exception as e: - error_msg = str(e) - raise - finally: - duration_ms = int((time.time() - start) * 1000) - self.loki.push_detector_run({ - "cycle_id": cycle_id, - "detector": detector.name, - "category": detector.category, - "duration_ms": duration_ms, - "finding_count": len(findings), - "error": error_msg, - }, self.config.env_label) - return findings - - # ── Dedup ── - - def _is_duplicate(self, finding: Finding) -> bool: - fp = finding.fingerprint - now = time.time() - last_seen = self._seen_fingerprints.get(fp) - if last_seen and (now - last_seen) < self.config.dedup_window_sec: - return True - self._seen_fingerprints[fp] = now - # Clean old entries - cutoff = now - self.config.dedup_window_sec * 2 - self._seen_fingerprints = { - k: v for k, v in self._seen_fingerprints.items() if v > cutoff - } - return False - - # ── T2 Background Thread ── - - def 
_t2_worker(self): - """Background thread that processes T2 investigations from the queue.""" - logger.info("T2 worker started") - while True: - try: - trigger, payload = self._t2_queue.get(timeout=5) - except queue.Empty: - continue + logger.error("T2 cycle error: %s", e) - if not self.ollama_breaker.allow_request(): - logger.warning("T2 skipped: Ollama circuit open") - self._t2_queue.task_done() - continue + def run_t3_cycle(self) -> None: + """Independent T3 synthesis cycle — runs on slow cadence.""" + if not self.ollama_breaker.allow_request(): + logger.warning("T3 cycle skipped: Ollama circuit open") + return + logger.info("T3 cycle starting (mode=%s)", self.config.sentinel_mode) + try: + result = self.t3_agent.run(trigger="scheduled") + self._stats["last_t3_run_ts"] = int(time.time()) + logger.info( + "T3 cycle complete: %d sessions, regression=%s, %dms", + result.sessions_analyzed, result.regression_detected, result.inference_duration_ms, + ) + except Exception as e: + logger.error("T3 cycle error: %s", e) + + def trigger_cycle( + self, + alert_context: str, + trigger_tier: str, + alert_names: list[str], + lookback_sec: int = 1800, + ) -> None: + """Alert-driven cycle — called from /trigger webhook, runs in background thread.""" + logger.info( + "Trigger cycle: tier=%s alerts=%s lookback=%ds", + trigger_tier, alert_names, lookback_sec, + ) + end_ns = self.loki.now_ns() + start_ns = end_ns - lookback_sec * 1_000_000_000 - try: - if trigger == "escalation": - investigation = self.investigator.investigate(payload) - elif trigger == "proactive": - investigation = self.investigator.investigate_patterns(payload) - else: - self._t2_queue.task_done() - continue - - self.ollama_breaker.record_success() - - # Push results - self.loki.push_investigation(investigation, self.config.env_label) - self.grafana.annotate_investigation(investigation) - self.loki.push_t2_run({ - "investigation_id": investigation.investigation_id, - "finding_id": investigation.finding.finding_id 
if trigger == "escalation" else "proactive", - "trigger": trigger, - "tier": f"t2_{'deep' if investigation.model == self.config.ollama_model_deep else 'fast'}", - "model": investigation.model, - "gather_duration_ms": investigation.gather_duration_ms, - "inference_duration_ms": investigation.inference_duration_ms, - "total_duration_ms": investigation.gather_duration_ms + investigation.inference_duration_ms, - "context_lines": investigation.context_lines_gathered, - "confidence": investigation.confidence, - "issue_type": investigation.issue_type, - "escalated_to_deep": investigation.model == self.config.ollama_model_deep, - }, self.config.env_label) + try: + counts, samples = self._gather(start_ns, end_ns) + timeline_events = self.timeline_builder.build(start_ns, end_ns) + invocations = self.invocation_builder.build(timeline_events) + except Exception as e: + logger.error("Trigger cycle gather failed: %s", e) + return - # T2 investigations → Sentry - sentry_id = self.sentry.create_investigation_issue(investigation) - if sentry_id: - self.loki.push_sentry_event({ - "investigation_id": investigation.investigation_id, - "sentry_event_id": sentry_id, - "title": investigation.root_cause[:100], - "level": "error" if investigation.finding.severity == "critical" else "warning", - }, self.config.env_label) + if not self.ollama_breaker.allow_request(): + logger.warning("Trigger cycle T1 skipped: Ollama circuit open") + return - logger.info( - "T2 complete [%s]: %s confidence=%s model=%s type=%s", - trigger, investigation.investigation_id[:8], - investigation.confidence, investigation.model, investigation.issue_type, - ) - self._stats["last_t2_duration_ms"] = investigation.gather_duration_ms + investigation.inference_duration_ms + t1 = None + try: + t1 = self.t1_agent.run( + start_ns, end_ns, counts, + samples["sim-steward"], + samples["claude-dev-logging"], + samples["claude-token-metrics"], + invocations=invocations, + alert_context=alert_context, + 
trigger_source="grafana_alert", + alert_names=alert_names, + ) + logger.info( + "Trigger T1 complete: %d anomalies, %d evidence_packets, %dms", + len(t1.anomalies), len(t1.evidence_packets), t1.total_duration_ms, + ) + except Exception as e: + logger.error("Trigger cycle T1 failed: %s", e) - except Exception as e: - self.ollama_breaker.record_failure() - logger.error("T2 investigation failed: %s", e) - finally: - self._t2_queue.task_done() - self._stats["t2_queue_size"] = self._t2_queue.qsize() - - def _t2_proactive_poll(self): - """Periodically query L1 findings and ask T2 to analyze patterns.""" - import hashlib - if not self.investigator: - return - window = TimeWindow.from_now(self.config.t2_proactive_interval_sec) - findings = self.loki.query_lines( - '{app="sim-steward", component="log-sentinel", event="sentinel_finding"} | json', - window.start_ns, window.end_ns, limit=100, - ) - if len(findings) >= 3: - # Dedup: skip if same finding set as last poll - fps = sorted(set(f.get("fingerprint", "") for f in findings)) - set_hash = hashlib.sha256("|".join(fps).encode()).hexdigest()[:16] - if set_hash == self._proactive_hash: - logger.debug("T2 proactive dedup: same finding set, skipping") + # For t2-tier alerts, skip needs_t2 gate — escalate immediately + if trigger_tier == "t2" and self.config.t2_enabled: + if not self.ollama_breaker.allow_request(): + logger.warning("Trigger cycle T2 skipped: Ollama circuit open") return - self._proactive_hash = set_hash - logger.info("T2 proactive: analyzing %d recent findings", len(findings)) - self._t2_queue.put(("proactive", findings)) + try: + forced_ids = [ep.anomaly_id for ep in t1.evidence_packets] if t1 else None + result = self.t2_agent.run(forced_packet_ids=forced_ids) + self._stats["last_t2_run_ts"] = int(time.time()) + if result: + logger.info( + "Trigger T2 complete: confidence=%s sentry=%s %dms", + result.confidence, result.sentry_worthy, result.total_duration_ms, + ) + except Exception as e: + 
logger.error("Trigger cycle T2 failed: %s", e) - # ── Lifecycle ── + # ── Private ────────────────────────────────────────────────────────────── - def start(self): - """Start all loops.""" - logger.info( - "Sentinel v2 started: %d detectors (app=%d ops=%d), poll %ds, lookback %ds, T2 %s, models: fast=%s deep=%s", - len(self.detectors), - sum(1 for d in self.detectors if d.category == "app"), - sum(1 for d in self.detectors if d.category == "ops"), - self.config.poll_interval_sec, - self.config.lookback_sec, - "enabled" if self.investigator else "disabled", - self.config.ollama_model_fast, - self.config.ollama_model_deep, - ) + def _gather(self, start_ns: int, end_ns: int) -> tuple[dict, dict]: + """Fetch counts and samples from all three Loki streams.""" + stream_queries = { + "sim-steward": '{app="sim-steward"} | json', + "claude-dev-logging": '{app="claude-dev-logging"} | json', + "claude-token-metrics": '{app="claude-token-metrics"} | json', + } - # Start T2 background worker - if self.investigator: - t2_thread = threading.Thread(target=self._t2_worker, daemon=True) - t2_thread.start() + counts = {} + samples = {} - # Run first cycle immediately - self.run_cycle() + if not self.loki_breaker.allow_request(): + logger.warning("Gather skipped: Loki circuit open") + return {k: 0 for k in stream_queries}, {k: [] for k in stream_queries} - # Schedule recurring - schedule.every(self.config.poll_interval_sec).seconds.do(self.run_cycle) - if self.investigator: - schedule.every(self.config.t2_proactive_interval_sec).seconds.do(self._t2_proactive_poll) + try: + for name, logql in stream_queries.items(): + counts[name] = self.loki.count(logql, start_ns, end_ns) + samples[name] = self.loki.query_lines(logql, start_ns, end_ns, limit=100) + self.loki_breaker.record_success() + except Exception as e: + self.loki_breaker.record_failure() + logger.error("Gather failed: %s", e) + for name in stream_queries: + counts.setdefault(name, -1) + samples.setdefault(name, []) - while True: 
- schedule.run_pending() - time.sleep(1) + return counts, samples diff --git a/observability/local/log-sentinel/sentry_client.py b/observability/local/log-sentinel/sentry_client.py index 607e285..1fc98ed 100644 --- a/observability/local/log-sentinel/sentry_client.py +++ b/observability/local/log-sentinel/sentry_client.py @@ -1,6 +1,14 @@ -"""Sentry SDK wrapper — create issues for critical findings and T2 investigations.""" +"""Sentry SDK wrapper — create issues, read history, and capture behavioral findings. + +v3 additions: + - traces_sample_rate bumped to 1.0 (enable transactions) + - search_issues() — REST API read for T2/T3 history queries + - find_releases() — REST API read for T3 regression detection + - capture_behavioral_finding() — T2 writes behavioral patterns not captured by SDK +""" import logging +import requests logger = logging.getLogger("sentinel.sentry") @@ -13,13 +21,25 @@ class SentryClient: - def __init__(self, dsn: str, env: str = "local"): + def __init__( + self, + dsn: str, + env: str = "local", + auth_token: str = "", + org: str = "", + project: str = "", + ): self.enabled = bool(dsn) and _sdk_available + self._auth_token = auth_token + self._org = org + self._project = project + self._api_enabled = bool(auth_token and org and project) + if self.enabled: sentry_sdk.init( dsn=dsn, environment=env, - traces_sample_rate=0.0, + traces_sample_rate=1.0, send_default_pii=False, ) logger.info("Sentry initialized (env=%s)", env) @@ -58,6 +78,116 @@ def create_issue(self, finding) -> str | None: logger.warning("Sentry create_issue failed: %s", e) return None + def capture_behavioral_finding( + self, + title: str, + issue_type: str, + recommendation: str, + confidence: str, + fingerprint: str, + context: dict, + ) -> str | None: + """Create Sentry issue for a T2 behavioral finding (not captured by SDK). + + Only call this for patterns that wouldn't surface as clean exceptions: + e.g. 
'WebSocket always drops after 20min replay', 'incident detection stalls + after session_num > 3'. Do NOT use for things already covered by SDK capture. + """ + if not self.enabled: + return None + try: + level = "error" if confidence == "high" else "warning" + with sentry_sdk.new_scope() as scope: + scope.set_tag("issue_type", issue_type) + scope.set_tag("confidence", confidence) + scope.set_tag("source", "t2_behavioral") + scope.set_context("finding", { + "recommendation": recommendation, + **context, + }) + scope.fingerprint = ["t2.behavioral", fingerprint] + event_id = sentry_sdk.capture_message( + f"[T2] {title}", + level=level, + scope=scope, + ) + logger.info("Sentry behavioral finding created: %s", event_id) + return event_id + except Exception as e: + logger.warning("Sentry capture_behavioral_finding failed: %s", e) + return None + + # ── REST API read methods ────────────────────────────────────────────────── + + def search_issues(self, query: str = "is:unresolved", limit: int = 10) -> list[dict]: + """Search Sentry issues via REST API. 
Returns list of issue dicts.""" + if not self._api_enabled: + return [] + try: + resp = requests.get( + f"https://sentry.io/api/0/projects/{self._org}/{self._project}/issues/", + headers={"Authorization": f"Bearer {self._auth_token}"}, + params={"query": query, "limit": limit}, + timeout=10, + ) + if resp.status_code == 200: + return resp.json() + logger.debug("Sentry search_issues HTTP %d: %s", resp.status_code, resp.text[:200]) + except Exception as e: + logger.debug("Sentry search_issues failed: %s", e) + return [] + + def get_issue(self, issue_id: str) -> dict: + """Fetch a single Sentry issue by ID.""" + if not self._api_enabled: + return {} + try: + resp = requests.get( + f"https://sentry.io/api/0/issues/{issue_id}/", + headers={"Authorization": f"Bearer {self._auth_token}"}, + timeout=10, + ) + if resp.status_code == 200: + return resp.json() + except Exception as e: + logger.debug("Sentry get_issue failed: %s", e) + return {} + + def find_releases(self, limit: int = 5) -> list[dict]: + """Fetch recent releases for regression detection in T3.""" + if not self._api_enabled: + return [] + try: + resp = requests.get( + f"https://sentry.io/api/0/projects/{self._org}/{self._project}/releases/", + headers={"Authorization": f"Bearer {self._auth_token}"}, + params={"limit": limit}, + timeout=10, + ) + if resp.status_code == 200: + return resp.json() + except Exception as e: + logger.debug("Sentry find_releases failed: %s", e) + return [] + + def create_release(self, version: str) -> dict: + """Create a Sentry release (called from deploy.ps1 via this client).""" + if not self._api_enabled: + return {} + try: + resp = requests.post( + f"https://sentry.io/api/0/organizations/{self._org}/releases/", + headers={"Authorization": f"Bearer {self._auth_token}"}, + json={"version": version, "projects": [self._project]}, + timeout=10, + ) + if resp.status_code in (200, 201): + return resp.json() + logger.debug("Sentry create_release HTTP %d: %s", resp.status_code, 
resp.text[:200]) + except Exception as e: + logger.warning("Sentry create_release failed: %s", e) + return {} + def create_investigation_issue(self, investigation) -> str | None: """Create Sentry issue for a T2 investigation report. Returns event_id or None.""" if not self.enabled: diff --git a/observability/local/log-sentinel/t1_agent.py b/observability/local/log-sentinel/t1_agent.py new file mode 100644 index 0000000..f6119f8 --- /dev/null +++ b/observability/local/log-sentinel/t1_agent.py @@ -0,0 +1,220 @@ +"""T1 — Fast triage agent. + +Replaces the T1 half of analyst.py for v3. +Key changes over v2 Analyst.run_t1(): + - Accepts pre-built FeatureInvocations from InvocationBuilder + - Injects BaselineManager context into anomaly prompt + - Accepts optional T0 alert context for event-driven runs + - Builds EvidencePackets for each anomaly via EvidenceBuilder + - Pushes sentinel_evidence_packet events to Loki + - T1Result carries invocations + evidence_packets + trigger metadata +""" + +import logging +from dataclasses import dataclass, field + +from analyst import _parse_json, _normalize_anomalies +from baseline import BaselineManager +from circuit_breaker import CircuitBreaker +from config import Config +from evidence import EvidenceBuilder, EvidencePacket +from loki_client import LokiClient +from ollama_client import OllamaClient +from prompts import ( + T1_SYSTEM, T1_SUMMARY_PROMPT, T1_ANOMALY_PROMPT_V3, + build_stream_guide, format_log_sample, format_invocations, +) +from trace import FeatureInvocation + +logger = logging.getLogger("sentinel.t1") + + +@dataclass +class T1Result: + summary: str + cycle_notes: str + anomalies: list[dict] + invocations: list[FeatureInvocation] + evidence_packets: list[EvidencePacket] + model: str + summary_duration_ms: int + anomaly_duration_ms: int + trigger_source: str # "scheduled" | "grafana_alert" + alert_names: list[str] # T0 alert names that triggered this run + raw_summary_response: str = field(repr=False, default="") + 
raw_anomaly_response: str = field(repr=False, default="") + + @property + def needs_t2(self) -> bool: + return any(a.get("needs_t2") for a in self.anomalies) + + @property + def total_duration_ms(self) -> int: + return self.summary_duration_ms + self.anomaly_duration_ms + + +class T1Agent: + def __init__( + self, + ollama: OllamaClient, + loki: LokiClient, + breaker: CircuitBreaker, + config: Config, + baseline: BaselineManager, + evidence_builder: EvidenceBuilder, + ): + self.ollama = ollama + self.loki = loki + self.breaker = breaker + self.config = config + self.baseline = baseline + self.evidence_builder = evidence_builder + self._stream_guide = build_stream_guide() + + def run( + self, + start_ns: int, + end_ns: int, + counts: dict[str, int], + sim_steward_sample: list[dict], + claude_dev_sample: list[dict], + claude_token_sample: list[dict], + invocations: list[FeatureInvocation], + alert_context: str = "", + trigger_source: str = "scheduled", + alert_names: list[str] | None = None, + ) -> T1Result: + window_minutes = max(1, int((end_ns - start_ns) / 1e9 / 60)) + counts_text = "\n".join(f" {k}: {v}" for k, v in counts.items()) + + samples = dict( + sim_steward_sample=format_log_sample(sim_steward_sample), + sim_steward_count=len(sim_steward_sample), + claude_dev_sample=format_log_sample(claude_dev_sample), + claude_dev_count=len(claude_dev_sample), + claude_token_sample=format_log_sample(claude_token_sample), + claude_token_count=len(claude_token_sample), + ) + + invocations_text = format_invocations(invocations) + baseline_context = self.baseline.get_prompt_context() + system = T1_SYSTEM.format(stream_guide=self._stream_guide) + + # Optional T0 alert context prefix — injected into both calls + alert_prefix = "" + if alert_context: + alert_prefix = ( + f"ALERT CONTEXT (from Grafana):\n{alert_context}\n" + "→ Focus investigation on this signal. 
Do not suppress even if recent history is quiet.\n\n" + ) + + # Call A: summary (/no_think — fast) + summary_prompt = alert_prefix + T1_SUMMARY_PROMPT.format( + window_minutes=window_minutes, + counts=counts_text, + **samples, + ) + summary_text = "" + cycle_notes = "" + summary_ms = 0 + raw_summary = "" + try: + raw_summary, summary_ms = self.ollama.generate( + self.config.ollama_model_fast, + system + "\n\n" + summary_prompt, + think=False, + ) + self.breaker.record_success() + parsed = _parse_json(raw_summary) + summary_text = parsed.get("summary", "") + cycle_notes = parsed.get("cycle_notes", "") + except Exception as e: + self.breaker.record_failure() + logger.error("T1 summary call failed: %s", e) + + # Call B: anomaly scan (/think) — invocations + baseline context included + anomaly_prompt = alert_prefix + T1_ANOMALY_PROMPT_V3.format( + summary=summary_text or "(summary unavailable)", + counts=counts_text, + invocations_text=invocations_text, + baseline_context=baseline_context, + **samples, + ) + anomalies = [] + anomaly_ms = 0 + raw_anomaly = "" + try: + raw_anomaly, anomaly_ms = self.ollama.generate( + self.config.ollama_model_fast, + system + "\n\n" + anomaly_prompt, + think=True, + ) + self.breaker.record_success() + parsed = _parse_json(raw_anomaly) + anomalies = _normalize_anomalies_v3(parsed.get("anomalies", [])) + except Exception as e: + self.breaker.record_failure() + logger.error("T1 anomaly call failed: %s", e) + + # Build evidence packets for each anomaly, push to Loki + evidence_packets = [] + if anomalies: + evidence_packets = self.evidence_builder.build_many( + anomalies, invocations, start_ns, end_ns + ) + for packet in evidence_packets: + try: + self.loki.push_evidence_packet(packet, env=self.config.env_label) + except Exception as e: + logger.warning("Failed to push evidence packet %s: %s", packet.anomaly_id, e) + + logger.info( + "T1 [%s]: %d invocations, %d anomalies (%d→T2), %d evidence packets, summary=%dms anomaly=%dms", + 
trigger_source, + len(invocations), + len(anomalies), + sum(1 for a in anomalies if a.get("needs_t2")), + len(evidence_packets), + summary_ms, + anomaly_ms, + ) + + return T1Result( + summary=summary_text, + cycle_notes=cycle_notes, + anomalies=anomalies, + invocations=invocations, + evidence_packets=evidence_packets, + model=self.config.ollama_model_fast, + summary_duration_ms=summary_ms, + anomaly_duration_ms=anomaly_ms, + trigger_source=trigger_source, + alert_names=alert_names or [], + raw_summary_response=raw_summary, + raw_anomaly_response=raw_anomaly, + ) + + +# ── Helpers ─────────────────────────────────────────────────────────────────── + +def _normalize_anomalies_v3(raw: list) -> list[dict]: + """Normalize v3 anomaly dicts from T1 LLM output (superset of v2 fields).""" + if not isinstance(raw, list): + return [] + valid = [] + for a in raw: + if not isinstance(a, dict): + continue + valid.append({ + "id": str(a.get("id", "unknown"))[:64], + "stream": a.get("stream", "unknown"), + "event_type": str(a.get("event_type", ""))[:64], + "description": str(a.get("description", ""))[:500], + "severity": a.get("severity", "info") if a.get("severity") in ("info", "warn", "critical") else "info", + "needs_t2": bool(a.get("needs_t2", False)), + "hypothesis": str(a.get("hypothesis", ""))[:300], + "confidence": float(a.get("confidence", 0.5)) if isinstance(a.get("confidence"), (int, float)) else 0.5, + "trace_id": str(a.get("trace_id", ""))[:64], + "suggested_logql": str(a.get("suggested_logql", ""))[:300], + }) + return valid diff --git a/observability/local/log-sentinel/t2_agent.py b/observability/local/log-sentinel/t2_agent.py new file mode 100644 index 0000000..a10ea91 --- /dev/null +++ b/observability/local/log-sentinel/t2_agent.py @@ -0,0 +1,318 @@ +"""T2 — Deep investigation agent. + +Replaces the T2 half of analyst.py for v3. 
+Key changes over v2 Analyst.run_t2(): + - Reads evidence packets from Loki (state store), not from T1Result directly + - Queries Sentry for existing issues before forming recommendations + - Produces sentinel_t2_investigation events to Loki + - Creates Grafana annotation per investigation + - Creates Sentry issue if sentry_worthy + high confidence + not already captured + +Input flow: + Loki {event="sentinel_evidence_packet"} (last 15 min) + → SentryClient.search_issues() for each anomaly signature + → qwen3:32b /think + → LokiClient.push_t2_investigation() + → GrafanaClient.annotate_raw() + → SentryClient.capture_message() if warranted +""" + +import json +import logging +import time +from dataclasses import dataclass, field + +from analyst import _parse_json, _normalize_confidence, _normalize_issue_type, _valid_logql +from circuit_breaker import CircuitBreaker +from config import Config +from grafana_client import GrafanaClient +from loki_client import LokiClient +from ollama_client import OllamaClient +from prompts import ( + T2_EVIDENCE_SYSTEM, T2_EVIDENCE_PROMPT, + build_stream_guide, format_evidence_packets_for_t2, format_logql_results, + LOGQL_GEN_SYSTEM, LOGQL_GEN_PROMPT, +) +from sentry_client import SentryClient + +logger = logging.getLogger("sentinel.t2") + +# How far back to pull evidence packets from Loki +_EVIDENCE_LOOKBACK_SEC = 900 # 15 minutes + + +@dataclass +class T2Result: + root_cause: str + issue_type: str + confidence: str + correlation: str + impact: str + recommendation: str + logql_queries_used: list[str] + sentry_worthy: bool + sentry_fingerprint: str + evidence_packet_count: int + sentry_event_id: str | None + model: str + inference_duration_ms: int + logql_gather_duration_ms: int + raw_response: str = field(repr=False, default="") + + @property + def total_duration_ms(self) -> int: + return self.inference_duration_ms + self.logql_gather_duration_ms + + +class T2Agent: + def __init__( + self, + ollama: OllamaClient, + loki: LokiClient, 
+ grafana: GrafanaClient, + sentry: SentryClient, + breaker: CircuitBreaker, + config: Config, + ): + self.ollama = ollama + self.loki = loki + self.grafana = grafana + self.sentry = sentry + self.breaker = breaker + self.config = config + self._stream_guide = build_stream_guide() + + def run( + self, + end_ns: int | None = None, + lookback_sec: int = _EVIDENCE_LOOKBACK_SEC, + forced_packet_ids: list[str] | None = None, + ) -> T2Result | None: + """ + Run T2 investigation over recent evidence packets. + + forced_packet_ids: if set, only process these specific anomaly_ids + (used when T1 immediately escalates critical anomalies) + """ + if end_ns is None: + end_ns = self.loki.now_ns() + start_ns = end_ns - lookback_sec * 1_000_000_000 + + # Step 1: load evidence packets from Loki + packet_dicts = self._load_evidence_packets(start_ns, end_ns, forced_packet_ids) + if not packet_dicts: + logger.info("T2: no evidence packets in window, skipping") + return None + + # Step 2: read Sentry history for context + sentry_context = self._fetch_sentry_context(packet_dicts) + + # Step 3: generate + execute targeted LogQL for additional evidence + gather_start = time.time() + queries = self._generate_logql_queries(packet_dicts, lookback_sec // 60) + logql_results = self._execute_logql_queries(queries, start_ns, end_ns) + gather_ms = int((time.time() - gather_start) * 1000) + + # Step 4: T2 inference + system = T2_EVIDENCE_SYSTEM.format(stream_guide=self._stream_guide) + prompt = T2_EVIDENCE_PROMPT.format( + evidence_text=format_evidence_packets_for_t2(packet_dicts), + sentry_context=sentry_context, + logql_results=format_logql_results(logql_results), + ) + + raw = "" + infer_ms = 0 + try: + raw, infer_ms = self.ollama.generate( + self.config.ollama_model_deep, + system + "\n\n" + prompt, + think=True, + ) + self.breaker.record_success() + except Exception as e: + self.breaker.record_failure() + logger.error("T2 inference failed: %s", e) + + parsed = _parse_json(raw) + all_queries 
= queries + list(parsed.get("logql_queries_used", [])) + + result = T2Result( + root_cause=parsed.get("root_cause", "Unable to determine root cause."), + issue_type=_normalize_issue_type(parsed.get("issue_type", "unknown")), + confidence=_normalize_confidence(parsed.get("confidence", "low")), + correlation=parsed.get("correlation", "No correlations identified."), + impact=parsed.get("impact", "Impact unknown."), + recommendation=parsed.get("recommendation", "Investigate manually."), + logql_queries_used=all_queries, + sentry_worthy=bool(parsed.get("sentry_worthy", False)), + sentry_fingerprint=str(parsed.get("sentry_fingerprint", ""))[:100], + evidence_packet_count=len(packet_dicts), + sentry_event_id=None, + model=self.config.ollama_model_deep, + inference_duration_ms=infer_ms, + logql_gather_duration_ms=gather_ms, + raw_response=raw, + ) + + # Step 5: push investigation to Loki + Grafana + self._push_investigation(result, packet_dicts, end_ns) + self._annotate_grafana(result) + + # Step 6: create Sentry issue if warranted + if result.sentry_worthy and result.confidence == "high": + event_id = self._create_sentry_issue(result, packet_dicts) + result.sentry_event_id = event_id + + logger.info( + "T2 complete: confidence=%s sentry=%s packets=%d gather=%dms infer=%dms queries=%d", + result.confidence, result.sentry_worthy, + len(packet_dicts), gather_ms, infer_ms, len(all_queries), + ) + return result + + # ── Private ─────────────────────────────────────────────────────────────── + + def _load_evidence_packets( + self, + start_ns: int, + end_ns: int, + forced_ids: list[str] | None, + ) -> list[dict]: + logql = '{app="sim-steward", event="sentinel_evidence_packet"}' + packets = self.loki.query_lines(logql, start_ns, end_ns, limit=100) + if forced_ids: + packets = [p for p in packets if p.get("anomaly_id") in forced_ids] + # Dedup by anomaly_id, keep most recent + seen: dict[str, dict] = {} + for p in packets: + aid = p.get("anomaly_id", "") + if aid not in seen or 
p.get("assembled_at_ns", 0) > seen[aid].get("assembled_at_ns", 0): + seen[aid] = p + return list(seen.values()) + + def _fetch_sentry_context(self, packet_dicts: list[dict]) -> str: + if not packet_dicts: + return "(no Sentry history available)" + # Build a query from the most severe anomaly descriptions + critical = [p for p in packet_dicts if p.get("severity") == "critical"] + sample = (critical or packet_dicts)[:3] + streams = list({p.get("detector_stream", "") for p in sample if p.get("detector_stream")}) + query = " ".join(streams) + " " + " ".join( + p.get("anomaly_description", "")[:40] for p in sample + ) + try: + issues = self.sentry.search_issues(query=query.strip()[:200], limit=5) + if not issues: + return "(no matching Sentry issues found)" + lines = [] + for issue in issues: + lines.append( + f" [{issue.get('level', '?').upper()}] {issue.get('title', '?')[:80]}" + f" (status={issue.get('status', '?')}, times_seen={issue.get('count', '?')})" + ) + if issue.get("lastSeen"): + lines.append(f" last_seen: {issue['lastSeen']}") + return "\n".join(lines) + except Exception as e: + logger.debug("Sentry context fetch failed: %s", e) + return "(Sentry unavailable)" + + def _generate_logql_queries( + self, + packet_dicts: list[dict], + window_minutes: int, + ) -> list[str]: + # Seed with suggested_logql from evidence packets + seeded = [ + p["suggested_logql"] for p in packet_dicts + if p.get("suggested_logql") and _valid_logql(p["suggested_logql"]) + ] + + if not packet_dicts: + return seeded[:5] + + anomaly_descriptions = "\n".join( + f"- {p.get('anomaly_id', '?')}: {p.get('anomaly_description', '')[:80]}" + for p in packet_dicts[:5] + ) + prompt = LOGQL_GEN_SYSTEM + "\n\n" + LOGQL_GEN_PROMPT.format( + anomaly_descriptions=anomaly_descriptions, + window_minutes=window_minutes, + ) + try: + raw, _ = self.ollama.generate( + self.config.ollama_model_fast, + prompt, + think=False, + temperature=0.0, + ) + generated = json.loads(raw) if raw.strip().startswith("[") 
else [] + if isinstance(generated, list): + combined = seeded + [q for q in generated if isinstance(q, str)] + return [q.strip() for q in combined if _valid_logql(q)][:5] + except Exception as e: + logger.debug("T2 LogQL gen failed: %s", e) + + return [q for q in seeded if _valid_logql(q)][:5] + + def _execute_logql_queries( + self, queries: list[str], start_ns: int, end_ns: int + ) -> dict[str, list[dict]]: + results = {} + for query in queries: + try: + lines = self.loki.query_lines(query, start_ns, end_ns, limit=50) + results[query] = lines + except Exception as e: + logger.debug("T2 LogQL execute failed (%s): %s", query[:60], e) + results[query] = [] + return results + + def _push_investigation( + self, result: T2Result, packet_dicts: list[dict], end_ns: int + ) -> None: + try: + self.loki.push_t2_investigation(result, packet_dicts, env=self.config.env_label) + except Exception as e: + logger.warning("Failed to push T2 investigation to Loki: %s", e) + + def _annotate_grafana(self, result: T2Result) -> None: + try: + severity_tag = "critical" if result.confidence == "high" and result.sentry_worthy else "investigation" + self.grafana.annotate_raw( + title=f"T2 Investigation [{result.confidence}]: {result.root_cause[:80]}", + text=( + f"Root cause: {result.root_cause}
" + f"Recommendation: {result.recommendation}
" + f"Type: {result.issue_type} | Packets: {result.evidence_packet_count} | " + f"Model: {result.model}" + ), + tags=["t2", result.issue_type, result.confidence, severity_tag], + ) + except Exception as e: + logger.debug("T2 Grafana annotation failed: %s", e) + + def _create_sentry_issue( + self, result: T2Result, packet_dicts: list[dict] + ) -> str | None: + try: + streams = list({p.get("detector_stream", "") for p in packet_dicts if p.get("detector_stream")}) + fingerprint = result.sentry_fingerprint or f"t2.{result.issue_type}.{streams[0] if streams else 'unknown'}" + return self.sentry.capture_behavioral_finding( + title=result.root_cause[:120], + issue_type=result.issue_type, + recommendation=result.recommendation, + confidence=result.confidence, + fingerprint=fingerprint, + context={ + "root_cause": result.root_cause, + "correlation": result.correlation, + "impact": result.impact, + "evidence_packet_count": result.evidence_packet_count, + "model": result.model, + }, + ) + except Exception as e: + logger.warning("T2 Sentry issue creation failed: %s", e) + return None diff --git a/observability/local/log-sentinel/t3_agent.py b/observability/local/log-sentinel/t3_agent.py new file mode 100644 index 0000000..3cfbf09 --- /dev/null +++ b/observability/local/log-sentinel/t3_agent.py @@ -0,0 +1,329 @@ +"""T3 — Synthesis agent. + +Runs on a mode-dependent schedule (dev: 2h, prod: 4h) or on T2 critical escalation. +Answers: "What was the user trying to do, and did it work?" + +What T3 does: + 1. Query Loki for T1 evidence packets + T2 investigations for the synthesis window + 2. Query Sentry for open issues + recent releases + 3. Build session narratives via NarrativeBuilder + 4. Run qwen3:32b /think for 7 synthesis passes (single LLM call) + 5. Update baselines.json via BaselineManager + 6. Emit sentinel_threshold_recommendation per drifted T0 threshold + 7. 
Push sentinel_synthesis + sentinel_narrative events to Loki + +Mode differences: + dev — 2h cadence, focus: Claude sessions, tool usage, code activity + prod — 4h cadence, focus: iRacing sessions, feature stability, user-facing errors +""" + +import logging +import time +from dataclasses import dataclass, field +from datetime import datetime, timezone + +from analyst import _parse_json +from baseline import BaselineManager +from circuit_breaker import CircuitBreaker +from config import Config +from grafana_client import GrafanaClient +from loki_client import LokiClient +from narrative import NarrativeBuilder +from ollama_client import OllamaClient +from prompts import T3_SYSTEM, T3_SYNTHESIS_PROMPT, build_stream_guide +from sentry_client import SentryClient +from trace import FeatureInvocation + +logger = logging.getLogger("sentinel.t3") + +# Lookbacks per mode for pulling Loki evidence +_MODE_LOOKBACK = { + "dev": 2 * 3600, + "prod": 4 * 3600, +} + + +@dataclass +class T3Result: + period_summary: str + sessions_analyzed: int + features_worked: list[str] + features_failed: list[str] + recurring_patterns: list[dict] + cost_summary: dict + regression_detected: bool + regression_detail: str + action_items: list[str] + baselines_updated: bool + threshold_recommendations: list[dict] + session_narratives: list[dict] # list of {session_id, narrative_text, ...} + model: str + inference_duration_ms: int + raw_response: str = field(repr=False, default="") + + +class T3Agent: + def __init__( + self, + ollama: OllamaClient, + loki: LokiClient, + grafana: GrafanaClient, + sentry: SentryClient, + baseline: BaselineManager, + breaker: CircuitBreaker, + config: Config, + ): + self.ollama = ollama + self.loki = loki + self.grafana = grafana + self.sentry = sentry + self.baseline = baseline + self.breaker = breaker + self.config = config + self.narrative_builder = NarrativeBuilder() + self._stream_guide = build_stream_guide() + + def run( + self, + end_ns: int | None = None, + 
invocations: list[FeatureInvocation] | None = None, + lookback_sec: int | None = None, + trigger: str = "scheduled", + ) -> T3Result: + """ + Run T3 synthesis. + + invocations: if provided (e.g. from same-cycle T1 run), used for narratives. + Otherwise T3 uses only Loki-stored invocation summaries. + trigger: "scheduled" | "t2_escalation" + """ + if end_ns is None: + end_ns = self.loki.now_ns() + if lookback_sec is None: + lookback_sec = _MODE_LOOKBACK.get(self.config.sentinel_mode, 7200) + start_ns = end_ns - lookback_sec * 1_000_000_000 + + mode = self.config.sentinel_mode + window_description = _format_window(start_ns, end_ns, mode) + + # Step 1: load evidence from Loki + evidence_packets = self._load_evidence_packets(start_ns, end_ns) + investigations = self._load_investigations(start_ns, end_ns) + + # Step 2: Sentry context + sentry_issues_text, sentry_releases_text = self._fetch_sentry_context() + + # Step 3: build session narratives + session_narratives = [] + if invocations: + all_anomalies = [ep for ep in evidence_packets] + session_narratives = self.narrative_builder.build_all( + invocations=invocations, + anomaly_dicts=all_anomalies, + t2_investigation_dicts=investigations, + ) + + narratives_text = _format_narratives_for_prompt(session_narratives) + + # Step 4: T3 LLM synthesis + system = T3_SYSTEM.format(stream_guide=self._stream_guide) + prompt = T3_SYNTHESIS_PROMPT.format( + window_description=window_description, + mode=mode, + evidence_summary=_format_evidence_summary(evidence_packets), + investigation_summary=_format_investigation_summary(investigations), + sentry_issues=sentry_issues_text, + recent_releases=sentry_releases_text, + session_narratives=narratives_text, + ) + + raw = "" + infer_ms = 0 + try: + raw, infer_ms = self.ollama.generate( + self.config.ollama_model_deep, + system + "\n\n" + prompt, + think=True, + ) + self.breaker.record_success() + except Exception as e: + self.breaker.record_failure() + logger.error("T3 inference failed: 
%s", e) + + parsed = _parse_json(raw) + + # Step 5: update baselines + baselines_updated = False + threshold_recs = [] + try: + self.baseline.compute_and_save(lookback_sec=lookback_sec) + threshold_recs = self.baseline.get_threshold_recommendations() + baselines_updated = True + logger.info("T3: baselines updated, %d threshold recommendations", len(threshold_recs)) + except Exception as e: + logger.warning("T3 baseline update failed: %s", e) + + result = T3Result( + period_summary=parsed.get("period_summary", ""), + sessions_analyzed=int(parsed.get("sessions_analyzed", len(session_narratives))), + features_worked=parsed.get("features_worked", []), + features_failed=parsed.get("features_failed", []), + recurring_patterns=parsed.get("recurring_patterns", []), + cost_summary=parsed.get("cost_summary", {}), + regression_detected=bool(parsed.get("regression_detected", False)), + regression_detail=parsed.get("regression_detail", ""), + action_items=parsed.get("action_items", []), + baselines_updated=baselines_updated, + threshold_recommendations=threshold_recs, + session_narratives=session_narratives, + model=self.config.ollama_model_deep, + inference_duration_ms=infer_ms, + raw_response=raw, + ) + + # Step 6: push all outputs + self._push_outputs(result, end_ns, trigger) + self._annotate_grafana(result, trigger) + + logger.info( + "T3 [%s/%s]: %d sessions, %d patterns, regression=%s, baselines=%s, %dms", + mode, trigger, + result.sessions_analyzed, + len(result.recurring_patterns), + result.regression_detected, + result.baselines_updated, + infer_ms, + ) + return result + + # ── Private ─────────────────────────────────────────────────────────────── + + def _load_evidence_packets(self, start_ns: int, end_ns: int) -> list[dict]: + logql = '{app="sim-steward", event="sentinel_evidence_packet"}' + try: + return self.loki.query_lines(logql, start_ns, end_ns, limit=200) + except Exception as e: + logger.warning("T3 evidence packet load failed: %s", e) + return [] + + def 
_load_investigations(self, start_ns: int, end_ns: int) -> list[dict]: + logql = '{app="sim-steward", event="sentinel_t2_investigation"}' + try: + return self.loki.query_lines(logql, start_ns, end_ns, limit=50) + except Exception as e: + logger.warning("T3 investigation load failed: %s", e) + return [] + + def _fetch_sentry_context(self) -> tuple[str, str]: + issues_text = "(Sentry unavailable)" + releases_text = "(no release data)" + try: + issues = self.sentry.search_issues(query="is:unresolved", limit=20) + if issues: + lines = [ + f" [{i.get('level', '?').upper()}] {i.get('title', '?')[:80]}" + f" (times_seen={i.get('count', '?')}, last={i.get('lastSeen', '?')[:10]})" + for i in issues + ] + issues_text = "\n".join(lines) + else: + issues_text = "(no open Sentry issues)" + except Exception as e: + logger.debug("T3 Sentry issues fetch failed: %s", e) + + try: + releases = self.sentry.find_releases(limit=5) + if releases: + lines = [ + f" {r.get('version', '?')} released {r.get('dateCreated', '?')[:10]}" + for r in releases + ] + releases_text = "\n".join(lines) + else: + releases_text = "(no releases found)" + except Exception as e: + logger.debug("T3 Sentry releases fetch failed: %s", e) + + return issues_text, releases_text + + def _push_outputs(self, result: T3Result, end_ns: int, trigger: str) -> None: + # Push synthesis summary + try: + self.loki.push_synthesis(result, trigger=trigger, env=self.config.env_label) + except Exception as e: + logger.warning("T3: failed to push synthesis to Loki: %s", e) + + # Push per-session narratives + for narrative in result.session_narratives: + try: + self.loki.push_narrative(narrative, env=self.config.env_label) + except Exception as e: + logger.debug("T3: failed to push narrative for %s: %s", narrative.get("session_id"), e) + + # Push threshold recommendations + for rec in result.threshold_recommendations: + try: + self.loki.push_threshold_recommendation(rec, env=self.config.env_label) + except Exception as e: + 
logger.debug("T3: failed to push threshold rec: %s", e) + + def _annotate_grafana(self, result: T3Result, trigger: str) -> None: + try: + regression_note = f" ⚠️ Regression: {result.regression_detail[:60]}" if result.regression_detected else "" + self.grafana.annotate_raw( + title=f"T3 Synthesis [{self.config.sentinel_mode}]: {result.sessions_analyzed} sessions", + text=( + f"{result.period_summary[:200]}{regression_note}
" + f"Patterns: {len(result.recurring_patterns)} | " + f"Baselines updated: {result.baselines_updated} | " + f"Trigger: {trigger}" + ), + tags=["t3", "synthesis", self.config.sentinel_mode, trigger], + ) + except Exception as e: + logger.debug("T3 Grafana annotation failed: %s", e) + + +# ── Helpers ─────────────────────────────────────────────────────────────────── + +def _format_window(start_ns: int, end_ns: int, mode: str) -> str: + start_dt = datetime.fromtimestamp(start_ns / 1e9, tz=timezone.utc) + end_dt = datetime.fromtimestamp(end_ns / 1e9, tz=timezone.utc) + return ( + f"{start_dt.strftime('%Y-%m-%d %H:%M')} – {end_dt.strftime('%H:%M')} UTC " + f"({int((end_ns - start_ns) / 3.6e12):.0f}h window, mode={mode})" + ) + + +def _format_evidence_summary(packets: list[dict]) -> str: + if not packets: + return " (none)" + lines = [] + for p in packets[:20]: + lines.append( + f" [{p.get('severity', '?').upper()}] {p.get('anomaly_description', '')[:80]}" + ) + if len(packets) > 20: + lines.append(f" [... 
{len(packets) - 20} more]") + return "\n".join(lines) + + +def _format_investigation_summary(investigations: list[dict]) -> str: + if not investigations: + return " (none)" + lines = [] + for inv in investigations[:10]: + lines.append( + f" [{inv.get('confidence', '?')}] {inv.get('root_cause', '')[:80]}" + f" (type={inv.get('issue_type', '?')})" + ) + return "\n".join(lines) + + +def _format_narratives_for_prompt(session_narratives: list[dict]) -> str: + if not session_narratives: + return " (no session narratives available — no invocations this window)" + parts = [] + for n in session_narratives[:10]: + parts.append(n.get("narrative_text", "")[:600]) + return "\n\n".join(parts) diff --git a/observability/local/log-sentinel/timeline.py b/observability/local/log-sentinel/timeline.py new file mode 100644 index 0000000..649961e --- /dev/null +++ b/observability/local/log-sentinel/timeline.py @@ -0,0 +1,200 @@ +"""Cross-stream timeline builder — correlates events from all Loki streams.""" + +import logging +from dataclasses import dataclass, field +from datetime import datetime, timezone + +from loki_client import LokiClient +from circuit_breaker import CircuitBreaker + +logger = logging.getLogger("sentinel.timeline") + +# Streams to query and their display names +STREAMS = [ + ("sim-steward", '{app="sim-steward"} | json'), + ("claude-dev-logging", '{app="claude-dev-logging"} | json'), + ("claude-token-metrics", '{app="claude-token-metrics"} | json'), +] + +# Events to exclude from the timeline (too noisy) +_SKIP_EVENTS = {"sentinel_log", "sentinel_cycle", "sentinel_analyst_run", "sentinel_timeline_built"} + +# Temporal correlation window (nanoseconds) +_TEMPORAL_WINDOW_NS = 30 * 1_000_000_000 + + +@dataclass +class TimelineEvent: + ts_ns: int + ts_iso: str + stream: str + event_type: str + domain: str + component: str + message: str + session_id: str | None + subsession_id: str | None + raw: dict = field(repr=False) + + +class TimelineBuilder: + def __init__(self, 
loki: LokiClient, breaker: CircuitBreaker): + self.loki = loki + self.breaker = breaker + + def build( + self, + start_ns: int, + end_ns: int, + limit_per_stream: int = 200, + ) -> list[TimelineEvent]: + """Query all streams, merge and sort chronologically.""" + if not self.breaker.allow_request(): + logger.warning("Timeline build skipped: Loki circuit open") + return [] + + all_events: list[TimelineEvent] = [] + try: + for stream_name, logql in STREAMS: + lines = self.loki.query_lines(logql, start_ns, end_ns, limit=limit_per_stream) + self.breaker.record_success() + for line in lines: + ev = self._parse_event(stream_name, line) + if ev: + all_events.append(ev) + except Exception as e: + self.breaker.record_failure() + logger.error("Timeline build error: %s", e) + return all_events + + all_events.sort(key=lambda e: e.ts_ns) + return all_events + + def _parse_event(self, stream: str, line: dict) -> TimelineEvent | None: + event_type = line.get("event", "") + if event_type in _SKIP_EVENTS: + return None + + # Parse timestamp — prefer the log's own timestamp field, fallback to now + ts_ns = 0 + ts_iso = line.get("timestamp", "") + if ts_iso: + try: + dt = datetime.fromisoformat(ts_iso.replace("Z", "+00:00")) + ts_ns = int(dt.timestamp() * 1e9) + except (ValueError, TypeError): + pass + if not ts_ns: + ts_ns = self.loki.now_ns() + ts_iso = datetime.now(timezone.utc).isoformat() + + return TimelineEvent( + ts_ns=ts_ns, + ts_iso=ts_iso, + stream=stream, + event_type=event_type or "unknown", + domain=line.get("domain", ""), + component=line.get("component", ""), + message=line.get("message", ""), + session_id=line.get("session_id") or None, + subsession_id=line.get("subsession_id") or None, + raw=line, + ) + + def get_active_sessions(self, events: list[TimelineEvent]) -> list[str]: + """Return distinct session_ids seen in the event list.""" + seen = [] + for ev in events: + if ev.session_id and ev.session_id not in seen: + seen.append(ev.session_id) + return seen + + def 
to_prompt_text(self, events: list[TimelineEvent], max_events: int = 60) -> str: + """Format timeline as human-readable numbered lines for LLM consumption.""" + if not events: + return "(no events in this window)" + + truncated = len(events) > max_events + shown = events[-max_events:] if truncated else events + + # Group by session_id + sessions: dict[str, list[TimelineEvent]] = {} + no_session: list[TimelineEvent] = [] + + for ev in shown: + if ev.session_id: + sessions.setdefault(ev.session_id, []).append(ev) + else: + no_session.append(ev) + + lines = [] + counter = 1 + + for sid, evts in sessions.items(): + # Find subsession if present + sub = next((e.subsession_id for e in evts if e.subsession_id), None) + header = f"SESSION {sid[:8]}" + if sub: + header += f" [subsession {sub}]" + lines.append(header) + for ev in evts: + lines.append(_format_event_line(counter, ev)) + counter += 1 + lines.append("") + + if no_session: + lines.append("CO-OCCURRING (no session correlation)") + for ev in no_session: + lines.append(_format_event_line(counter, ev)) + counter += 1 + + if truncated: + lines.append( + f"\n[NOTE: {len(events) - max_events} earlier events not shown. " + f"Earliest: {events[0].ts_iso}, Latest: {events[-1].ts_iso}]" + ) + + return "\n".join(lines) + + def get_stats(self, events: list[TimelineEvent]) -> dict: + sessions = self.get_active_sessions(events) + streams = list({e.stream for e in events}) + return { + "event_count": len(events), + "session_count": len(sessions), + "streams_queried": streams, + } + + +def _format_event_line(idx: int, ev: TimelineEvent) -> str: + # Extract time portion only (HH:MM:SS) + try: + t = ev.ts_iso[11:19] + except (IndexError, TypeError): + t = "??:??:??" 
+ + # Pick the most informative extra field from raw + extra = _pick_extra(ev) + extra_str = f" {extra}" if extra else "" + + return ( + f" [{idx:03d}] {t} {ev.stream:<25} {ev.event_type:<30}{extra_str}" + ) + + +def _pick_extra(ev: TimelineEvent) -> str: + """Extract a short key=value summary from the raw event for the timeline.""" + raw = ev.raw + candidates = [ + ("action", raw.get("action")), + ("tool", raw.get("tool_name")), + ("event_type", raw.get("hook_type")), + ("track", raw.get("track_display_name")), + ("driver", raw.get("display_name")), + ("cost_usd", raw.get("cost_usd")), + ("tokens", raw.get("total_tokens")), + ("error", raw.get("error")), + ("duration_ms", raw.get("duration_ms")), + ] + parts = [f"{k}={v}" for k, v in candidates if v is not None and v != ""] + return " ".join(parts[:3]) diff --git a/observability/local/log-sentinel/trace.py b/observability/local/log-sentinel/trace.py new file mode 100644 index 0000000..5d336a4 --- /dev/null +++ b/observability/local/log-sentinel/trace.py @@ -0,0 +1,225 @@ +"""Feature invocation model — groups timeline events into traceable user actions. + +Three correlation strategies (applied in order): + 1. trace_id exact — events share a trace_id field (plugin + dashboard instrumented) + 2. temporal — events cluster within 150ms with expected sequence patterns + 3. 
inferred — fallback: group by session_id + 1-minute time bucket +""" + +import logging +import uuid +from dataclasses import dataclass, field + +from timeline import TimelineEvent + +logger = logging.getLogger("sentinel.trace") + +# Temporal grouping window (nanoseconds) +_TEMPORAL_WINDOW_NS = 150_000_000 # 150ms + +# Events that anchor the start of a new invocation in temporal mode +_ANCHOR_EVENTS = { + "dashboard_ui_event", + "action_dispatched", + "iracing_session_start", + "iracing_replay_seek", +} + +# Events that signal the end of an invocation +_TERMINAL_EVENTS = { + "action_result", + "iracing_session_end", +} + +# Inferred grouping bucket (nanoseconds) +_BUCKET_NS = 60 * 1_000_000_000 # 1 minute + + +@dataclass +class FeatureInvocation: + invocation_id: str # trace_id if available, else generated UUID + correlation_method: str # "trace_id" | "temporal" | "inferred" + start_ts_ns: int + end_ts_ns: int + action_type: str # "replay_seek" | "incident_review" | "session_start" | etc. + trigger_event: TimelineEvent # first event in this invocation + events: list[TimelineEvent] # all events belonging to this invocation + success: bool | None # did the feature complete? 
None = unknown + error: str | None # error message if failed + duration_ms: int + streams_involved: list[str] # which Loki streams contributed events + + def to_summary_dict(self) -> dict: + """Compact serializable summary for Loki push and LLM context.""" + return { + "invocation_id": self.invocation_id, + "correlation_method": self.correlation_method, + "action_type": self.action_type, + "success": self.success, + "error": self.error, + "duration_ms": self.duration_ms, + "event_count": len(self.events), + "streams": self.streams_involved, + "start_ts_ns": self.start_ts_ns, + "end_ts_ns": self.end_ts_ns, + } + + +class InvocationBuilder: + """Groups a flat list of TimelineEvents into FeatureInvocation objects.""" + + def build(self, events: list[TimelineEvent]) -> list[FeatureInvocation]: + """ + Returns invocations built from the event list. + Events are consumed across three passes; any event can only belong to one invocation. + """ + remaining = list(events) + invocations: list[FeatureInvocation] = [] + + # Pass 1 — exact trace_id grouping + trace_invocations, remaining = self._group_by_trace_id(remaining) + invocations.extend(trace_invocations) + + # Pass 2 — temporal window grouping + temporal_invocations, remaining = self._group_temporal(remaining) + invocations.extend(temporal_invocations) + + # Pass 3 — inferred (session + time bucket) + inferred_invocations = self._group_inferred(remaining) + invocations.extend(inferred_invocations) + + logger.debug( + "InvocationBuilder: %d events → %d invocations (%d trace_id, %d temporal, %d inferred)", + len(events), + len(invocations), + len(trace_invocations), + len(temporal_invocations), + len(inferred_invocations), + ) + return sorted(invocations, key=lambda i: i.start_ts_ns) + + # ── Pass 1: exact trace_id ───────────────────────────────────────────── + + def _group_by_trace_id( + self, events: list[TimelineEvent] + ) -> tuple[list[FeatureInvocation], list[TimelineEvent]]: + groups: dict[str, list[TimelineEvent]] 
= {} + leftover: list[TimelineEvent] = [] + + for ev in events: + tid = ev.raw.get("trace_id") + if tid: + groups.setdefault(tid, []).append(ev) + else: + leftover.append(ev) + + invocations = [ + _build_invocation(group, "trace_id", trace_id=tid) + for tid, group in groups.items() + ] + return invocations, leftover + + # ── Pass 2: temporal window ──────────────────────────────────────────── + + def _group_temporal( + self, events: list[TimelineEvent] + ) -> tuple[list[FeatureInvocation], list[TimelineEvent]]: + if not events: + return [], [] + + sorted_events = sorted(events, key=lambda e: e.ts_ns) + groups: list[list[TimelineEvent]] = [] + current: list[TimelineEvent] = [] + + for ev in sorted_events: + if not current: + current = [ev] + continue + + gap = ev.ts_ns - current[-1].ts_ns + is_anchor = ev.event_type in _ANCHOR_EVENTS + + if is_anchor or gap > _TEMPORAL_WINDOW_NS: + if current: + groups.append(current) + current = [ev] + else: + current.append(ev) + + if current: + groups.append(current) + + # Drop single-event groups with no action signal — too noisy + meaningful = [g for g in groups if len(g) > 1 or g[0].event_type in _ANCHOR_EVENTS] + leftover = [ev for g in groups if g not in meaningful for ev in g] + + invocations = [_build_invocation(g, "temporal") for g in meaningful] + return invocations, leftover + + # ── Pass 3: inferred (session + time bucket) ─────────────────────────── + + def _group_inferred(self, events: list[TimelineEvent]) -> list[FeatureInvocation]: + if not events: + return [] + + buckets: dict[str, list[TimelineEvent]] = {} + for ev in events: + sid = ev.session_id or "no_session" + bucket = ev.ts_ns // _BUCKET_NS + key = f"{sid}:{bucket}" + buckets.setdefault(key, []).append(ev) + + return [_build_invocation(group, "inferred") for group in buckets.values()] + + +# ── Helpers ──────────────────────────────────────────────────────────────── + +def _build_invocation( + events: list[TimelineEvent], + method: str, + trace_id: str | 
None = None, +) -> FeatureInvocation: + sorted_events = sorted(events, key=lambda e: e.ts_ns) + start_ns = sorted_events[0].ts_ns + end_ns = sorted_events[-1].ts_ns + duration_ms = max(0, (end_ns - start_ns) // 1_000_000) + + # action_type: prefer action_dispatched.raw["action"], else trigger event_type + action_type = "unknown" + for ev in sorted_events: + if ev.event_type == "action_dispatched": + action_type = ev.raw.get("action") or ev.event_type + break + if action_type == "unknown": + action_type = sorted_events[0].event_type or "unknown" + + # success / error: look for terminal events + success: bool | None = None + error: str | None = None + for ev in sorted_events: + if ev.event_type in _TERMINAL_EVENTS or ev.event_type.endswith("_result"): + raw_success = ev.raw.get("success") + raw_error = ev.raw.get("error") + if raw_error: + success = False + error = str(raw_error)[:200] + break + if raw_success is not None: + success = bool(raw_success) + break + + streams = list({ev.stream for ev in sorted_events}) + + return FeatureInvocation( + invocation_id=trace_id or str(uuid.uuid4()), + correlation_method=method, + start_ts_ns=start_ns, + end_ts_ns=end_ns, + action_type=action_type, + trigger_event=sorted_events[0], + events=sorted_events, + success=success, + error=error, + duration_ms=duration_ms, + streams_involved=streams, + ) diff --git a/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs b/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs index 1175f9c..5255eaf 100644 --- a/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs +++ b/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs @@ -419,12 +419,13 @@ private void TickPreflight() return; } - // Seek to near-end of replay for L2 - int seekTarget = Math.Max(0, _replayFrameTotal - 10); + // Seek to end of replay using ReplaySearch(ToEnd) — more reliable than + // frame-based seek (ReplayFrameNumEnd can be 0 or stale, which would + // seek to frame 0 and read SessionState at 
replay start instead of end). + _preflightSettleTicks = 0; try { - _irsdk.ReplaySetPlaySpeed(1, false); - _irsdk.ReplaySetPlayPosition(IRacingSdkEnum.RpyPosMode.Begin, seekTarget); + _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.ToEnd); } catch (Exception ex) { @@ -444,8 +445,10 @@ private void TickPreflight() { _preflightSettleTicks++; int frame = SafeGetInt("ReplayFrameNum"); - int seekTarget = Math.Max(0, _replayFrameTotal - 10); - if (Math.Abs(frame - seekTarget) <= 30 || _preflightSettleTicks > 300) + // ReplaySearch(ToEnd) is fire-and-forget; we don't have an exact target frame. + // Settle when: near ReplayFrameNumEnd (if valid) OR after 60 ticks (1s min wait). + bool nearEnd = _replayFrameTotal > 0 && frame >= _replayFrameTotal - 60; + if (nearEnd || _preflightSettleTicks >= 60 || _preflightSettleTicks > 300) { int sessionState = 0; try { sessionState = _irsdk.Data.GetInt("SessionState"); } catch { }