+ <>
{/* Render the "Ask AI" button if the chat is not shown */}
{!showChat && (
<button onClick={() => setShowChat(true)}>Ask AI</button>
)}
{/* Render the ChatComponent when showChat is true, passing the onClose prop */}
- {showChat && <ChatComponent onClose={() => setShowChat(false)} />}
-
+ {showChat && (
+ <ChatComponent onClose={() => setShowChat(false)} />
+ )}
+ </>
);
}
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 0000000..b772907
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,5474 @@
+{
+ "name": "docs",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "dependencies": {
+ "katex": "^0.16.11",
+ "next": "^14.2.3",
+ "nextra": "^2.13.4",
+ "nextra-theme-docs": "^2.13.4",
+ "react": "^18.3.1",
+ "react-dom": "^18.3.1",
+ "react-katex": "^3.0.1"
+ }
+ },
+ "node_modules/@babel/runtime": {
+ "version": "7.27.6",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz",
+ "integrity": "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@braintree/sanitize-url": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz",
+ "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==",
+ "license": "MIT"
+ },
+ "node_modules/@headlessui/react": {
+ "version": "1.7.19",
+ "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.19.tgz",
+ "integrity": "sha512-Ll+8q3OlMJfJbAKM/+/Y2q6PPYbryqNTXDbryx7SXLIDamkF6iQFbriYHga0dY44PvDhvvBWCx1Xj4U5+G4hOw==",
+ "license": "MIT",
+ "dependencies": {
+ "@tanstack/react-virtual": "^3.0.0-beta.60",
+ "client-only": "^0.0.1"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "react": "^16 || ^17 || ^18",
+ "react-dom": "^16 || ^17 || ^18"
+ }
+ },
+ "node_modules/@mdx-js/mdx": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-2.3.0.tgz",
+ "integrity": "sha512-jLuwRlz8DQfQNiUCJR50Y09CGPq3fLtmtUQfVrj79E0JWu3dvsVcxVIcfhR5h0iXu+/z++zDrYeiJqifRynJkA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/mdx": "^2.0.0",
+ "estree-util-build-jsx": "^2.0.0",
+ "estree-util-is-identifier-name": "^2.0.0",
+ "estree-util-to-js": "^1.1.0",
+ "estree-walker": "^3.0.0",
+ "hast-util-to-estree": "^2.0.0",
+ "markdown-extensions": "^1.0.0",
+ "periscopic": "^3.0.0",
+ "remark-mdx": "^2.0.0",
+ "remark-parse": "^10.0.0",
+ "remark-rehype": "^10.0.0",
+ "unified": "^10.0.0",
+ "unist-util-position-from-estree": "^1.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "unist-util-visit": "^4.0.0",
+ "vfile": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/unist-util-visit": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
+ "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/unist-util-visit-parents": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
+ "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@mdx-js/react": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-2.3.0.tgz",
+ "integrity": "sha512-zQH//gdOmuu7nt2oJR29vFhDv88oGPmVw6BggmrHeMI+xgEkp1B2dX9/bMBSYtK0dyLX/aOmesKS09g222K1/g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdx": "^2.0.0",
+ "@types/react": ">=16"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "react": ">=16"
+ }
+ },
+ "node_modules/@napi-rs/simple-git": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git/-/simple-git-0.1.19.tgz",
+ "integrity": "sha512-jMxvwzkKzd3cXo2EB9GM2ic0eYo2rP/BS6gJt6HnWbsDO1O8GSD4k7o2Cpr2YERtMpGF/MGcDfsfj2EbQPtrXw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 10"
+ },
+ "optionalDependencies": {
+ "@napi-rs/simple-git-android-arm-eabi": "0.1.19",
+ "@napi-rs/simple-git-android-arm64": "0.1.19",
+ "@napi-rs/simple-git-darwin-arm64": "0.1.19",
+ "@napi-rs/simple-git-darwin-x64": "0.1.19",
+ "@napi-rs/simple-git-freebsd-x64": "0.1.19",
+ "@napi-rs/simple-git-linux-arm-gnueabihf": "0.1.19",
+ "@napi-rs/simple-git-linux-arm64-gnu": "0.1.19",
+ "@napi-rs/simple-git-linux-arm64-musl": "0.1.19",
+ "@napi-rs/simple-git-linux-powerpc64le-gnu": "0.1.19",
+ "@napi-rs/simple-git-linux-s390x-gnu": "0.1.19",
+ "@napi-rs/simple-git-linux-x64-gnu": "0.1.19",
+ "@napi-rs/simple-git-linux-x64-musl": "0.1.19",
+ "@napi-rs/simple-git-win32-arm64-msvc": "0.1.19",
+ "@napi-rs/simple-git-win32-x64-msvc": "0.1.19"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-android-arm-eabi": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-android-arm-eabi/-/simple-git-android-arm-eabi-0.1.19.tgz",
+ "integrity": "sha512-XryEH/hadZ4Duk/HS/HC/cA1j0RHmqUGey3MsCf65ZS0VrWMqChXM/xlTPWuY5jfCc/rPubHaqI7DZlbexnX/g==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-android-arm64": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-android-arm64/-/simple-git-android-arm64-0.1.19.tgz",
+ "integrity": "sha512-ZQ0cPvY6nV9p7zrR9ZPo7hQBkDAcY/CHj3BjYNhykeUCiSNCrhvwX+WEeg5on8M1j4d5jcI/cwVG2FslfiByUg==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-darwin-arm64": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-darwin-arm64/-/simple-git-darwin-arm64-0.1.19.tgz",
+ "integrity": "sha512-viZB5TYgjA1vH+QluhxZo0WKro3xBA+1xSzYx8mcxUMO5gnAoUMwXn0ZO/6Zy6pai+aGae+cj6XihGnrBRu3Pg==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-darwin-x64": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-darwin-x64/-/simple-git-darwin-x64-0.1.19.tgz",
+ "integrity": "sha512-6dNkzSNUV5X9rsVYQbpZLyJu4Gtkl2vNJ3abBXHX/Etk0ILG5ZasO3ncznIANZQpqcbn/QPHr49J2QYAXGoKJA==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-freebsd-x64": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-freebsd-x64/-/simple-git-freebsd-x64-0.1.19.tgz",
+ "integrity": "sha512-sB9krVIchzd20FjI2ZZ8FDsTSsXLBdnwJ6CpeVyrhXHnoszfcqxt49ocZHujAS9lMpXq7i2Nv1EXJmCy4KdhwA==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-arm-gnueabihf": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-arm-gnueabihf/-/simple-git-linux-arm-gnueabihf-0.1.19.tgz",
+ "integrity": "sha512-6HPn09lr9N1n5/XKfP8Np53g4fEXVxOFqNkS6rTH3Rm1lZHdazTRH62RggXLTguZwjcE+MvOLvoTIoR5kAS8+g==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-arm64-gnu": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-arm64-gnu/-/simple-git-linux-arm64-gnu-0.1.19.tgz",
+ "integrity": "sha512-G0gISckt4cVDp3oh5Z6PV3GHJrJO6Z8bIS+9xA7vTtKdqB1i5y0n3cSFLlzQciLzhr+CajFD27doW4lEyErQ/Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-arm64-musl": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-arm64-musl/-/simple-git-linux-arm64-musl-0.1.19.tgz",
+ "integrity": "sha512-OwTRF+H4IZYxmDFRi1IrLMfqbdIpvHeYbJl2X94NVsLVOY+3NUHvEzL3fYaVx5urBaMnIK0DD3wZLbcueWvxbA==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-powerpc64le-gnu": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-powerpc64le-gnu/-/simple-git-linux-powerpc64le-gnu-0.1.19.tgz",
+ "integrity": "sha512-p7zuNNVyzpRvkCt2RIGv9FX/WPcPbZ6/FRUgUTZkA2WU33mrbvNqSi4AOqCCl6mBvEd+EOw5NU4lS9ORRJvAEg==",
+ "cpu": [
+ "powerpc64le"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-s390x-gnu": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-s390x-gnu/-/simple-git-linux-s390x-gnu-0.1.19.tgz",
+ "integrity": "sha512-6N2vwJUPLiak8GLrS0a3is0gSb0UwI2CHOOqtvQxPmv+JVI8kn3vKiUscsktdDb0wGEPeZ8PvZs0y8UWix7K4g==",
+ "cpu": [
+ "s390x"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-x64-gnu": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-x64-gnu/-/simple-git-linux-x64-gnu-0.1.19.tgz",
+ "integrity": "sha512-61YfeO1J13WK7MalLgP3QlV6of2rWnVw1aqxWkAgy/lGxoOFSJ4Wid6ANVCEZk4tJpPX/XNeneqkUz5xpeb2Cw==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-linux-x64-musl": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-linux-x64-musl/-/simple-git-linux-x64-musl-0.1.19.tgz",
+ "integrity": "sha512-cCTWNpMJnN3PrUBItWcs3dQKCydsIasbrS3laMzq8k7OzF93Zrp2LWDTPlLCO9brbBVpBzy2Qk5Xg9uAfe/Ukw==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-win32-arm64-msvc": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-win32-arm64-msvc/-/simple-git-win32-arm64-msvc-0.1.19.tgz",
+ "integrity": "sha512-sWavb1BjeLKKBA+PbTsRSSzVNfb7V/dOpaJvkgR5d2kWFn/AHmCZHSSj/3nyZdYf0BdDC+DIvqk3daAEZ6QMVw==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@napi-rs/simple-git-win32-x64-msvc": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/@napi-rs/simple-git-win32-x64-msvc/-/simple-git-win32-x64-msvc-0.1.19.tgz",
+ "integrity": "sha512-FmNuPoK4+qwaSCkp8lm3sJlrxk374enW+zCE5ZksXlZzj/9BDJAULJb5QUJ7o9Y8A/G+d8LkdQLPBE2Jaxe5XA==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/env": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.30.tgz",
+ "integrity": "sha512-KBiBKrDY6kxTQWGzKjQB7QirL3PiiOkV7KW98leHFjtVRKtft76Ra5qSA/SL75xT44dp6hOcqiiJ6iievLOYug==",
+ "license": "MIT"
+ },
+ "node_modules/@next/swc-darwin-arm64": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.30.tgz",
+ "integrity": "sha512-EAqfOTb3bTGh9+ewpO/jC59uACadRHM6TSA9DdxJB/6gxOpyV+zrbqeXiFTDy9uV6bmipFDkfpAskeaDcO+7/g==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-darwin-x64": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.30.tgz",
+ "integrity": "sha512-TyO7Wz1IKE2kGv8dwQ0bmPL3s44EKVencOqwIY69myoS3rdpO1NPg5xPM5ymKu7nfX4oYJrpMxv8G9iqLsnL4A==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm64-gnu": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.30.tgz",
+ "integrity": "sha512-I5lg1fgPJ7I5dk6mr3qCH1hJYKJu1FsfKSiTKoYwcuUf53HWTrEkwmMI0t5ojFKeA6Vu+SfT2zVy5NS0QLXV4Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm64-musl": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.30.tgz",
+ "integrity": "sha512-8GkNA+sLclQyxgzCDs2/2GSwBc92QLMrmYAmoP2xehe5MUKBLB2cgo34Yu242L1siSkwQkiV4YLdCnjwc/Micw==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-x64-gnu": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.30.tgz",
+ "integrity": "sha512-8Ly7okjssLuBoe8qaRCcjGtcMsv79hwzn/63wNeIkzJVFVX06h5S737XNr7DZwlsbTBDOyI6qbL2BJB5n6TV/w==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-x64-musl": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.30.tgz",
+ "integrity": "sha512-dBmV1lLNeX4mR7uI7KNVHsGQU+OgTG5RGFPi3tBJpsKPvOPtg9poyav/BYWrB3GPQL4dW5YGGgalwZ79WukbKQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-arm64-msvc": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.30.tgz",
+ "integrity": "sha512-6MMHi2Qc1Gkq+4YLXAgbYslE1f9zMGBikKMdmQRHXjkGPot1JY3n5/Qrbg40Uvbi8//wYnydPnyvNhI1DMUW1g==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-ia32-msvc": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.30.tgz",
+ "integrity": "sha512-pVZMnFok5qEX4RT59mK2hEVtJX+XFfak+/rjHpyFh7juiT52r177bfFKhnlafm0UOSldhXjj32b+LZIOdswGTg==",
+ "cpu": [
+ "ia32"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-x64-msvc": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.30.tgz",
+ "integrity": "sha512-4KCo8hMZXMjpTzs3HOqOGYYwAXymXIy7PEPAXNEcEOyKqkjiDlECumrWziy+JEF0Oi4ILHGxzgQ3YiMGG2t/Lg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@popperjs/core": {
+ "version": "2.11.8",
+ "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz",
+ "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/popperjs"
+ }
+ },
+ "node_modules/@swc/counter": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
+ "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
+ "license": "Apache-2.0"
+ },
+ "node_modules/@swc/helpers": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz",
+ "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@swc/counter": "^0.1.3",
+ "tslib": "^2.4.0"
+ }
+ },
+ "node_modules/@tanstack/react-virtual": {
+ "version": "3.13.10",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.10.tgz",
+ "integrity": "sha512-nvrzk4E9mWB4124YdJ7/yzwou7IfHxlSef6ugCFcBfRmsnsma3heciiiV97sBNxyc3VuwtZvmwXd0aB5BpucVw==",
+ "license": "MIT",
+ "dependencies": {
+ "@tanstack/virtual-core": "3.13.10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/@tanstack/virtual-core": {
+ "version": "3.13.10",
+ "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.10.tgz",
+ "integrity": "sha512-sPEDhXREou5HyZYqSWIqdU580rsF6FGeN7vpzijmP3KTiOGjOMZASz4Y6+QKjiFQwhWrR58OP8izYaNGVxvViA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@theguild/remark-mermaid": {
+ "version": "0.0.5",
+ "resolved": "https://registry.npmjs.org/@theguild/remark-mermaid/-/remark-mermaid-0.0.5.tgz",
+ "integrity": "sha512-e+ZIyJkEv9jabI4m7q29wZtZv+2iwPGsXJ2d46Zi7e+QcFudiyuqhLhHG/3gX3ZEB+hxTch+fpItyMS8jwbIcw==",
+ "license": "MIT",
+ "dependencies": {
+ "mermaid": "^10.2.2",
+ "unist-util-visit": "^5.0.0"
+ },
+ "peerDependencies": {
+ "react": "^18.2.0"
+ }
+ },
+ "node_modules/@theguild/remark-npm2yarn": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/@theguild/remark-npm2yarn/-/remark-npm2yarn-0.2.1.tgz",
+ "integrity": "sha512-jUTFWwDxtLEFtGZh/TW/w30ySaDJ8atKWH8dq2/IiQF61dPrGfETpl0WxD0VdBfuLOeU14/kop466oBSRO/5CA==",
+ "license": "MIT",
+ "dependencies": {
+ "npm-to-yarn": "^2.1.0",
+ "unist-util-visit": "^5.0.0"
+ }
+ },
+ "node_modules/@types/acorn": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz",
+ "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
+ "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
+ "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/debug": {
+ "version": "4.1.12",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
+ "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/estree-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz",
+ "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/hast": {
+ "version": "2.3.10",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz",
+ "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2"
+ }
+ },
+ "node_modules/@types/js-yaml": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz",
+ "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/katex": {
+ "version": "0.16.7",
+ "resolved": "https://registry.npmjs.org/@types/katex/-/katex-0.16.7.tgz",
+ "integrity": "sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/mdast": {
+ "version": "3.0.15",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.15.tgz",
+ "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2"
+ }
+ },
+ "node_modules/@types/mdx": {
+ "version": "2.0.13",
+ "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz",
+ "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/ms": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+ "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/react": {
+ "version": "19.1.8",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz",
+ "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==",
+ "license": "MIT",
+ "dependencies": {
+ "csstype": "^3.0.2"
+ }
+ },
+ "node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
+ "license": "MIT"
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz",
+ "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==",
+ "license": "ISC"
+ },
+ "node_modules/acorn": {
+ "version": "8.15.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
+ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
+ "license": "MIT",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/ansi-sequence-parser": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-sequence-parser/-/ansi-sequence-parser-1.1.3.tgz",
+ "integrity": "sha512-+fksAx9eG3Ab6LDnLs3ZqZa8KVJ/jYnX+D4Qe1azX+LFGFAXqynCQLOdLpNYN/l9e7l6hMWwZbrnctqr6eSQSw==",
+ "license": "MIT"
+ },
+ "node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/arch": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz",
+ "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/arg": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-1.0.0.tgz",
+ "integrity": "sha512-Wk7TEzl1KqvTGs/uyhmHO/3XLd3t1UeU4IstvPXVzGPM522cTjqjNZ99esCkcL52sjqjo8e8CTBcWhkxvGzoAw==",
+ "license": "MIT"
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "license": "MIT",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/astring": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz",
+ "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==",
+ "license": "MIT",
+ "bin": {
+ "astring": "bin/astring"
+ }
+ },
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/busboy": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
+ "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
+ "dependencies": {
+ "streamsearch": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=10.16.0"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001724",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001724.tgz",
+ "integrity": "sha512-WqJo7p0TbHDOythNTqYujmaJTvtYRZrjpP8TCvH6Vb9CYJerJNKamKzIWOM4BkQatWj9H2lYulpdAQNBe7QhNA==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/chalk": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.3.0.tgz",
+ "integrity": "sha512-Az5zJR2CBujap2rqXGaJKaPHyJ0IrUimvYNX+ncCy8PJP4ltOGTrHUIo097ZaL2zMeKYpiCdqDvS6zdrTFok3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^3.1.0",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/chalk/node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-html4": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz",
+ "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz",
+ "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz",
+ "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/client-only": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
+ "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==",
+ "license": "MIT"
+ },
+ "node_modules/clipboardy": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/clipboardy/-/clipboardy-1.2.2.tgz",
+ "integrity": "sha512-16KrBOV7bHmHdxcQiCvfUFYVFyEah4FI8vYT1Fr7CGSA4G+xBWMEfUEQJS1hxeHGtI9ju1Bzs9uXSbj5HZKArw==",
+ "license": "MIT",
+ "dependencies": {
+ "arch": "^2.1.0",
+ "execa": "^0.8.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/clsx": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
+ "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "license": "MIT"
+ },
+ "node_modules/comma-separated-tokens": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz",
+ "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/commander": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz",
+ "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/compute-scroll-into-view": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.1.1.tgz",
+ "integrity": "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==",
+ "license": "MIT"
+ },
+ "node_modules/cose-base": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz",
+ "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==",
+ "license": "MIT",
+ "dependencies": {
+ "layout-base": "^1.0.0"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz",
+ "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==",
+ "license": "MIT",
+ "dependencies": {
+ "lru-cache": "^4.0.1",
+ "shebang-command": "^1.2.0",
+ "which": "^1.2.9"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
+ "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
+ "license": "MIT"
+ },
+ "node_modules/cytoscape": {
+ "version": "3.32.0",
+ "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.32.0.tgz",
+ "integrity": "sha512-5JHBC9n75kz5851jeklCPmZWcg3hUe6sjqJvyk3+hVqFaKcHwHgxsjeN1yLmggoUc6STbtm9/NQyabQehfjvWQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/cytoscape-cose-bilkent": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz",
+ "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==",
+ "license": "MIT",
+ "dependencies": {
+ "cose-base": "^1.0.0"
+ },
+ "peerDependencies": {
+ "cytoscape": "^3.2.0"
+ }
+ },
+ "node_modules/d3": {
+ "version": "7.9.0",
+ "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz",
+ "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "3",
+ "d3-axis": "3",
+ "d3-brush": "3",
+ "d3-chord": "3",
+ "d3-color": "3",
+ "d3-contour": "4",
+ "d3-delaunay": "6",
+ "d3-dispatch": "3",
+ "d3-drag": "3",
+ "d3-dsv": "3",
+ "d3-ease": "3",
+ "d3-fetch": "3",
+ "d3-force": "3",
+ "d3-format": "3",
+ "d3-geo": "3",
+ "d3-hierarchy": "3",
+ "d3-interpolate": "3",
+ "d3-path": "3",
+ "d3-polygon": "3",
+ "d3-quadtree": "3",
+ "d3-random": "3",
+ "d3-scale": "4",
+ "d3-scale-chromatic": "3",
+ "d3-selection": "3",
+ "d3-shape": "3",
+ "d3-time": "3",
+ "d3-time-format": "4",
+ "d3-timer": "3",
+ "d3-transition": "3",
+ "d3-zoom": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-array": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
+ "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
+ "license": "ISC",
+ "dependencies": {
+ "internmap": "1 - 2"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-axis": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz",
+ "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-brush": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz",
+ "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "3",
+ "d3-transition": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-chord": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz",
+ "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-path": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-contour": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz",
+ "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "^3.2.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==",
+ "license": "ISC",
+ "dependencies": {
+ "delaunator": "5"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dsv": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz",
+ "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==",
+ "license": "ISC",
+ "dependencies": {
+ "commander": "7",
+ "iconv-lite": "0.6",
+ "rw": "1"
+ },
+ "bin": {
+ "csv2json": "bin/dsv2json.js",
+ "csv2tsv": "bin/dsv2dsv.js",
+ "dsv2dsv": "bin/dsv2dsv.js",
+ "dsv2json": "bin/dsv2json.js",
+ "json2csv": "bin/json2dsv.js",
+ "json2dsv": "bin/json2dsv.js",
+ "json2tsv": "bin/json2dsv.js",
+ "tsv2csv": "bin/dsv2dsv.js",
+ "tsv2json": "bin/dsv2json.js"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dsv/node_modules/commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-fetch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz",
+ "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dsv": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-force": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz",
+ "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-quadtree": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-format": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
+ "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-geo": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz",
+ "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2.5.0 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-hierarchy": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz",
+ "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-path": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
+ "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-polygon": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz",
+ "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-quadtree": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz",
+ "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-random": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz",
+ "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-sankey": {
+ "version": "0.12.3",
+ "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz",
+ "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "1 - 2",
+ "d3-shape": "^1.2.0"
+ }
+ },
+ "node_modules/d3-sankey/node_modules/d3-array": {
+ "version": "2.12.1",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz",
+ "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "internmap": "^1.0.0"
+ }
+ },
+ "node_modules/d3-sankey/node_modules/d3-path": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz",
+ "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/d3-sankey/node_modules/d3-shape": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz",
+ "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-path": "1"
+ }
+ },
+ "node_modules/d3-sankey/node_modules/internmap": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz",
+ "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==",
+ "license": "ISC"
+ },
+ "node_modules/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2.10.0 - 3",
+ "d3-format": "1 - 3",
+ "d3-interpolate": "1.2.0 - 3",
+ "d3-time": "2.1.1 - 3",
+ "d3-time-format": "2 - 4"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-interpolate": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-shape": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
+ "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-path": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
+ "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time-format": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
+ "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-time": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "d3-selection": "2 - 3"
+ }
+ },
+ "node_modules/d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/dagre-d3-es": {
+ "version": "7.0.10",
+ "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.10.tgz",
+ "integrity": "sha512-qTCQmEhcynucuaZgY5/+ti3X/rnszKZhEQH/ZdWdtP1tA/y3VoHJzcVrO9pjjJCNpigfscAtoUB5ONcd2wNn0A==",
+ "license": "MIT",
+ "dependencies": {
+ "d3": "^7.8.2",
+ "lodash-es": "^4.17.21"
+ }
+ },
+ "node_modules/dayjs": {
+ "version": "1.11.13",
+ "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz",
+ "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==",
+ "license": "MIT"
+ },
+ "node_modules/debug": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
+ "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decode-named-character-reference": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz",
+ "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/delaunator": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz",
+ "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==",
+ "license": "ISC",
+ "dependencies": {
+ "robust-predicates": "^3.0.2"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/devlop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz",
+ "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==",
+ "license": "MIT",
+ "dependencies": {
+ "dequal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/diff": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz",
+ "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.3.1"
+ }
+ },
+ "node_modules/dompurify": {
+ "version": "3.1.6",
+ "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.1.6.tgz",
+ "integrity": "sha512-cTOAhc36AalkjtBpfG6O8JimdTMWNXjiePT2xQH/ppBGi/4uIpmj8eKyIkMJErXWARyINV/sB38yf8JCLF5pbQ==",
+ "license": "(MPL-2.0 OR Apache-2.0)"
+ },
+ "node_modules/elkjs": {
+ "version": "0.9.3",
+ "resolved": "https://registry.npmjs.org/elkjs/-/elkjs-0.9.3.tgz",
+ "integrity": "sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ==",
+ "license": "EPL-2.0"
+ },
+ "node_modules/entities": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz",
+ "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==",
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
+ "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "license": "BSD-2-Clause",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/estree-util-attach-comments": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-2.1.1.tgz",
+ "integrity": "sha512-+5Ba/xGGS6mnwFbXIuQiDPTbuTxuMCooq3arVv7gPZtYpjp+VXH/NkHAP35OOefPhNG/UGqU3vt/LTABwcHX0w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-build-jsx": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-2.2.2.tgz",
+ "integrity": "sha512-m56vOXcOBuaF+Igpb9OPAy7f9w9OIkb5yhjsZuaPm7HoGi4oTOQi0h2+yZ+AtKklYFZ+rPC4n0wYCJCEU1ONqg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "estree-util-is-identifier-name": "^2.0.0",
+ "estree-walker": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-is-identifier-name": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.1.0.tgz",
+ "integrity": "sha512-bEN9VHRyXAUOjkKVQVvArFym08BTWB0aJPppZZr0UNyAqWsLaVfAqP7hbaTJjzHifmB5ebnR8Wm7r7yGN/HonQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-to-js": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-1.2.0.tgz",
+ "integrity": "sha512-IzU74r1PK5IMMGZXUVZbmiu4A1uhiPgW5hm1GjcOfr4ZzHaMPpLNJjR7HjXiIOzi25nZDrgFTobHTkV5Q6ITjA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "astring": "^1.8.0",
+ "source-map": "^0.7.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-util-value-to-estree": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.4.0.tgz",
+ "integrity": "sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/remcohaszing"
+ }
+ },
+ "node_modules/estree-util-visit": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-1.2.1.tgz",
+ "integrity": "sha512-xbgqcrkIVbIG+lI/gzbvd9SGTJL4zqJKBFttUl5pP27KhAjtMKbX/mQXJ7qgyXpMgVy/zvpm0xoQQaGL8OloOw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/estree-walker": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
+ "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ }
+ },
+ "node_modules/execa": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-0.8.0.tgz",
+ "integrity": "sha512-zDWS+Rb1E8BlqqhALSt9kUhss8Qq4nN3iof3gsOdyINksElaPyNBtKUMTR62qhvgVWR0CqCX7sdnKe4MnUbFEA==",
+ "license": "MIT",
+ "dependencies": {
+ "cross-spawn": "^5.0.1",
+ "get-stream": "^3.0.0",
+ "is-stream": "^1.1.0",
+ "npm-run-path": "^2.0.0",
+ "p-finally": "^1.0.0",
+ "signal-exit": "^3.0.0",
+ "strip-eof": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "license": "MIT"
+ },
+ "node_modules/extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
+ "license": "MIT",
+ "dependencies": {
+ "is-extendable": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/flexsearch": {
+ "version": "0.7.43",
+ "resolved": "https://registry.npmjs.org/flexsearch/-/flexsearch-0.7.43.tgz",
+ "integrity": "sha512-c5o/+Um8aqCSOXGcZoqZOm+NqtVwNsvVpWv6lfmSclU954O3wvQKxxK8zj74fPaSJbXpSLTs4PRhh+wnoCXnKg==",
+ "license": "Apache-2.0"
+ },
+ "node_modules/focus-visible": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/focus-visible/-/focus-visible-5.2.1.tgz",
+ "integrity": "sha512-8Bx950VD1bWTQJEH/AM6SpEk+SU55aVnp4Ujhuuxy3eMEBCRwBnTBnVXr9YAPvZL3/CNjCa8u4IWfNmEO53whA==",
+ "license": "W3C"
+ },
+ "node_modules/get-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
+ "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/git-up": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/git-up/-/git-up-7.0.0.tgz",
+ "integrity": "sha512-ONdIrbBCFusq1Oy0sC71F5azx8bVkvtZtMJAsv+a6lz5YAmbNnLD6HAB4gptHZVLPR8S2/kVN6Gab7lryq5+lQ==",
+ "license": "MIT",
+ "dependencies": {
+ "is-ssh": "^1.4.0",
+ "parse-url": "^8.1.0"
+ }
+ },
+ "node_modules/git-url-parse": {
+ "version": "13.1.1",
+ "resolved": "https://registry.npmjs.org/git-url-parse/-/git-url-parse-13.1.1.tgz",
+ "integrity": "sha512-PCFJyeSSdtnbfhSNRw9Wk96dDCNx+sogTe4YNXeXSJxt7xz5hvXekuRn9JX7m+Mf4OscCu8h+mtAl3+h5Fo8lQ==",
+ "license": "MIT",
+ "dependencies": {
+ "git-up": "^7.0.0"
+ }
+ },
+ "node_modules/github-slugger": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz",
+ "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==",
+ "license": "ISC"
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+ "license": "ISC"
+ },
+ "node_modules/gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-2.0.0.tgz",
+ "integrity": "sha512-P+1n3MnwjR/Epg9BBo1KT8qbye2g2Ou4sFumihwt6I4tsUX7jnLcX4BTOSKg/B1ZrIYMN9FcEnG4x5a7NB8Eng==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/hash-obj": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/hash-obj/-/hash-obj-4.0.0.tgz",
+ "integrity": "sha512-FwO1BUVWkyHasWDW4S8o0ssQXjvyghLV2rfVhnN36b2bbcj45eGiuzdn9XOvOpjV3TKQD7Gm2BWNXdE9V4KKYg==",
+ "license": "MIT",
+ "dependencies": {
+ "is-obj": "^3.0.0",
+ "sort-keys": "^5.0.0",
+ "type-fest": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/hast-util-from-dom": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-from-dom/-/hast-util-from-dom-5.0.1.tgz",
+ "integrity": "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q==",
+ "license": "ISC",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hastscript": "^9.0.0",
+ "web-namespaces": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-dom/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-from-html": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/hast-util-from-html/-/hast-util-from-html-2.0.3.tgz",
+ "integrity": "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "devlop": "^1.1.0",
+ "hast-util-from-parse5": "^8.0.0",
+ "parse5": "^7.0.0",
+ "vfile": "^6.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-html-isomorphic": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-from-html-isomorphic/-/hast-util-from-html-isomorphic-2.0.0.tgz",
+ "integrity": "sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hast-util-from-dom": "^5.0.0",
+ "hast-util-from-html": "^2.0.0",
+ "unist-util-remove-position": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-html-isomorphic/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-from-html/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-from-html/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/hast-util-from-html/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-parse5": {
+ "version": "8.0.3",
+ "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz",
+ "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "devlop": "^1.0.0",
+ "hastscript": "^9.0.0",
+ "property-information": "^7.0.0",
+ "vfile": "^6.0.0",
+ "vfile-location": "^5.0.0",
+ "web-namespaces": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-parse5/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-from-parse5/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/hast-util-from-parse5/node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/hast-util-from-parse5/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-is-element": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz",
+ "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-is-element/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-parse-selector": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz",
+ "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-parse-selector/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-raw": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz",
+ "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "hast-util-from-parse5": "^8.0.0",
+ "hast-util-to-parse5": "^8.0.0",
+ "html-void-elements": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "parse5": "^7.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0",
+ "web-namespaces": "^2.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-raw/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-raw/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/hast-util-raw/node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-raw/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-estree": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-2.3.3.tgz",
+ "integrity": "sha512-ihhPIUPxN0v0w6M5+IiAZZrn0LH2uZomeWwhn7uP7avZC6TE7lIiEh2yBMPr5+zi1aUCXq6VoYRgs2Bw9xmycQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^2.0.0",
+ "@types/unist": "^2.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "estree-util-attach-comments": "^2.0.0",
+ "estree-util-is-identifier-name": "^2.0.0",
+ "hast-util-whitespace": "^2.0.0",
+ "mdast-util-mdx-expression": "^1.0.0",
+ "mdast-util-mdxjs-esm": "^1.0.0",
+ "property-information": "^6.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-object": "^0.4.1",
+ "unist-util-position": "^4.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-parse5": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz",
+ "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "property-information": "^6.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "web-namespaces": "^2.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-parse5/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-to-text": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz",
+ "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "hast-util-is-element": "^3.0.0",
+ "unist-util-find-after": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-text/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hast-util-to-text/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/hast-util-whitespace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-2.0.1.tgz",
+ "integrity": "sha512-nAxA0v8+vXSBDt3AnRUNjyRIQ0rD+ntpbAp4LnPkumc5M9yUbSMa4XDU9Q6etY4f1Wp4bNgvc1yjiZtsTTrSng==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hastscript": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz",
+ "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "hast-util-parse-selector": "^4.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hastscript/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/hastscript/node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/html-void-elements": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz",
+ "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/inline-style-parser": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz",
+ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==",
+ "license": "MIT"
+ },
+ "node_modules/internmap": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
+ "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/intersection-observer": {
+ "version": "0.12.2",
+ "resolved": "https://registry.npmjs.org/intersection-observer/-/intersection-observer-0.12.2.tgz",
+ "integrity": "sha512-7m1vEcPCxXYI8HqnL8CKI6siDyD+eIWSwgB3DZA+ZTogxk9I4CDnj4wilt9x/+/QbHI4YG5YZNmC6458/e9Ktg==",
+ "license": "Apache-2.0"
+ },
+ "node_modules/is-alphabetical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
+ "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz",
+ "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==",
+ "license": "MIT",
+ "dependencies": {
+ "is-alphabetical": "^2.0.0",
+ "is-decimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-buffer": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
+ "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
+ "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz",
+ "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-obj": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-3.0.0.tgz",
+ "integrity": "sha512-IlsXEHOjtKhpN8r/tRFj2nDyTmHvcfNeu/nrRIcXE17ROeatXchkojffa1SpdqW4cr/Fj6QkEf/Gn4zf6KKvEQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-reference": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz",
+ "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.6"
+ }
+ },
+ "node_modules/is-ssh": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/is-ssh/-/is-ssh-1.4.1.tgz",
+ "integrity": "sha512-JNeu1wQsHjyHgn9NcWTaXq6zWSR6hqE0++zhfZlkFBbScNkyvxCdeV8sRkSBaeLKxmbpR21brail63ACNxJ0Tg==",
+ "license": "MIT",
+ "dependencies": {
+ "protocols": "^2.0.1"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
+ "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "license": "ISC"
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "license": "MIT"
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsonc-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.3.1.tgz",
+ "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==",
+ "license": "MIT"
+ },
+ "node_modules/katex": {
+ "version": "0.16.22",
+ "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.22.tgz",
+ "integrity": "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==",
+ "funding": [
+ "https://opencollective.com/katex",
+ "https://github.com/sponsors/katex"
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "commander": "^8.3.0"
+ },
+ "bin": {
+ "katex": "cli.js"
+ }
+ },
+ "node_modules/khroma": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz",
+ "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw=="
+ },
+ "node_modules/kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/kleur": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz",
+ "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/layout-base": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz",
+ "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==",
+ "license": "MIT"
+ },
+ "node_modules/lodash-es": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
+ "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
+ "license": "MIT"
+ },
+ "node_modules/lodash.get": {
+ "version": "4.4.2",
+ "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
+ "integrity": "sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ==",
+ "deprecated": "This package is deprecated. Use the optional chaining (?.) operator instead.",
+ "license": "MIT"
+ },
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz",
+ "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==",
+ "license": "ISC",
+ "dependencies": {
+ "pseudomap": "^1.0.2",
+ "yallist": "^2.1.2"
+ }
+ },
+ "node_modules/markdown-extensions": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-1.1.1.tgz",
+ "integrity": "sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/markdown-table": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz",
+ "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/match-sorter": {
+ "version": "6.3.4",
+ "resolved": "https://registry.npmjs.org/match-sorter/-/match-sorter-6.3.4.tgz",
+ "integrity": "sha512-jfZW7cWS5y/1xswZo8VBOdudUiSd9nifYRWphc9M5D/ee4w4AoXLgBEdRbgVaxbMuagBPeUC5y2Hi8DO6o9aDg==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/runtime": "^7.23.8",
+ "remove-accents": "0.5.0"
+ }
+ },
+ "node_modules/mdast-util-definitions": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-5.1.2.tgz",
+ "integrity": "sha512-8SVPMuHqlPME/z3gqVwWY4zVXn8lqKv/pAhC57FuJ40ImXyBpmO5ukh98zB2v7Blql2FiHjHv9LVztSIqjY+MA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "unist-util-visit": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-definitions/node_modules/unist-util-visit": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
+ "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-definitions/node_modules/unist-util-visit-parents": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
+ "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-find-and-replace": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.2.2.tgz",
+ "integrity": "sha512-MTtdFRz/eMDHXzeK6W3dO7mXUlF82Gom4y0oOgvHhh/HXZAGvIQDUvQ0SuUx+j2tv44b8xTHOm8K/9OoRFnXKw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "escape-string-regexp": "^5.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-find-and-replace/node_modules/unist-util-visit-parents": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
+ "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-from-markdown": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz",
+ "integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "mdast-util-to-string": "^3.1.0",
+ "micromark": "^3.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-decode-string": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-2.0.2.tgz",
+ "integrity": "sha512-qvZ608nBppZ4icQlhQQIAdc6S3Ffj9RGmzwUKUWuEICFnd1LVkN3EktF7ZHAgfcEdvZB5owU9tQgt99e2TlLjg==",
+ "license": "MIT",
+ "dependencies": {
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-gfm-autolink-literal": "^1.0.0",
+ "mdast-util-gfm-footnote": "^1.0.0",
+ "mdast-util-gfm-strikethrough": "^1.0.0",
+ "mdast-util-gfm-table": "^1.0.0",
+ "mdast-util-gfm-task-list-item": "^1.0.0",
+ "mdast-util-to-markdown": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-autolink-literal": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-1.0.3.tgz",
+ "integrity": "sha512-My8KJ57FYEy2W2LyNom4n3E7hKTuQk/0SES0u16tjA9Z3oFkF4RrC/hPAPgjlSpezsOvI8ObcXcElo92wn5IGA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "ccount": "^2.0.0",
+ "mdast-util-find-and-replace": "^2.0.0",
+ "micromark-util-character": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-footnote": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-1.0.2.tgz",
+ "integrity": "sha512-56D19KOGbE00uKVj3sgIykpwKL179QsVFwx/DCW0u/0+URsryacI4MAdNJl0dh+u2PSsD9FtxPFbHCzJ78qJFQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0",
+ "micromark-util-normalize-identifier": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-strikethrough": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-1.0.3.tgz",
+ "integrity": "sha512-DAPhYzTYrRcXdMjUtUjKvW9z/FNAMTdU0ORyMcbmkwYNbKocDpdk+PX1L1dQgOID/+vVs1uBQ7ElrBQfZ0cuiQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-table": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-1.0.7.tgz",
+ "integrity": "sha512-jjcpmNnQvrmN5Vx7y7lEc2iIOEytYv7rTvu+MeyAsSHTASGCCRA79Igg2uKssgOs1i1po8s3plW0sTu1wkkLGg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "markdown-table": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-task-list-item": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-1.0.2.tgz",
+ "integrity": "sha512-PFTA1gzfp1B1UaiJVyhJZA1rm0+Tzn690frc/L8vNX1Jop4STZgOE6bxUhnzdVSB+vm2GU1tIsuQcA9bxTQpMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-math": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-math/-/mdast-util-math-2.0.2.tgz",
+ "integrity": "sha512-8gmkKVp9v6+Tgjtq6SYx9kGPpTf6FVYRa53/DLh479aldR9AyP48qeVOgNZ5X7QUK7nOy4yw7vg6mbiGcs9jWQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-2.0.1.tgz",
+ "integrity": "sha512-38w5y+r8nyKlGvNjSEqWrhG0w5PmnRA+wnBvm+ulYCct7nsGYhFVb0lljS9bQav4psDAS1eGkP2LMVcZBi/aqw==",
+ "license": "MIT",
+ "dependencies": {
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-mdx-expression": "^1.0.0",
+ "mdast-util-mdx-jsx": "^2.0.0",
+ "mdast-util-mdxjs-esm": "^1.0.0",
+ "mdast-util-to-markdown": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-expression": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-1.3.2.tgz",
+ "integrity": "sha512-xIPmR5ReJDu/DHH1OoIT1HkuybIfRGYRywC+gJtI7qHjCJp/M9jrmBEJW22O8lskDWm562BX2W8TiAwRTb0rKA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^2.0.0",
+ "@types/mdast": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-to-markdown": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-2.1.4.tgz",
+ "integrity": "sha512-DtMn9CmVhVzZx3f+optVDF8yFgQVt7FghCRNdlIaS3X5Bnym3hZwPbg/XW86vdpKjlc1PVj26SpnLGeJBXD3JA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^2.0.0",
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "ccount": "^2.0.0",
+ "mdast-util-from-markdown": "^1.1.0",
+ "mdast-util-to-markdown": "^1.3.0",
+ "parse-entities": "^4.0.0",
+ "stringify-entities": "^4.0.0",
+ "unist-util-remove-position": "^4.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "vfile-message": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx/node_modules/unist-util-remove-position": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-4.0.2.tgz",
+ "integrity": "sha512-TkBb0HABNmxzAcfLf4qsIbFbaPDvMO6wa3b3j4VcEzFVaw1LBKwnW4/sRJ/atSLSzoIg41JWEdnE7N6DIhGDGQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-visit": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx/node_modules/unist-util-visit": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
+ "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx/node_modules/unist-util-visit-parents": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
+ "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx/node_modules/vfile-message": {
+ "version": "3.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
+ "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdxjs-esm": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-1.3.1.tgz",
+ "integrity": "sha512-SXqglS0HrEvSdUEfoXFtcg7DRl7S2cwOXc7jkuusG472Mmjag34DUDeOJUZtl+BVnyeO1frIgVpHlNRWc2gk/w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^2.0.0",
+ "@types/mdast": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-to-markdown": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-phrasing": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-3.0.1.tgz",
+ "integrity": "sha512-WmI1gTXUBJo4/ZmSk79Wcb2HcjPJBzM1nlI/OUWA8yk2X9ik3ffNbBGsU+09BFmXaL1IBb9fiuvq6/KMiNycSg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "13.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz",
+ "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/mdast-util-to-hast/node_modules/@types/mdast": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz",
+ "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/mdast-util-to-hast/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/mdast-util-to-hast/node_modules/micromark-util-character": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz",
+ "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/mdast-util-to-hast/node_modules/micromark-util-encode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz",
+ "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/mdast-util-to-hast/node_modules/micromark-util-sanitize-uri": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz",
+ "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/mdast-util-to-hast/node_modules/micromark-util-symbol": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz",
+ "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/mdast-util-to-hast/node_modules/micromark-util-types": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz",
+ "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/mdast-util-to-hast/node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-1.5.0.tgz",
+ "integrity": "sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-phrasing": "^3.0.0",
+ "mdast-util-to-string": "^3.0.0",
+ "micromark-util-decode-string": "^1.0.0",
+ "unist-util-visit": "^4.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown/node_modules/unist-util-visit": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
+ "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown/node_modules/unist-util-visit-parents": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
+ "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz",
+ "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mermaid": {
+ "version": "10.9.3",
+ "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.9.3.tgz",
+ "integrity": "sha512-V80X1isSEvAewIL3xhmz/rVmc27CVljcsbWxkxlWJWY/1kQa4XOABqpDl2qQLGKzpKm6WbTfUEKImBlUfFYArw==",
+ "license": "MIT",
+ "dependencies": {
+ "@braintree/sanitize-url": "^6.0.1",
+ "@types/d3-scale": "^4.0.3",
+ "@types/d3-scale-chromatic": "^3.0.0",
+ "cytoscape": "^3.28.1",
+ "cytoscape-cose-bilkent": "^4.1.0",
+ "d3": "^7.4.0",
+ "d3-sankey": "^0.12.3",
+ "dagre-d3-es": "7.0.10",
+ "dayjs": "^1.11.7",
+ "dompurify": "^3.0.5 <3.1.7",
+ "elkjs": "^0.9.0",
+ "katex": "^0.16.9",
+ "khroma": "^2.0.0",
+ "lodash-es": "^4.17.21",
+ "mdast-util-from-markdown": "^1.3.0",
+ "non-layered-tidy-tree-layout": "^2.0.2",
+ "stylis": "^4.1.3",
+ "ts-dedent": "^2.2.0",
+ "uuid": "^9.0.0",
+ "web-worker": "^1.2.0"
+ }
+ },
+ "node_modules/micromark": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.2.0.tgz",
+ "integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-core-commonmark": "^1.0.1",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-encode": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-subtokenize": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.1",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz",
+ "integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-factory-destination": "^1.0.0",
+ "micromark-factory-label": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-factory-title": "^1.0.0",
+ "micromark-factory-whitespace": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-classify-character": "^1.0.0",
+ "micromark-util-html-tag-name": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-subtokenize": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.1",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-extension-gfm": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-2.0.3.tgz",
+ "integrity": "sha512-vb9OoHqrhCmbRidQv/2+Bc6pkP0FrtlhurxZofvOEy5o8RtuuvTq+RQ1Vw5ZDNrVraQZu3HixESqbG+0iKk/MQ==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-extension-gfm-autolink-literal": "^1.0.0",
+ "micromark-extension-gfm-footnote": "^1.0.0",
+ "micromark-extension-gfm-strikethrough": "^1.0.0",
+ "micromark-extension-gfm-table": "^1.0.0",
+ "micromark-extension-gfm-tagfilter": "^1.0.0",
+ "micromark-extension-gfm-task-list-item": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-autolink-literal": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-1.0.5.tgz",
+ "integrity": "sha512-z3wJSLrDf8kRDOh2qBtoTRD53vJ+CWIyo7uyZuxf/JAbNJjiHsOpG1y5wxk8drtv3ETAHutCu6N3thkOOgueWg==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-footnote": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-1.1.2.tgz",
+ "integrity": "sha512-Yxn7z7SxgyGWRNa4wzf8AhYYWNrwl5q1Z8ii+CSTTIqVkmGZF1CElX2JI8g5yGoM3GAman9/PVCUFUSJ0kB/8Q==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-core-commonmark": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-strikethrough": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-1.0.7.tgz",
+ "integrity": "sha512-sX0FawVE1o3abGk3vRjOH50L5TTLr3b5XMqnP9YDRb34M0v5OoZhG+OHFz1OffZ9dlwgpTBKaT4XW/AsUVnSDw==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-classify-character": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-table": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-1.0.7.tgz",
+ "integrity": "sha512-3ZORTHtcSnMQEKtAOsBQ9/oHp9096pI/UvdPtN7ehKvrmZZ2+bbWhi0ln+I9drmwXMt5boocn6OlwQzNXeVeqw==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-tagfilter": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-1.0.2.tgz",
+ "integrity": "sha512-5XWB9GbAUSHTn8VPU8/1DBXMuKYT5uOgEjJb8gN3mW0PNW5OPHpSdojoqf+iq1xo7vWzw/P8bAHY0n6ijpXF7g==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-task-list-item": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-1.0.5.tgz",
+ "integrity": "sha512-RMFXl2uQ0pNQy6Lun2YBYT9g9INXtWJULgbt01D/x8/6yJ2qpKyzdZD3pi6UIkzF++Da49xAelVKUeUMqd5eIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-math": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/micromark-extension-math/-/micromark-extension-math-2.1.2.tgz",
+ "integrity": "sha512-es0CcOV89VNS9wFmyn+wyFTKweXGW4CEvdaAca6SWRWPyYCbBisnjaHLjWO4Nszuiud84jCpkHsqAJoa768Pvg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/katex": "^0.16.0",
+ "katex": "^0.16.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdx-expression": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-1.0.8.tgz",
+ "integrity": "sha512-zZpeQtc5wfWKdzDsHRBY003H2Smg+PUi2REhqgIhdzAa5xonhP03FcXxqFSerFiNUr5AWmHpaNPQTBVOS4lrXw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "micromark-factory-mdx-expression": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-events-to-acorn": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-extension-mdx-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-1.0.5.tgz",
+ "integrity": "sha512-gPH+9ZdmDflbu19Xkb8+gheqEDqkSpdCEubQyxuz/Hn8DOXiXvrXeikOoBA71+e8Pfi0/UYmU3wW3H58kr7akA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/acorn": "^4.0.0",
+ "@types/estree": "^1.0.0",
+ "estree-util-is-identifier-name": "^2.0.0",
+ "micromark-factory-mdx-expression": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0",
+ "vfile-message": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdx-jsx/node_modules/vfile-message": {
+ "version": "3.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
+ "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdx-md": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-1.0.1.tgz",
+ "integrity": "sha512-7MSuj2S7xjOQXAjjkbjBsHkMtb+mDGVW6uI2dBL9snOBCbZmoNgDAeZ0nSn9j3T42UE/g2xVNMn18PJxZvkBEA==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdxjs": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-1.0.1.tgz",
+ "integrity": "sha512-7YA7hF6i5eKOfFUzZ+0z6avRG52GpWR8DL+kN47y3f2KhxbBZMhmxe7auOeaTBrW2DenbbZTf1ea9tA2hDpC2Q==",
+ "license": "MIT",
+ "dependencies": {
+ "acorn": "^8.0.0",
+ "acorn-jsx": "^5.0.0",
+ "micromark-extension-mdx-expression": "^1.0.0",
+ "micromark-extension-mdx-jsx": "^1.0.0",
+ "micromark-extension-mdx-md": "^1.0.0",
+ "micromark-extension-mdxjs-esm": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdxjs-esm": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-1.0.5.tgz",
+ "integrity": "sha512-xNRBw4aoURcyz/S69B19WnZAkWJMxHMT5hE36GtDAyhoyn/8TuAeqjFJQlwk+MKQsUD7b3l7kFX+vlfVWgcX1w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "micromark-core-commonmark": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-events-to-acorn": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "unist-util-position-from-estree": "^1.1.0",
+ "uvu": "^0.5.0",
+ "vfile-message": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-mdxjs-esm/node_modules/vfile-message": {
+ "version": "3.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
+ "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz",
+ "integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz",
+ "integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-factory-mdx-expression": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-1.0.9.tgz",
+ "integrity": "sha512-jGIWzSmNfdnkJq05c7b0+Wv0Kfz3NJ3N4cBjnbO4zjXIlxJr+f8lk+5ZmwFvqdAbUy2q6B5rCY//g0QAAaXDWA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-events-to-acorn": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "unist-util-position-from-estree": "^1.0.0",
+ "uvu": "^0.5.0",
+ "vfile-message": "^3.0.0"
+ }
+ },
+ "node_modules/micromark-factory-mdx-expression/node_modules/vfile-message": {
+ "version": "3.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
+ "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz",
+ "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz",
+ "integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz",
+ "integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz",
+ "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz",
+ "integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz",
+ "integrity": "sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz",
+ "integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz",
+ "integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz",
+ "integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz",
+ "integrity": "sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-events-to-acorn": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-1.2.3.tgz",
+ "integrity": "sha512-ij4X7Wuc4fED6UoLWkmo0xJQhsktfNh1J0m8g4PbIMPlx+ek/4YdW5mvbye8z/aZvAPUoxgXHrwVlXAPKMRp1w==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/acorn": "^4.0.0",
+ "@types/estree": "^1.0.0",
+ "@types/unist": "^2.0.0",
+ "estree-util-visit": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0",
+ "vfile-message": "^3.0.0"
+ }
+ },
+ "node_modules/micromark-util-events-to-acorn/node_modules/vfile-message": {
+ "version": "3.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
+ "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz",
+ "integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz",
+ "integrity": "sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz",
+ "integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz",
+ "integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-encode": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz",
+ "integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz",
+ "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-types": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz",
+ "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/mri": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz",
+ "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/next": {
+ "version": "14.2.30",
+ "resolved": "https://registry.npmjs.org/next/-/next-14.2.30.tgz",
+ "integrity": "sha512-+COdu6HQrHHFQ1S/8BBsCag61jZacmvbuL2avHvQFbWa2Ox7bE+d8FyNgxRLjXQ5wtPyQwEmk85js/AuaG2Sbg==",
+ "license": "MIT",
+ "dependencies": {
+ "@next/env": "14.2.30",
+ "@swc/helpers": "0.5.5",
+ "busboy": "1.6.0",
+ "caniuse-lite": "^1.0.30001579",
+ "graceful-fs": "^4.2.11",
+ "postcss": "8.4.31",
+ "styled-jsx": "5.1.1"
+ },
+ "bin": {
+ "next": "dist/bin/next"
+ },
+ "engines": {
+ "node": ">=18.17.0"
+ },
+ "optionalDependencies": {
+ "@next/swc-darwin-arm64": "14.2.30",
+ "@next/swc-darwin-x64": "14.2.30",
+ "@next/swc-linux-arm64-gnu": "14.2.30",
+ "@next/swc-linux-arm64-musl": "14.2.30",
+ "@next/swc-linux-x64-gnu": "14.2.30",
+ "@next/swc-linux-x64-musl": "14.2.30",
+ "@next/swc-win32-arm64-msvc": "14.2.30",
+ "@next/swc-win32-ia32-msvc": "14.2.30",
+ "@next/swc-win32-x64-msvc": "14.2.30"
+ },
+ "peerDependencies": {
+ "@opentelemetry/api": "^1.1.0",
+ "@playwright/test": "^1.41.2",
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "sass": "^1.3.0"
+ },
+ "peerDependenciesMeta": {
+ "@opentelemetry/api": {
+ "optional": true
+ },
+ "@playwright/test": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/next-mdx-remote": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/next-mdx-remote/-/next-mdx-remote-4.4.1.tgz",
+ "integrity": "sha512-1BvyXaIou6xy3XoNF4yaMZUCb6vD2GTAa5ciOa6WoO+gAUTYsb1K4rI/HSC2ogAWLrb/7VSV52skz07vOzmqIQ==",
+ "license": "MPL-2.0",
+ "dependencies": {
+ "@mdx-js/mdx": "^2.2.1",
+ "@mdx-js/react": "^2.2.1",
+ "vfile": "^5.3.0",
+ "vfile-matter": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=14",
+ "npm": ">=7"
+ },
+ "peerDependencies": {
+ "react": ">=16.x <=18.x",
+ "react-dom": ">=16.x <=18.x"
+ }
+ },
+ "node_modules/next-seo": {
+ "version": "6.8.0",
+ "resolved": "https://registry.npmjs.org/next-seo/-/next-seo-6.8.0.tgz",
+ "integrity": "sha512-zcxaV67PFXCSf8e6SXxbxPaOTgc8St/esxfsYXfQXMM24UESUVSXFm7f2A9HMkAwa0Gqn4s64HxYZAGfdF4Vhg==",
+ "license": "MIT",
+ "peerDependencies": {
+ "next": "^8.1.1-canary.54 || >=9.0.0",
+ "react": ">=16.0.0",
+ "react-dom": ">=16.0.0"
+ }
+ },
+ "node_modules/next-themes": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.2.1.tgz",
+ "integrity": "sha512-B+AKNfYNIzh0vqQQKqQItTS8evEouKD7H5Hj3kmuPERwddR2TxvDSFZuTj6T7Jfn1oyeUyJMydPl1Bkxkh0W7A==",
+ "license": "MIT",
+ "peerDependencies": {
+ "next": "*",
+ "react": "*",
+ "react-dom": "*"
+ }
+ },
+ "node_modules/nextra": {
+ "version": "2.13.4",
+ "resolved": "https://registry.npmjs.org/nextra/-/nextra-2.13.4.tgz",
+ "integrity": "sha512-7of2rSBxuUa3+lbMmZwG9cqgftcoNOVQLTT6Rxf3EhBR9t1EI7b43dted8YoqSNaigdE3j1CoyNkX8N/ZzlEpw==",
+ "license": "MIT",
+ "dependencies": {
+ "@headlessui/react": "^1.7.17",
+ "@mdx-js/mdx": "^2.3.0",
+ "@mdx-js/react": "^2.3.0",
+ "@napi-rs/simple-git": "^0.1.9",
+ "@theguild/remark-mermaid": "^0.0.5",
+ "@theguild/remark-npm2yarn": "^0.2.0",
+ "clsx": "^2.0.0",
+ "github-slugger": "^2.0.0",
+ "graceful-fs": "^4.2.11",
+ "gray-matter": "^4.0.3",
+ "katex": "^0.16.9",
+ "lodash.get": "^4.4.2",
+ "next-mdx-remote": "^4.2.1",
+ "p-limit": "^3.1.0",
+ "rehype-katex": "^7.0.0",
+ "rehype-pretty-code": "0.9.11",
+ "rehype-raw": "^7.0.0",
+ "remark-gfm": "^3.0.1",
+ "remark-math": "^5.1.1",
+ "remark-reading-time": "^2.0.1",
+ "shiki": "^0.14.3",
+ "slash": "^3.0.0",
+ "title": "^3.5.3",
+ "unist-util-remove": "^4.0.0",
+ "unist-util-visit": "^5.0.0",
+ "zod": "^3.22.3"
+ },
+ "engines": {
+ "node": ">=16"
+ },
+ "peerDependencies": {
+ "next": ">=9.5.3",
+ "react": ">=16.13.1",
+ "react-dom": ">=16.13.1"
+ }
+ },
+ "node_modules/nextra-theme-docs": {
+ "version": "2.13.4",
+ "resolved": "https://registry.npmjs.org/nextra-theme-docs/-/nextra-theme-docs-2.13.4.tgz",
+ "integrity": "sha512-2XOoMfwBCTYBt8ds4ZHftt9Wyf2XsykiNo02eir/XEYB+sGeUoE77kzqfidjEOKCSzOHYbK9BDMcg2+B/2vYRw==",
+ "license": "MIT",
+ "dependencies": {
+ "@headlessui/react": "^1.7.17",
+ "@popperjs/core": "^2.11.8",
+ "clsx": "^2.0.0",
+ "escape-string-regexp": "^5.0.0",
+ "flexsearch": "^0.7.31",
+ "focus-visible": "^5.2.0",
+ "git-url-parse": "^13.1.0",
+ "intersection-observer": "^0.12.2",
+ "match-sorter": "^6.3.1",
+ "next-seo": "^6.0.0",
+ "next-themes": "^0.2.1",
+ "scroll-into-view-if-needed": "^3.1.0",
+ "zod": "^3.22.3"
+ },
+ "peerDependencies": {
+ "next": ">=9.5.3",
+ "nextra": "2.13.4",
+ "react": ">=16.13.1",
+ "react-dom": ">=16.13.1"
+ }
+ },
+ "node_modules/non-layered-tidy-tree-layout": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz",
+ "integrity": "sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==",
+ "license": "MIT"
+ },
+ "node_modules/npm-run-path": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz",
+ "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==",
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/npm-to-yarn": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/npm-to-yarn/-/npm-to-yarn-2.2.1.tgz",
+ "integrity": "sha512-O/j/ROyX0KGLG7O6Ieut/seQ0oiTpHF2tXAcFbpdTLQFiaNtkyTXXocM1fwpaa60dg1qpWj0nHlbNhx6qwuENQ==",
+ "license": "MIT",
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/nebrelbug/npm-to-yarn?sponsor=1"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/p-finally": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
+ "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "license": "MIT",
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parse-entities": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz",
+ "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "character-entities-legacy": "^3.0.0",
+ "character-reference-invalid": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "is-alphanumerical": "^2.0.0",
+ "is-decimal": "^2.0.0",
+ "is-hexadecimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-numeric-range": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz",
+ "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==",
+ "license": "ISC"
+ },
+ "node_modules/parse-path": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/parse-path/-/parse-path-7.1.0.tgz",
+ "integrity": "sha512-EuCycjZtfPcjWk7KTksnJ5xPMvWGA/6i4zrLYhRG0hGvC3GPU/jGUj3Cy+ZR0v30duV3e23R95T1lE2+lsndSw==",
+ "license": "MIT",
+ "dependencies": {
+ "protocols": "^2.0.0"
+ }
+ },
+ "node_modules/parse-url": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/parse-url/-/parse-url-8.1.0.tgz",
+ "integrity": "sha512-xDvOoLU5XRrcOZvnI6b8zA6n9O9ejNk/GExuz1yBuWUGn9KA97GI6HTs6u02wKara1CeVmZhH+0TZFdWScR89w==",
+ "license": "MIT",
+ "dependencies": {
+ "parse-path": "^7.0.0"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "7.3.0",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz",
+ "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==",
+ "license": "MIT",
+ "dependencies": {
+ "entities": "^6.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/inikulin/parse5?sponsor=1"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
+ "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/periscopic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz",
+ "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "estree-walker": "^3.0.0",
+ "is-reference": "^3.0.0"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "license": "ISC"
+ },
+ "node_modules/postcss": {
+ "version": "8.4.31",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
+ "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.6",
+ "picocolors": "^1.0.0",
+ "source-map-js": "^1.0.2"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
+ "node_modules/property-information": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz",
+ "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/protocols": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/protocols/-/protocols-2.0.2.tgz",
+ "integrity": "sha512-hHVTzba3wboROl0/aWRRG9dMytgH6ow//STBZh43l/wQgmMhYhOFi0EHWAPtoCz9IAUymsyP0TSBHkhgMEGNnQ==",
+ "license": "MIT"
+ },
+ "node_modules/pseudomap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz",
+ "integrity": "sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==",
+ "license": "ISC"
+ },
+ "node_modules/react": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
+ "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.2"
+ },
+ "peerDependencies": {
+ "react": "^18.3.1"
+ }
+ },
+ "node_modules/react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/react-katex": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/react-katex/-/react-katex-3.1.0.tgz",
+ "integrity": "sha512-At9uLOkC75gwn2N+ZXc5HD8TlATsB+3Hkp9OGs6uA8tM3dwZ3Wljn74Bk3JyHFPgSnesY/EMrIAB1WJwqZqejA==",
+ "license": "MIT",
+ "dependencies": {
+ "katex": "^0.16.0"
+ },
+ "peerDependencies": {
+ "prop-types": "^15.8.1",
+ "react": ">=15.3.2 <20"
+ }
+ },
+ "node_modules/reading-time": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz",
+ "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==",
+ "license": "MIT"
+ },
+ "node_modules/rehype-katex": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz",
+ "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/katex": "^0.16.0",
+ "hast-util-from-html-isomorphic": "^2.0.0",
+ "hast-util-to-text": "^4.0.0",
+ "katex": "^0.16.0",
+ "unist-util-visit-parents": "^6.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rehype-katex/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/rehype-katex/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/rehype-katex/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rehype-pretty-code": {
+ "version": "0.9.11",
+ "resolved": "https://registry.npmjs.org/rehype-pretty-code/-/rehype-pretty-code-0.9.11.tgz",
+ "integrity": "sha512-Eq90eCYXQJISktfRZ8PPtwc5SUyH6fJcxS8XOMnHPUQZBtC6RYo67gGlley9X2nR8vlniPj0/7oCDEYHKQa/oA==",
+ "license": "MIT",
+ "workspaces": [
+ "./word-highlighter-playground"
+ ],
+ "dependencies": {
+ "@types/hast": "^2.0.0",
+ "hash-obj": "^4.0.0",
+ "parse-numeric-range": "^1.3.0"
+ },
+ "engines": {
+ "node": ">=16"
+ },
+ "peerDependencies": {
+ "shiki": "*"
+ }
+ },
+ "node_modules/rehype-raw": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz",
+ "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "hast-util-raw": "^9.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rehype-raw/node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/rehype-raw/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/rehype-raw/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-gfm": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-3.0.1.tgz",
+ "integrity": "sha512-lEFDoi2PICJyNrACFOfDD3JlLkuSbOa5Wd8EPt06HUdptv8Gn0bxYTdbU/XXQ3swAPkEaGxxPN9cbnMHvVu1Ig==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-gfm": "^2.0.0",
+ "micromark-extension-gfm": "^2.0.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-math": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-5.1.1.tgz",
+ "integrity": "sha512-cE5T2R/xLVtfFI4cCePtiRn+e6jKMtFDR3P8V3qpv8wpKjwvHoBA4eJzvX+nVrnlNy0911bdGmuspCSwetfYHw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-math": "^2.0.0",
+ "micromark-extension-math": "^2.0.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-mdx": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-2.3.0.tgz",
+ "integrity": "sha512-g53hMkpM0I98MU266IzDFMrTD980gNF3BJnkyFcmN+dD873mQeD5rdMO3Y2X+x8umQfbSE0PcoEDl7ledSA+2g==",
+ "license": "MIT",
+ "dependencies": {
+ "mdast-util-mdx": "^2.0.0",
+ "micromark-extension-mdxjs": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "10.0.2",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.2.tgz",
+ "integrity": "sha512-3ydxgHa/ZQzG8LvC7jTXccARYDcRld3VfcgIIFs7bI6vbRSxJJmzgLEIIoYKyrfhaY+ujuWaf/PJiMZXoiCXgw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-reading-time": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/remark-reading-time/-/remark-reading-time-2.0.2.tgz",
+ "integrity": "sha512-ILjIuR0dQQ8pELPgaFvz7ralcSN62rD/L1pTUJgWb4gfua3ZwYEI8mnKGxEQCbrXSUF/OvycTkcUbifGOtOn5A==",
+ "license": "ISC",
+ "dependencies": {
+ "estree-util-is-identifier-name": "^2.0.0",
+ "estree-util-value-to-estree": "^3.3.3",
+ "reading-time": "^1.3.0",
+ "unist-util-visit": "^3.1.0"
+ }
+ },
+ "node_modules/remark-reading-time/node_modules/unist-util-visit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-3.1.0.tgz",
+ "integrity": "sha512-Szoh+R/Ll68QWAyQyZZpQzZQm2UPbxibDvaY8Xc9SUtYgPsDzx5AWSk++UUt2hJuow8mvwR+rG+LQLw+KsuAKA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-reading-time/node_modules/unist-util-visit-parents": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-4.1.1.tgz",
+ "integrity": "sha512-1xAFJXAKpnnJl8G7K5KgU7FY55y3GcLIXqkzUj5QF/QVP7biUm0K0O2oqVkYsdjzJKifYeWn9+o6piAK2hGSHw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-10.1.0.tgz",
+ "integrity": "sha512-EFmR5zppdBp0WQeDVZ/b66CWJipB2q2VLNFMabzDSGR66Z2fQii83G5gTBbgGEnEEA0QRussvrFHxk1HWGJskw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^2.0.0",
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-hast": "^12.1.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype/node_modules/mdast-util-to-hast": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-12.3.0.tgz",
+ "integrity": "sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^2.0.0",
+ "@types/mdast": "^3.0.0",
+ "mdast-util-definitions": "^5.0.0",
+ "micromark-util-sanitize-uri": "^1.1.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-generated": "^2.0.0",
+ "unist-util-position": "^4.0.0",
+ "unist-util-visit": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype/node_modules/unist-util-visit": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
+ "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype/node_modules/unist-util-visit-parents": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
+ "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remove-accents": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.5.0.tgz",
+ "integrity": "sha512-8g3/Otx1eJaVD12e31UbJj1YzdtVvzH85HV7t+9MJYk/u3XmkOUJ5Ys9wQrf9PCPK8+xn4ymzqYCiZl6QWKn+A==",
+ "license": "MIT"
+ },
+ "node_modules/robust-predicates": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz",
+ "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==",
+ "license": "Unlicense"
+ },
+ "node_modules/rw": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
+ "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/sade": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz",
+ "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==",
+ "license": "MIT",
+ "dependencies": {
+ "mri": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT"
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
+ "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/scroll-into-view-if-needed": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz",
+ "integrity": "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==",
+ "license": "MIT",
+ "dependencies": {
+ "compute-scroll-into-view": "^3.0.2"
+ }
+ },
+ "node_modules/section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "license": "MIT",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
+ "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==",
+ "license": "MIT",
+ "dependencies": {
+ "shebang-regex": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
+ "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/shiki": {
+ "version": "0.14.7",
+ "resolved": "https://registry.npmjs.org/shiki/-/shiki-0.14.7.tgz",
+ "integrity": "sha512-dNPAPrxSc87ua2sKJ3H5dQ/6ZaY8RNnaAqK+t0eG7p0Soi2ydiqbGOTaZCqaYvA/uZYfS1LJnemt3Q+mSfcPCg==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-sequence-parser": "^1.1.0",
+ "jsonc-parser": "^3.2.0",
+ "vscode-oniguruma": "^1.7.0",
+ "vscode-textmate": "^8.0.0"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "license": "ISC"
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/sort-keys": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-5.1.0.tgz",
+ "integrity": "sha512-aSbHV0DaBcr7u0PVHXzM6NbZNAtrr9sF6+Qfs9UUVG7Ll3jQ6hHi8F/xqIIcn2rvIVbr0v/2zyjSdwSV47AgLQ==",
+ "license": "MIT",
+ "dependencies": {
+ "is-plain-obj": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.7.4",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz",
+ "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/space-separated-tokens": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
+ "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/streamsearch": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
+ "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/stringify-entities": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+ "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities-html4": "^2.0.0",
+ "character-entities-legacy": "^3.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/strip-eof": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
+ "integrity": "sha512-7FCwGGmx8mD5xQd3RPUvnSpUXHM3BWuzjtpD4TXsfcZ9EL4azvVVUscFYwD9nx8Kh+uCBC00XBtAykoMHwTh8Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz",
+ "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==",
+ "license": "MIT",
+ "dependencies": {
+ "inline-style-parser": "0.1.1"
+ }
+ },
+ "node_modules/styled-jsx": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz",
+ "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==",
+ "license": "MIT",
+ "dependencies": {
+ "client-only": "0.0.1"
+ },
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "peerDependencies": {
+ "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "@babel/core": {
+ "optional": true
+ },
+ "babel-plugin-macros": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/stylis": {
+ "version": "4.3.6",
+ "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz",
+ "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==",
+ "license": "MIT"
+ },
+ "node_modules/supports-color": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-4.5.0.tgz",
+ "integrity": "sha512-ycQR/UbvI9xIlEdQT1TQqwoXtEldExbCEAJgRo5YXlmSKjv6ThHnP9/vwGa1gr19Gfw+LkFd7KqYMhzrRC5JYw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/title": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/title/-/title-3.5.3.tgz",
+ "integrity": "sha512-20JyowYglSEeCvZv3EZ0nZ046vLarO37prvV0mbtQV7C8DJPGgN967r8SJkqd3XK3K3lD3/Iyfp3avjfil8Q2Q==",
+ "license": "MIT",
+ "dependencies": {
+ "arg": "1.0.0",
+ "chalk": "2.3.0",
+ "clipboardy": "1.2.2",
+ "titleize": "1.0.0"
+ },
+ "bin": {
+ "title": "bin/title.js"
+ }
+ },
+ "node_modules/titleize": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/titleize/-/titleize-1.0.0.tgz",
+ "integrity": "sha512-TARUb7z1pGvlLxgPk++7wJ6aycXF3GJ0sNSBTAsTuJrQG5QuZlkUQP+zl+nbjAh4gMX9yDw9ZYklMd7vAfJKEw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/trim-lines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
+ "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz",
+ "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/ts-dedent": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz",
+ "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.10"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+ "license": "0BSD"
+ },
+ "node_modules/type-fest": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz",
+ "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==",
+ "license": "(MIT OR CC0-1.0)",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/unified": {
+ "version": "10.1.2",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
+ "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "bail": "^2.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-find-after": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz",
+ "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-find-after/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/unist-util-find-after/node_modules/unist-util-is": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz",
+ "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-generated": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
+ "integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
+ "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
+ "integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position-from-estree": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-1.1.2.tgz",
+ "integrity": "sha512-poZa0eXpS+/XpoQwGwl79UUdea4ol2ZuCYguVaJS4qzIOMDzbqz8a3erUCOmubSZkaOuGamb3tX790iwOIROww==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-4.0.0.tgz",
+ "integrity": "sha512-b4gokeGId57UVRX/eVKej5gXqGlc9+trkORhFJpu9raqZkZhU0zm8Doi05+HaiBsMEIJowL+2WtQ5ItjsngPXg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz",
+ "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-visit": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove-position/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/unist-util-remove/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/unist-util-remove/node_modules/unist-util-is": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz",
+ "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz",
+ "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz",
+ "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz",
+ "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/unist-util-visit-parents/node_modules/unist-util-is": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz",
+ "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/unist-util-visit/node_modules/unist-util-is": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz",
+ "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
+ "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
+ "funding": [
+ "https://github.com/sponsors/broofa",
+ "https://github.com/sponsors/ctavan"
+ ],
+ "license": "MIT",
+ "bin": {
+ "uuid": "dist/bin/uuid"
+ }
+ },
+ "node_modules/uvu": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz",
+ "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==",
+ "license": "MIT",
+ "dependencies": {
+ "dequal": "^2.0.0",
+ "diff": "^5.0.0",
+ "kleur": "^4.0.3",
+ "sade": "^1.7.3"
+ },
+ "bin": {
+ "uvu": "bin.js"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "5.3.7",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
+ "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "is-buffer": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "vfile-message": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-location": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz",
+ "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-location/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/vfile-location/node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-matter": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/vfile-matter/-/vfile-matter-3.0.1.tgz",
+ "integrity": "sha512-CAAIDwnh6ZdtrqAuxdElUqQRQDQgbbIrYtDYI8gCjXS1qQ+1XdLoK8FIZWxJwn0/I+BkSSZpar3SOgjemQz4fg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/js-yaml": "^4.0.0",
+ "is-buffer": "^2.0.0",
+ "js-yaml": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-matter/node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "license": "Python-2.0"
+ },
+ "node_modules/vfile-matter/node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz",
+ "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message/node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/vfile-message/node_modules/unist-util-stringify-position": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz",
+ "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile/node_modules/vfile-message": {
+ "version": "3.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
+ "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vscode-oniguruma": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/vscode-oniguruma/-/vscode-oniguruma-1.7.0.tgz",
+ "integrity": "sha512-L9WMGRfrjOhgHSdOYgCt/yRMsXzLDJSL7BPrOZt73gU0iWO4mpqzqQzOz5srxqTvMBaR0XZTSrVWo4j55Rc6cA==",
+ "license": "MIT"
+ },
+ "node_modules/vscode-textmate": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/vscode-textmate/-/vscode-textmate-8.0.0.tgz",
+ "integrity": "sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==",
+ "license": "MIT"
+ },
+ "node_modules/web-namespaces": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz",
+ "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/web-worker": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.5.0.tgz",
+ "integrity": "sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw==",
+ "license": "Apache-2.0"
+ },
+ "node_modules/which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "license": "ISC",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "which": "bin/which"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz",
+ "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==",
+ "license": "ISC"
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/zod": {
+ "version": "3.25.67",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
+ "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ }
+}
diff --git a/pages/_meta.json b/pages/_meta.json
index 6fece29..853b2b4 100644
--- a/pages/_meta.json
+++ b/pages/_meta.json
@@ -1,18 +1,19 @@
{
"home": {
- "title": "Home",
+ "title": "Getting Started",
"type": "page"
},
"devs": {
"title": "Developers",
- "type": "page"
+ "type": "page",
+ "href": "/devs/get-started/quick-start"
},
- "community": {
- "title": "Community",
+ "marketplace": {
+ "title": "SDK & Integrations",
"type": "page"
},
- "marketplace": {
- "title": "Marketplace",
+ "release-notes": {
+ "title": "Release Notes",
"type": "page"
}
}
diff --git a/pages/devs/consumers.mdx b/pages/devs/consumers.mdx
index 356f603..d412cd6 100644
--- a/pages/devs/consumers.mdx
+++ b/pages/devs/consumers.mdx
@@ -1,9 +1,145 @@
# Consumers
-Consumers are entities that utilize the inferences generated by the network. These consumers can take various forms, from individuals or organizations making use of inference data to automated contracts that interact with the blockchain.
+
+## What You'll Learn
+- Understanding the role of consumers in the Allora Network
+- Distinction between general consumers and consumer contracts
+- How consumers interact with network inferences and data
+- Integration strategies for utilizing Allora's decentralized intelligence
+
+## Overview
+
+**Consumers are entities that utilize the inferences generated by the network.** These consumers can take various forms, from individuals or organizations making use of inference data to automated contracts that interact with the blockchain.
+
+### Why Consumers Matter
+
+**Consumer benefits and impact**:
+- **Intelligence access**: Tap into high-quality, decentralized AI predictions
+- **Real-world applications**: Bridge AI insights with practical use cases
+- **Network sustainability**: Drive demand and economic activity within the ecosystem
+- **Innovation catalyst**: Enable new types of applications and business models
## Distinction between Consumers and Consumer Contracts
+
### Consumers
-Consumers are all-encompassing actors that consume inferences on the Allora Network. These could be businesses, developers, data scientists, or any entity interested in the intelligence generated by the network.
+
+**Consumers encompass all actors that consume inferences on the Allora Network.** These could be businesses, developers, data scientists, or any entity interested in the intelligence generated by the network.
+
+**Consumer Categories**:
+- **Businesses**: Companies using predictions for strategic decisions
+- **Developers**: Application builders integrating network intelligence
+- **Data scientists**: Researchers leveraging collective intelligence
+- **Organizations**: Institutions requiring high-quality predictions
+
+**Use Cases**:
+- **Financial analysis**: Market predictions and risk assessment
+- **Supply chain optimization**: Demand forecasting and logistics planning
+- **Research and development**: Data-driven insights for innovation
+- **Decision support systems**: Enhanced analytics for business intelligence
### Consumer Contracts
-Consumer contracts are blockchain-deployed contracts that consume inferences. These smart contracts automatically interact with the network to retrieve inference data and use it within their logic. For example, a decentralized finance (DeFi) application might use consumer contracts to obtain and act upon real-time price predictions for cryptocurrencies.
+
+**Consumer contracts are blockchain-deployed contracts that consume inferences.** These smart contracts automatically interact with the network to retrieve inference data and use it within their logic. For example, a decentralized finance (DeFi) application might use consumer contracts to obtain and act upon real-time price predictions for cryptocurrencies.
+
+**Contract Characteristics**:
+- **Automated operation**: Execute predefined logic based on inference data
+- **Blockchain integration**: Native interaction with network protocols
+- **Trustless execution**: Operate without intermediaries or manual intervention
+- **Programmable logic**: Implement complex decision-making algorithms
+
+**Technical Benefits**:
+- **Immutable execution**: Transparent and auditable inference consumption
+- **Real-time responsiveness**: Immediate reaction to new inference data
+- **Decentralized architecture**: No single point of failure or control
+- **Composability**: Easy integration with other smart contracts and protocols
+
+## Consumer Integration Patterns
+
+### API-Based Consumption
+
+**Direct API Access**:
+- **REST endpoints**: Simple HTTP-based inference retrieval
+- **Real-time updates**: Live streaming of inference data
+- **Flexible integration**: Compatible with any programming language or platform
+- **Rate limiting**: Managed access to prevent system overload
+
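+As a sketch of what API-based consumption looks like in practice, the snippet below fetches a price inference over plain HTTP. It borrows the v2 consumer endpoint and the `ETH`/`5m` parameters from the [topic inference walkthrough](/devs/consumers/walkthrough-use-topic-inference); treat the URL shape as an assumption to confirm against that page.
+
+```typescript
+// Minimal sketch: fetch the latest ETH/5m price inference over REST.
+// Endpoint and parameters follow the topic inference walkthrough;
+// ALLORA_API_KEY must hold a key issued by the Allora team.
+const url =
+  'https://api.allora.network/v2/allora/consumer/price/ethereum-11155111/ETH/5m';
+
+async function fetchInference(): Promise<void> {
+  const res = await fetch(url, {
+    headers: { 'x-api-key': process.env.ALLORA_API_KEY ?? '' },
+  });
+  if (!res.ok) throw new Error(`API error: ${res.status}`);
+  console.log('inference payload:', await res.json());
+}
+
+fetchInference().catch(console.error);
+```
+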
+### Smart Contract Integration
+
+**On-Chain Consumption**:
+- **Solidity contracts**: Ethereum-compatible smart contract integration
+- **Cross-chain compatibility**: Support for multiple blockchain networks
+- **Gas optimization**: Efficient inference retrieval and processing
+- **Event-driven architecture**: Reactive programming patterns
+
+### Hybrid Approaches
+
+**Combined Strategies**:
+- **Off-chain processing**: Complex analysis using API data
+- **On-chain execution**: Critical decisions implemented via smart contracts
+- **Data validation**: Cross-verification between multiple sources
+- **Fallback mechanisms**: Redundancy for system reliability
+
+## Value Proposition for Consumers
+
+### Quality Assurance
+
+**Network-Validated Intelligence**:
+- **Consensus-based accuracy**: Multiple participants validate predictions
+- **Reputation systems**: Track record of participant performance
+- **Quality metrics**: Transparent scoring and evaluation mechanisms
+- **Continuous improvement**: Adaptive learning from network feedback
+
+### Economic Efficiency
+
+**Cost-Effective Intelligence**:
+- **Competitive pricing**: Market-driven inference costs
+- **Scalable access**: Pay only for what you use
+- **Reduced overhead**: No need for internal AI teams or infrastructure
+- **Risk distribution**: Shared costs across network participants
+
+### Innovation Opportunities
+
+**New Application Possibilities**:
+- **Decentralized prediction markets**: Community-driven forecasting
+- **Autonomous organizations**: AI-guided governance and operations
+- **Dynamic pricing systems**: Real-time market-responsive pricing
+- **Intelligent automation**: Self-adjusting systems based on predictions
+
+## Getting Started as a Consumer
+
+### Assessment Phase
+
+**Evaluation Steps**:
+1. **Identify use cases**: Determine where predictions add value to your operations
+2. **Review available topics**: Explore existing network inference categories
+3. **Assess integration complexity**: Evaluate technical requirements and resources
+4. **Calculate ROI**: Compare costs with potential benefits and efficiency gains
+
+### Implementation Strategy
+
+**Development Approach**:
+1. **Start with API integration**: Begin with simple REST API consumption
+2. **Prototype and test**: Validate predictions against known outcomes
+3. **Scale gradually**: Increase usage as confidence and value are demonstrated
+4. **Consider smart contracts**: Migrate to on-chain consumption for critical applications
+
+### Best Practices
+
+**Optimization Guidelines**:
+- **Validate predictions**: Cross-check against other sources when possible
+- **Monitor performance**: Track accuracy and adjust usage patterns
+- **Plan for latency**: Account for network delays in time-sensitive applications
+- **Implement fallbacks**: Prepare alternative strategies for network unavailability
+
+## Prerequisites
+
+- **Application development skills**: Ability to integrate external APIs or smart contracts
+- **Understanding of AI predictions**: Knowledge of how to interpret and utilize predictive data
+- **Blockchain familiarity**: Basic understanding of decentralized networks (for smart contract integration)
+- **Business case clarity**: Clear vision of how predictions will add value to your operations
+
+## Next Steps
+
+- [Explore the Allora API endpoint](/devs/consumers/allora-api-endpoint) for REST-based integration
+- [Learn about consumer contracts](/devs/consumers/consumer-contracts/dev-consumers) for on-chain consumption
+- [Review existing consumer implementations](/devs/consumers/existing-consumers) for inspiration and patterns
+- [Follow the topic inference walkthrough](/devs/consumers/walkthrough-use-topic-inference) for hands-on experience
diff --git a/pages/devs/consumers/allora-api-endpoint.mdx b/pages/devs/consumers/allora-api-endpoint.mdx
index e0afbe2..a262bbb 100644
--- a/pages/devs/consumers/allora-api-endpoint.mdx
+++ b/pages/devs/consumers/allora-api-endpoint.mdx
@@ -2,7 +2,23 @@ import { Callout } from 'nextra/components'
# Allora API: How to Query Data of Existing Topics
-The **Allora API** provides an interface to query real-time on-chain data of the latest inferences made by workers. Here's an explanation of how it works using the example endpoint:
+## What You'll Learn
+- How to authenticate and use the Allora API for real-time on-chain data access
+- Understanding API response structure and key data fields
+- Practical examples of querying inference data from existing topics
+- Best practices for API key security and rate limiting
+
+## Overview
+
+**The Allora API provides an interface to query real-time on-chain data of the latest inferences made by workers.** Here's an explanation of how it works using the example endpoint:
+
+### Why Use the Allora API?
+
+The API offers:
+- **Real-time access**: Get the latest inferences as they're processed
+- **Structured data**: Clean JSON responses with comprehensive inference details
+- **Simple integration**: RESTful interface that works with any programming language
+- **Reliable service**: Production-ready API with rate limiting and authentication
## API Authentication
@@ -23,12 +39,21 @@ curl -X 'GET' \
 -H 'x-api-key: <YOUR_API_KEY>'
```
-Replace `<YOUR_API_KEY>` with your actual API key, `<CHAIN_ID>` with the chain ID (e.g., `ethereum-11155111` for Sepolia), and `<TOPIC_ID>` with the topic ID you want to query.
+**Parameter Substitution**:
+- Replace `<YOUR_API_KEY>` with your actual API key
+- Replace `<CHAIN_ID>` with the chain ID (e.g., `ethereum-11155111` for Sepolia)
+- Replace `<TOPIC_ID>` with the topic ID you want to query
### API Key Security
Your API key is a sensitive credential that should be kept secure. Do not share your API key or commit it to version control systems. Instead, use environment variables or secure credential storage mechanisms to manage your API key.
+**Best Practices**:
+- Store keys in environment variables
+- Use secure credential management systems
+- Never commit keys to version control
+- Rotate keys regularly
+
```javascript
// Example of using an environment variable for API key
const apiKey = process.env.ALLORA_API_KEY;
@@ -38,17 +63,31 @@ const apiKey = process.env.ALLORA_API_KEY;
API requests are subject to rate limiting. If you exceed the rate limit, you will receive a 429 Too Many Requests response. To avoid rate limiting issues, consider implementing retry logic with exponential backoff in your applications.
+**Rate Limiting Guidelines**:
+- Monitor your request frequency
+- Implement exponential backoff for retries (see the sketch below)
+- Handle 429 responses gracefully
+- Consider caching responses when appropriate
+
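+Here is a minimal sketch of the retry guidance above, using a generic `fetch`-style client; the backoff parameters are arbitrary starting points, not recommended values.
+
+```typescript
+// Retry with exponential backoff, as suggested above.
+// Waits 1s, 2s, 4s, ... between attempts and backs off on 429 responses.
+async function fetchWithBackoff(
+  url: string,
+  apiKey: string,
+  maxRetries = 5,
+): Promise<unknown> {
+  for (let attempt = 0; attempt < maxRetries; attempt++) {
+    const res = await fetch(url, { headers: { 'x-api-key': apiKey } });
+    if (res.status !== 429) {
+      if (!res.ok) throw new Error(`API error: ${res.status}`);
+      return res.json();
+    }
+    // Rate limited: wait 2^attempt seconds before retrying.
+    await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 1000));
+  }
+  throw new Error('Rate limit retries exhausted');
+}
+```
+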
## API Endpoints
-**Generic**: `https://allora-api.testnet.allora.network/emissions/{version_number}/latest_network_inferences/{topic_id}`
+### Endpoint Structure
-**Example**: `https://allora-api.testnet.allora.network/emissions/v7/latest_network_inferences/1`
+**Generic Format**:
+```
+https://allora-api.testnet.allora.network/emissions/{version_number}/latest_network_inferences/{topic_id}
+```
+
+**Example Request**:
+```
+https://allora-api.testnet.allora.network/emissions/v7/latest_network_inferences/1
+```
-Where:
-- "v7" represents the latest network version number
-- "1" represents the topic ID
+**URL Components**:
+- **`v7`**: The emissions API version number; use the latest available version
+- **`1`**: Topic ID for the inference you want to query
-Sample Response:
+### Sample Response
```json
{
@@ -130,28 +169,38 @@ Sample Response:
}
```
+### Important Notes
+
Please be aware that there may be some expected volatility in predictions due to the nascency of the network and the more forgiving testnet configurations currently in place. We are actively working on implementing an outlier protection mechanism, which will be applied at the consumer layer and tailored to individual use cases in the near future.
-## Breaking Down the Response
+**Current Limitations**:
+- Testnet volatility in predictions
+- Outlier protection mechanism in development
+- Network configurations may change as the platform matures
+
+## Understanding the Response
Below is an explanation of important sub-objects displayed in the JSON output:
-### `topic_id`
-In this case, "1" represents the topic being queried. [Topics](/devs/topic-creators/how-to-create-topic) define the context and rules for a particular inference.
+### Core Response Fields
+
+#### `topic_id`
+**Purpose**: In this case, "1" represents the topic being queried. [Topics](/devs/topic-creators/how-to-create-topic) define the context and rules for a particular inference.
+
+#### `naive_value`
+**Purpose**: The **naive value** omits all forecast-implied inferences from the weighted average by setting their weights to zero. The naive network inference is used to quantify the contribution of the forecasting task to the network accuracy, which in turn sets the reward distribution between the inference and forecasting tasks.
-### `naive_value`
-The **naive value** omits all forecast-implied inferences from the weighted average by setting their weights to zero. The naive network inference is used to quantify the contribution of the
-forecasting task to the network accuracy, which in turn sets the reward distribution between the inference and forecasting tasks.
+#### `combined_value`
+**Purpose**: The **combined value** is an optimized inference that represents a collective intelligence approach, taking both naive submissions and forecast data into account.
-### `combined_value`
-The **combined value** is an optimized inference that represents a collective intelligence approach, taking both naive submissions and forecast data into account.
+> **Recommended for oracles**: If you are looking to just get one value or number from Allora for a data oracle, this would be the one to take.
-> If you are looking to just get one value or number from Allora for a data oracle, this would be the one to take.
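+
+For the data-oracle case described above, the sketch below pulls `combined_value` for topic 1 using the example endpoint. The exact nesting of the field is an assumption; confirm it against the sample response above.
+
+```typescript
+// Sketch: read the single oracle value (combined_value) for topic 1.
+// Assumes the endpoint is publicly queryable, as in the example request
+// above, and that the field sits under network_inferences.
+const endpoint =
+  'https://allora-api.testnet.allora.network/emissions/v7/latest_network_inferences/1';
+
+async function getCombinedValue(): Promise<string> {
+  const res = await fetch(endpoint);
+  if (!res.ok) throw new Error(`query failed: ${res.status}`);
+  const body = await res.json();
+  return body.network_inferences?.combined_value ?? body.combined_value;
+}
+```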
+### Worker Data Fields
-### `inferer_values`
-Workers in the network submit their inferences, each represented by an `allo` address. For example:
+#### `inferer_values`
+**Purpose**: Workers in the network submit their inferences, each represented by an `allo` address. For example:
```json
{
@@ -160,20 +209,35 @@ Workers in the network submit their inferences, each represented by an `allo` ad
}
```
-Each worker submits a value based on their own models. These individual submissions contribute to both the naive and combined values. The combined value gives higher weighting to more reliable workers, based on performance or other criteria.
+**How it works**: Each worker submits a value based on their own models. These individual submissions contribute to both the naive and combined values. The combined value gives higher weighting to more reliable workers, based on performance or other criteria.
+
+#### `one_out_inferer_values`
+**Purpose**: These values simulate removing a single worker from the pool to see how the overall inference changes. This is a technique used to evaluate the impact of individual inferences on the combined result.
+
+### Advanced Fields
+
+#### `forecast_implied_inferences`
+**Purpose**: The [Forecast-Implied Inference](/home/layers/forecast-synthesis/synthesis#forecast-implied-inferences) uses forecasted losses and worker inferences to produce a predicted value where each prediction is weighted based on how accurately the forecasters predicted losses in previous time steps, or epochs.
+
+#### `inference_block_height`
+**Purpose**: The specific chain block at which the inference data was generated
+
+#### `confidence_interval_raw_percentiles`
+**Purpose**: Fixed percentiles that are used to generate [confidence intervals](/home/confidence-intervals)
-### `one_out_inferer_values`
-These values simulate removing a single worker from the pool to see how the overall inference changes. This is a technique used to evaluate the impact of individual inferences on the combined result.
+#### `confidence_interval_values`
+**Purpose**: [Confidence intervals](/home/confidence-intervals) show the predicted range of outcomes based on worker inferences.
-### `forecast_implied_inferences`
-The [Forecast-Implied Inference](/home/layers/forecast-synthesis/synthesis#forecast-implied-inferences) uses forecasted losses and worker inferences to produce a predicted value where each prediction is weighted based on how accurately the forecasters predicted losses in previous time steps, or epochs.
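+
+To make these two confidence-interval fields concrete, the sketch below pairs each raw percentile with its interval value. It assumes the two fields arrive as parallel arrays, which should be checked against the sample response above.
+
+```typescript
+// Sketch: pair confidence interval percentiles with their values.
+// Assumes the two fields arrive as parallel arrays of numeric strings.
+function confidenceBands(
+  percentiles: string[],
+  values: string[],
+): Record<string, number> {
+  const bands: Record<string, number> = {};
+  percentiles.forEach((p, i) => {
+    bands[p] = Number(values[i]);
+  });
+  return bands;
+}
+```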
+## Prerequisites
-### `inference_block_height`
-The specific chain block that the inference data was generated
+- Basic understanding of REST APIs and JSON responses
+- API key obtained from the Allora team via Discord
+- Knowledge of the topic IDs you want to query
+- Understanding of [Allora Network concepts](/home/concepts/overview)
-### `confidence_interval_raw_percentiles`
-Fixed percentiles that are used to generate [confidence intervals](/home/confidence-intervals)
+## Next Steps
-### `confidence_interval_values`
-[Confidence intervals](/home/confidence-intervals) show the predicted range of outcomes based on worker inferences.
+- [Learn how to integrate API data into smart contracts](/devs/consumers/walkthrough-use-topic-inference)
+- [Explore RPC access as an alternative](/devs/consumers/rpc-data-access)
+- [Understand consumer contract patterns](/devs/consumers/consumer-contracts/dev-consumers)
diff --git a/pages/devs/consumers/consumer-contracts/dev-consumers.mdx b/pages/devs/consumers/consumer-contracts/dev-consumers.mdx
index 7379ca9..227cd51 100644
--- a/pages/devs/consumers/consumer-contracts/dev-consumers.mdx
+++ b/pages/devs/consumers/consumer-contracts/dev-consumers.mdx
@@ -1,13 +1,35 @@
# Getting Started with Consumers Contracts
+## What You'll Learn
+- How to develop smart contracts that consume Allora Network inferences
+- Two main patterns for using inference data in your contracts
+- Setting up consumer contracts with proper verification
+
+## Overview
+
> Sample code snippets to help you get started using inferences from Allora topics
-Consumer contracts are essential for bringing Allora Network prices on-chain. You can find the code repository containing example consumer contracts [here](https://github.com/allora-network/allora-consumer). Consumer contracts verify that the data is correctly formatted, and signed by a valid signer.
+**Consumer contracts are essential for bringing Allora Network prices on-chain.** You can find the code repository containing example consumer contracts [here](https://github.com/allora-network/allora-consumer). Consumer contracts verify that the data is correctly formatted and signed by a valid signer.
+
+### Why Use Consumer Contracts?
+
+Consumer contracts provide:
+- **Data verification**: Ensures inference data is correctly formatted and signed
+- **On-chain integration**: Brings Allora Network predictions directly into your smart contracts
+- **Security**: Cryptographic verification of all inference data
+
+## Prerequisites
+
+- Solidity development experience (version 0.8.13+)
+- Understanding of smart contract interfaces
+- Experience with OpenZeppelin contracts
## Consuming Allora Inferences
Below is a complete example of a contract that brings inference data on-chain for use in a protocol, and verifies the data against an Allora Consumer contract. This example code can be found [here](https://github.com/allora-network/allora-consumer/blob/main/src/examples/AlloraConsumerBringPredictionOnChainExample.sol).
+### Complete Contract Example
+
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;
@@ -105,5 +127,54 @@ contract AlloraConsumerBringPredictionOnChainExample is Ownable2Step {
alloraConsumer = alloraConsumer_;
}
}
-
```
+
+## Understanding the Contract
+
+### Key Components
+
+**Contract Setup**:
+- **Consumer Address**: Points to Sepolia testnet consumer at `0x4341a3F0a350C2428184a727BAb86e16D4ba7018`
+- **Inheritance**: Uses `Ownable2Step` for secure ownership management
+- **Imports**: Required interfaces for Allora consumer interaction
+
+**Two Usage Patterns**:
+
+1. **Using Existing Stored Values** (`callProtocolFunctionWithExistingValue`):
+ - Reads inference data already stored in the consumer contract
+ - Checks that data is not stale (less than 1 hour old)
+ - More gas efficient for repeated reads
+
+2. **Real-time Verification** (`callProtocolFunctionWithAlloraTopicInference`):
+ - Verifies fresh inference data with cryptographic signatures
+ - Uses the latest available inference data
+ - More secure but higher gas cost
+
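+For off-chain callers that want to preview a stored value before spending gas, the hypothetical sketch below mirrors the staleness check from `callProtocolFunctionWithExistingValue`. The ABI fragment and method name are assumptions; check [IAlloraConsumer](https://github.com/allora-network/allora-consumer/blob/main/src/interface/IAlloraConsumer.sol) for the actual interface.
+
+```typescript
+import { ethers } from 'ethers';
+
+// Hypothetical sketch: preview the stored topic value off-chain and apply
+// the same 1-hour freshness check the example contract performs on-chain.
+// The ABI fragment and method name are assumptions; see IAlloraConsumer.
+const CONSUMER = '0x4341a3F0a350C2428184a727BAb86e16D4ba7018'; // Sepolia
+const abi = [
+  'function getTopicValue(uint256 topicId, bytes extraData) view returns (tuple(uint256 recentValue, uint256 recentValueTime))',
+];
+
+async function readStoredValue(topicId: bigint) {
+  const provider = new ethers.JsonRpcProvider(process.env.SEPOLIA_RPC_URL);
+  const consumer = new ethers.Contract(CONSUMER, abi, provider);
+  const v = await consumer.getTopicValue(topicId, '0x');
+  const ageSeconds = Math.floor(Date.now() / 1000) - Number(v.recentValueTime);
+  if (ageSeconds > 3600) throw new Error('stored value is stale (> 1 hour)');
+  return v.recentValue;
+}
+```
+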
+### Function Breakdown
+
+**`callProtocolFunctionWithExistingValue`**:
+- Gets stored topic value from consumer contract
+- Validates data freshness (1 hour limit)
+- Passes verified data to your protocol logic
+
+**`callProtocolFunctionWithAlloraTopicInference`**:
+- Verifies signed inference data on-chain
+- Extracts inference value and confidence intervals
+- Passes verified data to your protocol logic
+
+**`setAlloraConsumerContract`**:
+- Admin function to update consumer contract address
+- Uses `onlyOwner` modifier for security
+
+## Implementation Steps
+
+1. **Deploy the Contract**: Deploy with the correct consumer address for your network
+2. **Configure Consumer**: Set the consumer contract address if needed
+3. **Choose Integration Pattern**: Use stored values for efficiency or real-time verification for maximum freshness
+4. **Implement Protocol Logic**: Add your specific business logic in `_protocolFunctionRequiringPredictionValue`
+
+## Next Steps
+
+- [Complete walkthrough of using topic inferences](/devs/consumers/walkthrough-use-topic-inference)
+- [Deploy consumer contracts](/devs/consumers/consumer-contracts/deploy-consumer)
+- [Explore existing consumer implementations](/devs/consumers/existing-consumers)
diff --git a/pages/devs/consumers/existing-consumers.mdx b/pages/devs/consumers/existing-consumers.mdx
index ba6189c..bc5d310 100644
--- a/pages/devs/consumers/existing-consumers.mdx
+++ b/pages/devs/consumers/existing-consumers.mdx
@@ -1,17 +1,133 @@
# Existing Consumer Deployments
-> Where deployments on supported chains can be found
+## What You'll Learn
+- How to find and interact with deployed consumer contracts
+- Available blockchain networks with consumer contract deployments
+- Process for deploying consumer contracts to additional chains
+- Resources for exploring existing Allora Network topics
-## Consumer Contract Deployments
+## Overview
-You can find the deployed consumer contracts by looking at the latest saved deployments in the repository:
-- [Sepolia](https://sepolia.etherscan.io/address/0x8E45fbef38DaC54e32AfB27AC8cBab30E6818ce6#code)
-- [Arbitrum One](https://arbiscan.io/address/0xd75A47C0e5Eb0CeDF57072268F48ba971d2cD7F3#code)
+**This page lists where consumer contract deployments on supported chains can be found.**
-## Deploying to additional chains
+### Why Use Existing Deployments?
-If you would like to deploy to an additional chain not listed above, you can learn how to do so [here](./deploy-consumer).
+**Benefits of leveraging deployed contracts**:
+- **Ready-to-use infrastructure**: No deployment overhead or setup complexity
+- **Tested reliability**: Proven contracts with established track records
+- **Cost efficiency**: Avoid deployment gas costs and development time
+- **Community validation**: Contracts used and validated by other developers
-## Existing Allora Appchain Topics
+## Deployed Consumer Contracts
-Existing Allora Appchain Topics can be found [here](/devs/get-started/existing-topics).
+### Supported Networks
+
+**You can find the deployed consumer contracts by looking at the latest saved deployments in the repository:**
+
+#### Ethereum Sepolia Testnet
+- **Network**: [Sepolia](https://sepolia.etherscan.io/address/0x8E45fbef38DaC54e32AfB27AC8cBab30E6818ce6#code)
+- **Purpose**: Testing and development environment
+- **Benefits**: Free testnet tokens for experimentation
+- **Use case**: Prototype development and integration testing
+
+#### Arbitrum One Mainnet
+- **Network**: [Arbitrum One](https://arbiscan.io/address/0xd75A47C0e5Eb0CeDF57072268F48ba971d2cD7F3#code)
+- **Purpose**: Production-ready Layer 2 solution
+- **Benefits**: Lower gas costs and faster transactions
+- **Use case**: Production deployments and high-frequency applications
+
+### Contract Verification
+
+**Deployment Verification**:
+- **Source code**: All contracts are verified on their respective block explorers
+- **Transparency**: Review contract functionality before interaction
+- **Security**: Audit contract code for security considerations
+- **Compatibility**: Ensure contract interfaces match your integration needs
+
+## Extending to Additional Chains
+
+### Custom Deployment Process
+
+**If you would like to deploy to an additional chain not listed above, you can learn how to do so [here](./deploy-consumer).**
+
+**When to Deploy to New Chains**:
+- **Network requirements**: Target blockchain not currently supported
+- **Custom modifications**: Need contract changes for specific use cases
+- **Geographic considerations**: Regional blockchain preferences
+- **Cost optimization**: More favorable gas costs on alternative networks
+
+**Deployment Considerations**:
+- **Gas costs**: Evaluate deployment and operational costs
+- **Network security**: Assess blockchain maturity and decentralization
+- **Developer tooling**: Availability of debugging and monitoring tools
+- **Community support**: Active developer community and documentation
+
+## Network Topics and Resources
+
+### Available Topics
+
+**Existing Allora Appchain Topics can be found [here](/devs/get-started/network-interaction#available-topics).**
+
+**Topic Integration Strategy**:
+- **Review active topics**: Understand available inference categories
+- **Assess data quality**: Evaluate prediction accuracy and reliability
+- **Match use cases**: Align topic data with your application needs
+- **Monitor performance**: Track topic activity and participant engagement
+
+### Integration Planning
+
+**Development Workflow**:
+1. **Explore existing topics**: Review available inference categories and data
+2. **Select target networks**: Choose between testnet (development) and mainnet (production)
+3. **Integrate with deployed contracts**: Use existing contracts for rapid development
+4. **Test thoroughly**: Validate integration functionality before production use
+
+## Getting Started with Existing Deployments
+
+### Development Phase
+
+**Testnet Integration**:
+- **Start with Sepolia**: Use the testnet deployment for initial development
+- **Test all functionality**: Validate inference retrieval and processing
+- **Monitor gas usage**: Optimize transaction patterns for cost efficiency
+- **Debug edge cases**: Handle network delays and data availability issues
+
+### Production Deployment
+
+**Mainnet Migration**:
+- **Deploy to Arbitrum One**: Use the production-ready Layer 2 deployment
+- **Implement monitoring**: Track contract interactions and performance
+- **Plan for scaling**: Design for increased usage and data volume
+- **Establish fallbacks**: Prepare contingency plans for network issues
+
+## Best Practices
+
+### Security Considerations
+
+**Safe Integration Guidelines**:
+- **Verify contract addresses**: Always double-check deployment addresses
+- **Test with small amounts**: Start with minimal value transactions
+- **Monitor for updates**: Stay informed about contract upgrades or changes
+- **Implement rate limiting**: Avoid overwhelming the network with requests
+
+### Performance Optimization
+
+**Efficiency Strategies**:
+- **Batch requests**: Group multiple inference queries when possible
+- **Cache responses**: Store frequently accessed data to reduce network calls (see the sketch below)
+- **Optimize gas usage**: Use efficient transaction patterns and timing
+- **Monitor latency**: Track response times and adjust accordingly
+
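+As one way to apply the caching guideline above, here is a minimal in-memory wrapper; the TTL and fetcher are illustrative.
+
+```typescript
+// Sketch: cache inference reads for a short TTL to reduce network calls.
+const cache = new Map<string, { value: unknown; expires: number }>();
+
+async function cachedFetch(
+  key: string,
+  fetcher: () => Promise<unknown>,
+  ttlMs = 30_000, // illustrative TTL
+): Promise<unknown> {
+  const hit = cache.get(key);
+  if (hit && hit.expires > Date.now()) return hit.value;
+  const value = await fetcher();
+  cache.set(key, { value, expires: Date.now() + ttlMs });
+  return value;
+}
+```
+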
+## Prerequisites
+
+- **Smart contract interaction knowledge**: Understanding of blockchain transactions and contract calls
+- **Web3 development tools**: Familiarity with libraries like ethers.js or web3.js
+- **Network configuration**: Ability to connect to different blockchain networks
+- **Gas management**: Understanding of transaction fees and optimization
+
+## Next Steps
+
+- [Learn to deploy your own consumer contracts](./deploy-consumer) for custom functionality
+- [Explore the consumer contract development guide](./dev-consumers) for detailed implementation
+- [Review existing topics](/devs/get-started/network-interaction#available-topics) to understand available data sources
+- [Follow the topic inference walkthrough](./walkthrough-use-topic-inference) for hands-on experience
diff --git a/pages/devs/consumers/rpc-data-access.mdx b/pages/devs/consumers/rpc-data-access.mdx
index 3c902ff..5712028 100644
--- a/pages/devs/consumers/rpc-data-access.mdx
+++ b/pages/devs/consumers/rpc-data-access.mdx
@@ -2,47 +2,76 @@ import { Callout } from 'nextra/components'
# Accessing Allora Data Through RPC
-In addition to the [Allora API](/devs/consumers/allora-api-endpoint), you can also access Allora network data directly through RPC (Remote Procedure Call) endpoints. This provides an alternative method for consuming outputs from the network, especially useful for applications that need to interact directly with the blockchain.
+## What You'll Learn
+- How to access Allora network data directly through RPC endpoints
+- Differences between RPC and API access methods
+- Complete guide to querying inference data using allorad CLI
+- Programming examples in JavaScript/TypeScript and Python
+
+## Overview
+
+**In addition to the [Allora API](/devs/consumers/allora-api-endpoint), you can also access Allora network data directly through RPC (Remote Procedure Call) endpoints.** This provides an alternative method for consuming outputs from the network, especially useful for applications that need to interact directly with the blockchain.
+
+### Why Use RPC Access?
+
+RPC access provides:
+- **Direct blockchain integration**: Unmediated access to blockchain data
+- **Historical data access**: Query data that may not be available through APIs
+- **Lower latency**: Direct communication with blockchain nodes
+- **Custom query control**: Full control over query parameters and data filtering
## Prerequisites
-- [`allorad` CLI](/devs/get-started/cli) installed
-- Access to an Allora RPC node
+- **[`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli) installed**: Command-line tool for network interaction
+- **Access to an Allora RPC node**: Network connectivity to blockchain endpoints
+- **Basic RPC knowledge**: Understanding of JSON-RPC protocol concepts
For a complete list of available RPC endpoints and commands, see the [allorad reference section](/devs/reference/allorad).
-## RPC URL and Chain ID
+## Network Configuration
+
+### RPC URL and Chain ID
Each network uses a different RPC URL and Chain ID, which are needed to specify which network to run `allorad` commands against.
-### Testnet
+#### Testnet Configuration
- **RPC URLs**:
- `https://rpc.ankr.com/allora_testnet`
- `https://allora-rpc.testnet.allora.network/`
- **Chain ID**: `allora-testnet-1`
-## RPC Endpoints for Consumers
+**URL Selection Tips**:
+- Use the official Allora RPC endpoint for best reliability
+- The Ankr endpoint provides additional redundancy
+- Test both endpoints to determine optimal latency for your location
+
+## Core RPC Methods
+
+### RPC Endpoints for Consumers
The following RPC methods are particularly useful for consumers looking to access inference data from the Allora network:
-### Get Latest Available Network Inferences
+#### Get Latest Available Network Inferences
This is the primary method for consumers to retrieve the latest network inference for a specific topic.
+**Command Structure**:
```bash
allorad q emissions latest-available-network-inferences [topic_id] --node <RPC_URL>
```
-**Parameters:**
-- `topic_id`: The identifier of the topic for which you want to retrieve the latest available network inference.
-- `RPC_URL`: The URL of the RPC node you're connecting to.
+**Parameters**:
+- **`topic_id`**: The identifier of the topic for which you want to retrieve the latest available network inference.
+- **`RPC_URL`**: The URL of the RPC node you're connecting to.
-**Example:**
+**Example Usage**:
```bash
allorad q emissions latest-available-network-inferences 1 --node https://allora-rpc.testnet.allora.network/
```
-**Response:**
+#### Understanding the Response
+
+**Response Structure**:
The response includes the network inference data, including the combined value, individual worker values, confidence intervals, and more. Here's a simplified example:
```json
@@ -72,13 +101,21 @@ The response includes the network inference data, including the combined value,
}
```
+**Key Response Fields**:
+- **`combined_value`**: Final network inference combining all worker inputs
+- **`inferer_values`**: Individual worker predictions with their addresses
+- **`naive_value`**: Simple average of worker submissions
+- **`confidence_interval_values`**: Prediction uncertainty bounds
+
The `combined_value` field represents the optimized inference that takes both naive submissions and forecast data into account. This is typically the value you want to use for most consumer applications.
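+
+For typed consumption, a minimal sketch of an interface covering the fields listed above; field placement within the JSON should be confirmed against the full response.
+
+```typescript
+// Sketch: minimal typing for the response fields listed above.
+// Placement of confidence intervals in the JSON should be verified
+// against the full response; numeric values arrive as strings.
+interface WorkerValue {
+  worker: string; // allo... address
+  value: string;
+}
+
+interface NetworkInferences {
+  topic_id: string;
+  combined_value: string;
+  naive_value: string;
+  inferer_values: WorkerValue[];
+  confidence_interval_values?: string[];
+}
+```
+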
-## Using RPC in Your Applications
+## Programming Integration
+
+### Using RPC in Your Applications
-### JavaScript/TypeScript Example
+#### JavaScript/TypeScript Implementation
Here's an example of how to query the Allora network using RPC in a JavaScript/TypeScript application:
@@ -126,7 +163,13 @@ getLatestInference(1, 'https://allora-rpc.testnet.allora.network/')
});
```
-### Python Example
+**Function Breakdown**:
+- Creates JSON-RPC request with proper formatting
+- Handles response parsing and error checking
+- Decodes base64-encoded blockchain data
+- Provides clean API for inference retrieval
+
+#### Python Implementation
Here's an example of how to query the Allora network using RPC in a Python application:
@@ -173,22 +216,44 @@ except Exception as e:
print(f"Failed to get inference: {e}")
```
-## RPC vs API: When to Use Each
+**Function Features**:
+- Comprehensive error handling with proper exceptions
+- HTTP status code validation
+- Base64 decoding for blockchain response data
+- Clean example usage with output formatting
+
+## Decision Guide: RPC vs API
### Use RPC When:
-- You need direct blockchain access without intermediaries
-- You want to query historical data that might not be available through the API
-- You're building applications that need to interact with multiple aspects of the Allora network
-- You want to avoid potential rate limiting on the API
+- **Direct blockchain access**: You need unmediated access to blockchain data
+- **Historical data queries**: You want to query historical data that might not be available through the API
+- **Multi-aspect interaction**: You're building applications that need to interact with multiple aspects of the Allora network
+- **Rate limiting concerns**: You want to avoid potential rate limiting on the API
+
+**Best for**:
+- Advanced blockchain applications
+- Data analysis and research tools
+- Enterprise integrations requiring maximum control
### Use the API When:
-- You need a simpler interface with standardized authentication
-- You want to avoid the complexity of RPC calls
-- You're primarily interested in the latest inference data
-- You need additional features provided by the API that aren't available through RPC
+- **Simplified integration**: You need a simpler interface with standardized authentication
+- **Reduced complexity**: You want to avoid the complexity of RPC calls
+- **Latest data focus**: You're primarily interested in the latest inference data
+- **Additional features**: You need additional features provided by the API that aren't available through RPC
+
+**Best for**:
+- Web and mobile applications
+- Rapid prototyping and development
+- Standard consumer applications
RPC nodes may have their own rate limiting or access restrictions. Make sure to implement proper error handling and retry logic in your applications.
+
+## Next Steps
+
+- [Learn complete smart contract integration workflows](/devs/consumers/walkthrough-use-topic-inference)
+- [Explore simplified API alternatives](/devs/consumers/allora-api-endpoint)
+- [Develop advanced consumer contracts](/devs/consumers/consumer-contracts/dev-consumers)
diff --git a/pages/devs/consumers/walkthrough-use-topic-inference.mdx b/pages/devs/consumers/walkthrough-use-topic-inference.mdx
index cc6c78c..3aae2db 100644
--- a/pages/devs/consumers/walkthrough-use-topic-inference.mdx
+++ b/pages/devs/consumers/walkthrough-use-topic-inference.mdx
@@ -1,8 +1,29 @@
# Walkthrough: Using a Topic Inference on-chain
-Follow these instructions to bring the most recent inference data on-chain for a given topic.
+## What You'll Learn
+- Complete workflow for integrating Allora Network inferences into smart contracts
+- How to query the Consumer Inference API for prediction data
+- Step-by-step implementation of on-chain inference verification
-## Complete Example:
+## Overview
+
+**Follow these instructions to bring the most recent inference data on-chain for a given topic.**
+
+This walkthrough shows the complete process:
+- Query inference data from the API
+- Use that data in your smart contract
+- Verify the data on-chain with cryptographic signatures
+
+## Prerequisites
+
+- Smart contract development experience (Solidity)
+- Web3 development knowledge (TypeScript/JavaScript)
+- Access to Allora Consumer API with valid API key
+- Understanding of [consumer contracts](/devs/consumers/consumer-contracts/dev-consumers)
+
+## Complete Example
+
+Here's the smart contract function you'll implement:
```solidity
/**
@@ -30,19 +51,25 @@ Follow these instructions to bring the most recent inference data on-chain for a
}
```
-## Step by Step Guide:
+## Step-by-Step Guide
+
+### Step 1: Query the API
Call the Consumer Inference API using the `asset` and `timeframe` you want to query.
-- `asset` is the asset you want to query, e.g. `BTC`, `ETH`
-- `timeframe` is the timeframe you want to query, e.g. `5m`, `8h`
+**Parameters**:
+- **`asset`**: The asset you want to query, e.g. `BTC`, `ETH`
+- **`timeframe`**: The timeframe you want to query, e.g. `5m`, `8h`
+**API Request**:
```shell
curl -X 'GET' --url 'https://api.allora.network/v2/allora/consumer/price/ethereum-11155111/ETH/5m' -H 'x-api-key: <YOUR_API_KEY>'
```
-Here is an example response:
+### Step 2: Parse the API Response
+
+**Example API Response**:
```json
{
"request_id": "b52b7c20-57ae-4852-bdbb-8f39cf317974",
@@ -61,12 +88,23 @@ Here is an example response:
}
```
-3. Construct a call to the Allora Consumer contract on the chain of your choice (options listed under [deployments](./existing-consumers)) using the returned `signature` and `network-inference` as follows:
+**Key Response Fields**:
+- **`signature`**: Cryptographic signature for on-chain verification
+- **`network_inference`**: The main prediction value
+- **`confidence_interval_percentiles`**: Statistical confidence levels
+- **`confidence_interval_values`**: Prediction bounds for risk assessment
+- **`topic_id`**: Identifier for the prediction topic
+- **`timestamp`**: When the inference was generated
+
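+A lightweight type for these fields, as a sketch, can keep the Step 3 contract call honest; confirm any additional nesting against the full sample response above.
+
+```typescript
+// Sketch: the response fields above, typed for reuse in Step 3.
+// Confirm any additional nesting against the full sample response.
+interface TopicInference {
+  signature: string;
+  network_inference: string;
+  confidence_interval_percentiles: string[];
+  confidence_interval_values: string[];
+  topic_id: string;
+  timestamp: number;
+}
+```
+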
+### Step 3: Create the Smart Contract Transaction
+
+Construct a call to the Allora Consumer contract on the chain of your choice (options listed under [deployments](./existing-consumers)) using the returned `signature` and `network_inference` as follows:
-## Creating the Transaction:
+#### Creating the Transaction
-Note you be doing something more like `callProtocolFunctionWithAlloraTopicInference` in the example above, so you would want to construct your call to that contract in a similar way to the following. You can find the complete example [here](https://github.com/allora-network/allora-consumer/blob/main/script/verifyDataExampleSimple.ts).
+Note that you would likely be doing something more like `callProtocolFunctionWithAlloraTopicInference` from the example above, so you would want to construct your call to that contract in a similar way to the following. You can find the complete example [here](https://github.com/allora-network/allora-consumer/blob/main/script/verifyDataExampleSimple.ts).
+**TypeScript Implementation**:
```typescript
const alloraConsumer =
(new AlloraConsumer__factory())
@@ -94,13 +132,31 @@ const result = await tx.wait()
console.info('tx receipt:', result)
```
+**Transaction Flow**:
+1. Create consumer contract instance with factory
+2. Connect your wallet for transaction signing
+3. Call `verifyNetworkInference` with the API response data
+4. Wait for transaction confirmation
-## Notes
+## Important Notes
+**Data Format Differences**:
- The API endpoint uses `snake_case`, while the smart contract uses `camelCase` for attribute names.
- Ethers.js does not accept `''` for `extraData`. Empty `extraData` should be denoted with `'0x'`.
-## Code Links
+**Field Mapping**:
+- API: `network_inference` → Contract: `networkInference`
+- API: `confidence_interval_percentiles` → Contract: `confidenceIntervalPercentiles`
+- API: `confidence_interval_values` → Contract: `confidenceIntervalValues`
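+
+Because the two conventions differ only by naming, a small helper can translate the API payload into the argument shape used in the ethers.js call above. This is a sketch assuming the fields listed; note the `'0x'` convention for empty `extraData`.
+
+```typescript
+// Sketch: translate the snake_case API payload into the camelCase
+// fields the consumer contract expects; '0x' denotes empty extraData.
+function toContractArgs(api: {
+  network_inference: string;
+  confidence_interval_percentiles: string[];
+  confidence_interval_values: string[];
+  topic_id: string;
+  timestamp: number;
+}) {
+  return {
+    networkInference: api.network_inference,
+    confidenceIntervalPercentiles: api.confidence_interval_percentiles,
+    confidenceIntervalValues: api.confidence_interval_values,
+    topicId: api.topic_id,
+    timestamp: api.timestamp,
+    extraData: '0x', // ethers.js rejects '' here
+  };
+}
+```
+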
+## Source Code References
+
+**Open Source Resources**:
- [Open source consumer code](https://github.com/allora-network/allora-consumer/blob/main/src/)
- [IAlloraConsumer](https://github.com/allora-network/allora-consumer/blob/main/src/interface/IAlloraConsumer.sol), including the structs used for Solidity code.
+
+## Next Steps
+
+- [Learn about consumer contract development](/devs/consumers/consumer-contracts/dev-consumers)
+- [Deploy your own consumer contracts](/devs/consumers/consumer-contracts/deploy-consumer)
+- [Explore alternative data access methods](/devs/consumers/rpc-data-access)
diff --git a/pages/devs/get-started/_meta.json b/pages/devs/get-started/_meta.json
index 7626d67..87ccf16 100644
--- a/pages/devs/get-started/_meta.json
+++ b/pages/devs/get-started/_meta.json
@@ -1,10 +1,5 @@
{
- "overview": "Overview",
- "setup-wallet": "Setup Wallet",
- "cli": "Installation",
- "basic-usage": "Basic Usage",
- "managing-gas": "Managing Gas",
- "existing-topics": "Existing Topics",
- "query-network-data": "How to Query Network Data using allorad",
- "model-forge": "Model Forge Quickstart"
+ "quick-start": "Quick Start",
+ "network-interaction": "Network Interaction",
+ "model-forge": "Model Forge Competition"
}
diff --git a/pages/devs/get-started/basic-usage.mdx b/pages/devs/get-started/basic-usage.mdx
deleted file mode 100644
index 2fa4bfd..0000000
--- a/pages/devs/get-started/basic-usage.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Basic Usage
-
-The Allora Network is a sophisticated ecosystem designed to facilitate various participants, including inference workers, forecasters, reputers, and validators, each playing a crucial role in the network's functionality and integrity. Spinning up these different network participants involves a deep understanding of the network's architecture and protocols.
-
-Despite the complexities involved in the setup and operation of different participants, interacting with the Allora Network on a basic level is straightforward. Here are some ways to get started:
-
-## Querying an Inference On-chain
-
-Interacting with the Allora Network also involves querying data of existing topics on-chain. This can be efficiently done using the Allorad CLI tool. The CLI tool provides a command-line interface to interact with the network, enabling users to retrieve on-chain data seamlessly.
-
-Follow the tutorial [here](/devs/get-started/query-network-data#get-latest-available-network-inferences) to learn how to query an inference on-chain using the `allorad` CLI tool.
-
-## Delegating Stake to a Reputer
-
-Users can delegate their stake to a reputer, contributing to the network's overall health and performance. This involves a basic understanding of staking mechanisms and can be done through the `allorad` CLI tool.
-
-Follow the tutorial [here](/devs/reference/allorad#delegate-stake-to-a-reputer-for-a-topic) to learn how to delegate your stake to a reputer.
diff --git a/pages/devs/get-started/cli.mdx b/pages/devs/get-started/cli.mdx
deleted file mode 100644
index 3676155..0000000
--- a/pages/devs/get-started/cli.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
-import { Callout } from 'nextra/components'
-
-# Allora CLI Spec
-
-Allora provides a CLI tools that allows network participants to perform different functions on the Allora Network:
-
-- `allorad` - Used to read and write data to the chain, e.g. to create a wallet, create new topics or add/delegate stake to a reputer
- - Refer to the [Allorad Reference](/devs/reference/allorad) section for a full list of `allorad` commands with their explanations
-
-## Installing `allorad`
-
-### Prerequisites
-
-You will need to install `go` to download and use `allorad` successfully.
-
-To install Go, follow one of the recommended methods below or consult the [official Go documentation](https://go.dev/doc/install) for the correct download for your operating system. The command-line instructions are based on standard installation locations, but you may customize them as needed.
-
-### Installation
-
-```Text bash
-curl -sSL https://raw.githubusercontent.com/allora-network/allora-chain/main/install.sh | bash -s -- v0.8.0
-
-```
-
-A **successful** installation should output the following line:
-
-```bash
-YYYY-MM-DD hh:mm:ss (N MB/s) - ‘/tmp/allorad’ saved [116706514/116706514]
-```
-
-### Verifying Installation
-
-After installation, verify that `allorad` is correctly installed and ready to interact with the Allora Network by running:
-
-```
-allorad version
-```
-
-`allorad` supports general Cosmos SDK and Tendermint commands. You can run the tool to see a list of commands with explanations of what they do:
-
-```text
-$ allorad
-allorad - the Allora chain
-
-Usage:
- allorad [command]
-
-Available Commands:
- comet CometBFT subcommands
- completion Generate the autocompletion script for the specified shell
- config Utilities for managing application configuration
- debug Tool for helping with debugging your application
- export Export state to JSON
- genesis Application's genesis-related subcommands
- help Help about any command
- init Initialize private validator, p2p, genesis, and application configuration files
- keys Manage your application's keys
- prune Prune app history states by keeping the recent heights and deleting old heights
- query Querying subcommands
- rollback rollback Cosmos SDK and CometBFT state by one height
- snapshots Manage local snapshots
- start Run the full node
- status Query remote node for status
- tx Transactions subcommands
- version Print the application binary version information
-
-Flags:
- -h, --help help for allorad
- --home string directory for config and data (default "/Users//.allorad")
- --log_format string The logging format (json|plain) (default "plain")
- --log_level string The logging level (trace|debug|info|warn|error|fatal|panic|disabled or '*:,:') (default "info")
- --log_no_color Disable colored logs
- --trace print out full stack trace on errors
-
-Use "allorad [command] --help" for more information about a command.
-```
\ No newline at end of file
diff --git a/pages/devs/get-started/existing-topics.mdx b/pages/devs/get-started/existing-topics.mdx
deleted file mode 100644
index c367fe2..0000000
--- a/pages/devs/get-started/existing-topics.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-import { Callout } from 'nextra/components'
-
-# Existing Allora Appchain Topics
-
-> Some useful topics have already been created
-
-The Allora Appchain already contains the following topics on Testnet. Below, you'll find the topic ID, name, and a brief description for each.
-
-
-| Topic ID | Metadata | Default Arg |
-|----------|-----------------------------------------|----------------------|
-| 1 | ETH 10min Prediction | ETH |
-| 2 | ETH 24h Prediction | ETH |
-| 3 | BTC 10min Prediction | BTC |
-| 4 | BTC 24h Prediction | BTC |
-| 5 | SOL 10min Prediction | SOL |
-| 6 | SOL 24h Prediction | SOL |
-| 7 | ETH 20min Prediction | ETH |
-| 8 | BNB 20min Prediction | BNB |
-| 9 | ARB 20min Prediction | ARB |
-| 10 | Memecoin 1h Prediction | TOKEN_FROM_API |
-| 11 | US Presidential Election 2024 - Winning Party | R |
-| 13 | ETH 5min Prediction | ETH |
-| 14 | BTC 5min Prediction | BTC |
-| 15 | ETH 5min Volatility Prediction | ETH |
-| 16 | BTC 5min Volatility Prediction | BTC |
-
-
-
-
-**Warning**: Topic ordering is never guaranteed to be consistent between separate chains/deployments.
-
-
-[Install `allorad`](/devs/get-started/cli#installing-allorad) and [create your first topic](/devs/topic-creators/how-to-create-topic) by following the instructions in the hyperlinks provided.
diff --git a/pages/devs/get-started/managing-gas.mdx b/pages/devs/get-started/managing-gas.mdx
deleted file mode 100644
index a321e5f..0000000
--- a/pages/devs/get-started/managing-gas.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-import { Callout } from 'nextra/components'
-
-# Managing Gas with `allorad`
-
-Invoking transactions causes network validators to do computations on your behalf and update the chain's state. These actions are compensated via _gas_. Gas is paid by wallets who send transactions to the Allora chain.
-
-In its [v0.7.0 release](https://github.com/allora-network/allora-chain/releases/tag/v0.7.0), Allora incorporated the [x/feemarket module](https://github.com/skip-mev/feemarket), which means that gas calculations follow an [EIP-1559-like schedule](https://help.coinbase.com/en/coinbase/getting-started/crypto-education/eip-1559) (see: the [original EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md)). This requires transactions to be structured differently from other cosmos chains.
-
-Prior to `allora-chain` `v0.7.0` and in many other Cosmos chains, transactions are typically structured like so:
-
-```
-allorad tx emissions ... --from ACCOUNT_NAME --node RPC --chain-id allora-testnet-1 --keyring-backend test --keyring-dir ~/.allorad/ --gas auto --gas-adjustment 1.2 --fees 2024700uallo ...
-```
-
-Since `v0.7.0`, transactions should instead abide by the structure:
-```
-allorad tx emissions ... --from ACCOUNT_NAME --node RPC --chain-id allora-testnet-1 --keyring-backend test --keyring-dir ~/.allorad/ --gas 130206 --gas-adjustment 1.2 --gas-prices 10uallo ...
-```
-
-To emphasize: This^^ is the way transactions should be structured today using `allorad`.
-
-The specific differences are:
-
-__`fees Xuallo` becomes `gas-prices 10uallo`__
-- This is a config set in the network validators, so `10uallo` is the universally recommended value
-
-__`gas auto` becomes `gas 130206`__
-- This value can change per the use case
-
-Other clients such as [CosmJS](https://github.com/cosmos/cosmjs) and [Ignite](https://docs.ignite.com/clients/go-client) would similarly need to include these flags when building transactions.
diff --git a/pages/devs/get-started/model-forge.mdx b/pages/devs/get-started/model-forge.mdx
index 8976dc7..f8f14e7 100644
--- a/pages/devs/get-started/model-forge.mdx
+++ b/pages/devs/get-started/model-forge.mdx
@@ -1,154 +1,187 @@
-import { Callout } from "nextra/components";
-
-# Model Forge Quickstart
-
-The [Allora Model Forge Competition](https://forge.allora.network) is an open-source hackathon where participants attempt to create the best model for a given [topic](/devs/topic-creators/topic-life-cycle) on the Allora Network.
-
-- Inferences are submitted to the Allora Network.
-- Participants are scored based on the accuracy of their model compared to the ground truth, and rewarded accordingly.
-
-## Getting Started
-
-### Creating a Wallet
+import { Callout, Steps, Cards, Card } from 'nextra/components'
+
+# Model Forge Competition
+
+Build and deploy AI models to compete in Allora's hackathon-style prediction competition. Submit inferences, earn rewards based on accuracy.
+
+## Competition Overview
+
+- **Open-source hackathon** for AI model development
+- **Accuracy-based scoring** against ground truth data
+- **Network rewards** for top-performing models
+- **Real-time leaderboards** and performance metrics
+
+## Available Topics
+
+You can develop and deploy AI models using any of these active prediction topics on Allora Testnet:
+
+| Topic ID | Metadata | Default Arg |
+|----------|----------|-------------|
+| 1 | ETH 10min Prediction | ETH |
+| 2 | ETH 24h Prediction | ETH |
+| 3 | BTC 10min Prediction | BTC |
+| 4 | BTC 24h Prediction | BTC |
+| 5 | SOL 10min Prediction | SOL |
+| 6 | SOL 24h Prediction | SOL |
+| 7 | ETH 20min Prediction | ETH |
+| 8 | BNB 20min Prediction | BNB |
+| 9 | ARB 20min Prediction | ARB |
+| 10 | Memecoin 1h Prediction | TOKEN_FROM_API |
+| 11 | US Presidential Election 2024 - Winning Party | R |
+| 13 | ETH 5min Prediction | ETH |
+| 14 | BTC 5min Prediction | BTC |
+| 15 | ETH 5min Volatility Prediction | ETH |
+| 17 | ETH 8h Prediction | ETH |
+| 18 | BTC 8h Prediction | BTC |
+| 37 | SOL/USD - 5min Price Prediction | SOL |
+| 38 | SOL/USD - 8h Price Prediction | SOL |
+| 41 | ETH/USD - 8h Price Prediction | ETH |
+| 42 | BTC/USD - 8h Price Prediction | BTC |
+| 47 | 5 min BTC Price Prediction | BTC |
+| 50 | 6h BTC Volatility Prediction | BTC |
+| 56 | 1 hour BERA/USD Log-Return Prediction | BERA |
+| 58 | 8 hour SOL/USD Log-Return Prediction | SOL |
+| 60 | 24 hour XAU/USD Log-Return Prediction | XAU |
+| 61 | 1 day BTC/USD Log-Return Prediction | BTC |
+| 62 | 1 day SOL/USD Log-Return Prediction | SOL |
+| 63 | 1 day ETH/USD Log-Return Prediction | ETH |
+
+
+**Topic Selection:** Choose topics that align with your model's strengths and interests. Each topic has different prediction challenges and reward structures.
+
-To get started, you'll need to create a wallet on the Allora Network.
+## Quick Registration
-#### Download the `allorad` CLI Tool
+
-Open your terminal and run the following command to install the `allorad` CLI tool.
+### Create Wallet & Get Funds
```bash
+# Install CLI (if not done)
curl -sSL https://raw.githubusercontent.com/allora-network/allora-chain/main/install.sh | bash -s -- v0.8.0
-```
-
-#### Create a Wallet
-
-Run the following command to create a wallet.
-```bash
+# Create wallet
allorad keys add
-```
-
-Save your key name, wallet address, and mnemonic in a secure location. You will need this to submit inferences to the Allora Network.
-
-### Register for the Competition
-
-To register for the competition, go to the [Sign Up](https://vk4z45e3hne.typeform.com/to/ypA2Yl1J?utm_source=landing-page&typeform-source=forge.allora.network) page.
-
-Fill out the form and submit your application.
-
-
-Input your wallet address that you created in the previous step in the `Allora Wallet Address` field.
-
-
-Although a discord account is not required to register, participants with a discord account will be able to join exclusive channels in the Allora Discord server to get priority access to help from the Allora Labs team and other participants.
+# Get testnet funds
+# Visit: https://faucet.testnet.allora.network/
+```
-Join the Allora Discord server [here](https://discord.gg/allora).
-
+### Register for Competition
-### Successful Registration
+1. **Sign up:** Visit the [Model Forge Registration](https://forge.allora.network) page
+2. **Submit wallet address** from step 1
+3. **Join Discord** (optional): [Allora Discord](https://discord.gg/allora) for exclusive support channels
-Once you've been notified that you've been accepted into the competition:
+### Connect & Verify
-1. Download the [Keplr Wallet Chrome Extension](https://chromewebstore.google.com/detail/keplr/dmkamcknogkgcdfhhbddcghachkejeap?hl=en&pli=1) from the Chrome Web Store.
-2. Connect your wallet to the [Allora Forge](forge.allora.network) site.
+1. **Install Keplr:** [Chrome Extension](https://chrome.google.com/webstore/detail/keplr/dmkamcknogkgcdfhhbddcghachkejeap)
+2. **Connect wallet** to Allora Forge site

-3. You should see an `eligible` status in the top left corner of the screen once connected.
+3. **Verify eligibility** - You should see "eligible" status in the top left corner

-Congratulations! You're now registered for the Allora Model Forge Competition.
+**Congratulations! You're now registered for the Allora Model Forge Competition.**
-## Topics
+
-The Allora Network is categorized into distinct [topics](/devs/topic-creators/how-to-create-topic) that serve as Schelling points for model-makers to submit inferences on.
+## Model Development
-Topics are assigned an ID and categorized by a specific Target Variable, Epoch, Loss Method, and Metadata. Let's quickly explain each in the context of the Forge Competition:
+### Allora MDK Framework
-- **Topic ID**: A unique identifier for the topic that participants will need to submit inferences on.
-- **Target Variable**: The asset or asset pair that models are trying to predict for
- - E.g. `ETH`, `USDC/ETH`.
-- **Epoch**: A discrete period during which inferences and forecasts are submitted, and rewards are distributed.
- - Epochs are defined in blocks on the Network
- - Each epoch provides a timeframe for evaluating and scoring the performance of workers and reputers.
- - In the Forge Competition, epochs are abstracted into universal timeframes, e.g. `5min`, `1hr`, `1d`.
-- **Loss Function**: Measures how far off your model's predictions are from the actual values
- - For all topics in the Forge Competition, the loss function is [`Mean Squared Error`](https://en.wikipedia.org/wiki/Mean_squared_error).
-- **Metadata**: Additional information about the topic, including the financial indicator, prediction period, and target variable.
- - E.g. ETH 12hr Volatility Prediction [Epoch length: 5min]
- - Indicates that the topic is predicting the volatility of the ETH asset 12hr into the future.
- - Epoch length indicates that models are submitting the 12hr predicted volatility of the ETH asset every 5min.
+Use the [Allora Model Development Kit](https://github.com/allora-network/allora-mdk) for rapid model development:
+**Features:**
+- Multiple regression strategies (Linear, SVR, Ridge, etc.)
+- Optimized for price/volatility/volume predictions
+- Easy customization and deployment
+- Comprehensive documentation
-
-[Additional fields that define a given topic](/devs/topic-creators/how-to-create-topic) can be pulled using the following [`allorad`](/devs/get-started/model-forge#download-the-allorad-cli-tool) command:
-
+**Quick Start:**
```bash
-allorad query emissions topic --node
+git clone https://github.com/allora-network/allora-mdk
+cd allora-mdk
+# Follow setup instructions in repository
```
-- `node-rpc` is the RPC URL of the Allora Network node you are querying.
+### Topic Structure
-Example Usage:
+Understanding topic parameters for model development:
```bash
-allorad query emissions topic 13 --node https://allora-rpc.testnet.allora.network
+# Get topic details
+allorad query emissions topic [topic_id] --node https://allora-rpc.testnet.allora.network/
+
+# Example for ETH 10min prediction (Topic 1)
+allorad query emissions topic 1 --node https://allora-rpc.testnet.allora.network/
```
-
-### Competition Topics
+**Key Fields:**
+- **Target Variable:** Asset to predict (ETH, BTC, SOL, etc.)
+- **Epoch Length:** Submission frequency (varies by topic)
+- **Loss Function:** Depends on topic type
+- **Metadata:** Prediction timeframe and details
+
+## Performance Monitoring
-1. [5min ETH Price Prediction](https://forge.allora.network/competitions/1)
-2. [12hr USDC/ETH Volume Prediction](https://forge.allora.network/competitions/2)
-3. [12hr ETH Volatility Prediction](https://forge.allora.network/competitions/3)
+### Live Leaderboards
-| Topic ID | Topic | Target Variable | Epoch | Loss Function | Metadata |
-|----------|-------|----------------|--------|---------------|-----------|
-| 30 | ETH Price Prediction | `ETH` | `5min` | Mean Squared Error | ETH 5min Price Prediction |
-| 29 | USDC/ETH Volume Prediction | `USDC/ETH` | `5min` | Mean Squared Error | USDC/ETH 12hr Volume Prediction |
-| 28 | ETH Volatility Prediction | `ETH` | `5min` | Mean Squared Error | ETH 12hr Volatility Prediction |
+Track your model's performance on the [Allora Forge Platform](https://forge.allora.network):
+
+
+**Connect your wallet** to the Allora Forge site to view active competitions and see your ranking across different prediction topics.
+
-## Model Creation
+### Allora Explorer Metrics
-If your registration is successful, start building your model and preparing it for inference submission to the network.
+**View detailed performance:**
-We've built out the **Allora Model Development Kit** (MDK) framework to help you get started with model creation. These models are optimized for price prediction, but can be used for other topics as well.
+1. **Connect wallet** to [Allora Explorer](https://explorer.allora.network/)
-[**Allora MDK Repository**](https://github.com/allora-network/allora-model-maker)
-- Features:
- - Large set of regression strategies to choose from
- - Easy to customize for volatility and volume predictions
+
-We offer a comprehensive set of [documentation](/devs/workers/deploy-worker/allora-mdk) for the Allora MDK for you to dive into.
+2. **Click "Your Topics"** to see your submissions
-## Leaderboard
+
-The leaderboard is a live-updating list of the top performing models in the competition. Go to the competition page for the specific topic you've entered to view the leaderboard.
+3. **Select topic** for detailed metrics
-1. [5min ETH Price Prediction](https://forge.allora.network/competitions/1)
-2. [5min USDC/ETH Volume Prediction](https://forge.allora.network/competitions/2)
-3. [5min ETH Volatility Prediction](https://forge.allora.network/competitions/3)
+
-
-Don't forget to [connect your wallet](/devs/get-started/model-forge#successful-registration) to the Allora Forge site to view the leaderboard.
-
+**Key Metrics:**
+- **Your Score:** Overall performance ranking
+- **ALLO Earned:** Rewards based on accuracy
+- **Inference History:** Track prediction accuracy over time
-## Detailed Performance Metrics
+## Development Tips
-We've built out an [Allora Explorer](explorer.allora.network) that displays performance metrics to help you evaluate the success of your model. These metrics are used to score your model in the competition.
+
+**Model Optimization:**
+- Understand the loss function for your chosen topic
+- Test with historical data before deployment
+- Monitor epoch timing for submission windows
+- Use MDK framework for faster iteration
+
-Steps:
+**Best Practices:**
+- **Data quality:** Ensure clean, relevant training data
+- **Model validation:** Cross-validate before topic submission
+- **Resource management:** Optimize for topic-specific epoch intervals
+- **Community engagement:** Join Discord for tips and support
-1. Connect your wallet to the Allora Explorer
+## Getting Help
-
-2. Click `Your Topics`
+- **Documentation:** [Allora MDK Docs](https://github.com/allora-network/allora-mdk)
+- **Community:** [Discord Support Channels](https://discord.gg/allora)
+- **Technical Issues:** [GitHub Issues](https://github.com/allora-network)
+- **Competition Updates:** Follow official announcements
-
-3. Click on a topic to view your performance metrics
+## Next Steps
-
-- Your `Score` and `ALLO Earned` indicate your overall performance in the topic.
\ No newline at end of file
+- **Start Building:** Clone the MDK repository and begin model development
+- **Join Community:** Connect with other participants on Discord
+- **Monitor Performance:** Use Explorer to track your model's accuracy
+- **Scale Up:** Deploy multiple models or optimize existing ones
\ No newline at end of file
diff --git a/pages/devs/get-started/network-interaction.mdx b/pages/devs/get-started/network-interaction.mdx
new file mode 100644
index 0000000..74a2524
--- /dev/null
+++ b/pages/devs/get-started/network-interaction.mdx
@@ -0,0 +1,188 @@
+import { Callout, Tabs } from 'nextra/components'
+
+# Network Interaction
+
+Learn how to interact with the Allora Network: query data, manage transactions, and work with prediction topics.
+
+## Gas Management
+
+Allora uses an **EIP-1559-like fee structure**. Always use this transaction format:
+
+```bash
+allorad tx emissions [command] \
+ --from [account_name] \
+ --node [rpc_url] \
+ --chain-id allora-testnet-1 \
+ --keyring-backend test \
+ --gas 130206 \
+ --gas-adjustment 1.2 \
+ --gas-prices 10uallo
+```
+
+
+**Key differences from other Cosmos chains:**
+- Use `--gas-prices 10uallo` (not `--fees`)
+- Use specific `--gas` amount (not `--gas auto`)
+
+
+## Available Topics
+
+Current prediction topics on Allora Testnet:
+
+| Topic ID | Metadata | Default Arg |
+|----------|----------|-------------|
+| 1 | ETH 10min Prediction | ETH |
+| 2 | ETH 24h Prediction | ETH |
+| 3 | BTC 10min Prediction | BTC |
+| 4 | BTC 24h Prediction | BTC |
+| 5 | SOL 10min Prediction | SOL |
+| 6 | SOL 24h Prediction | SOL |
+| 7 | ETH 20min Prediction | ETH |
+| 8 | BNB 20min Prediction | BNB |
+| 9 | ARB 20min Prediction | ARB |
+| 10 | Memecoin 1h Prediction | TOKEN_FROM_API |
+| 11 | US Presidential Election 2024 - Winning Party | R |
+| 13 | ETH 5min Prediction | ETH |
+| 14 | BTC 5min Prediction | BTC |
+| 15 | ETH 5min Volatility Prediction | ETH |
+| 17 | ETH 8h Prediction | ETH |
+| 18 | BTC 8h Prediction | BTC |
+| 37 | SOL/USD - 5min Price Prediction | SOL |
+| 38 | SOL/USD - 8h Price Prediction | SOL |
+| 41 | ETH/USD - 8h Price Prediction | ETH |
+| 42 | BTC/USD - 8h Price Prediction | BTC |
+| 47 | 5 min BTC Price Prediction | BTC |
+| 50 | 6h BTC Volatility Prediction | BTC |
+| 56 | 1 hour BERA/USD Log-Return Prediction | BERA |
+| 58 | 8 hour SOL/USD Log-Return Prediction | SOL |
+| 60 | 24 hour XAU/USD Log-Return Prediction | XAU |
+| 61 | 1 day BTC/USD Log-Return Prediction | BTC |
+| 62 | 1 day SOL/USD Log-Return Prediction | SOL |
+| 63 | 1 day ETH/USD Log-Return Prediction | ETH |
+
+
+Topic ordering varies between deployments. Always verify topic details before use.
+
+
+## Querying Network Data
+
+Use these commands to retrieve network information:
+
+
+
+
+**Get topic details:**
+```bash
+allorad query emissions topic [topic_id] \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+**Example:**
+```bash
+allorad query emissions topic 1 \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+
+
+**Get latest network predictions:**
+```bash
+allorad query emissions latest-available-network-inferences [topic_id] \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+**Use case:** Retrieve complete network-wide predictions when all data is available.
+
+**Example:**
+```bash
+allorad query emissions latest-available-network-inferences 1 \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+
+
+**Get total reward pool:**
+```bash
+allorad query emissions total-rewards \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+**Use case:** Check total rewards available for distribution in current block.
+
+
+
+**Get network parameters:**
+```bash
+allorad query emissions params \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+**Use case:** View current network configuration and module settings.
+
+
+
+
+## Common Operations
+
+### Query an Inference
+
+```bash
+# Get latest prediction for ETH (topic 1)
+allorad query emissions latest-available-network-inferences 1 \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+### Delegate Stake to Reputer
+
+```bash
+# Positional arguments are placeholders; run `allorad tx emissions delegate-stake --help` for the exact argument order
+allorad tx emissions delegate-stake [topic_id] [reputer_address] [amount]uallo \
+ --from [account_name] \
+ --node https://allora-rpc.testnet.allora.network/ \
+ --chain-id allora-testnet-1 \
+ --gas 130206 \
+ --gas-adjustment 1.2 \
+ --gas-prices 10uallo
+```
+
+### Check Your Balance
+
+```bash
+allorad query bank balances [wallet_address] \
+ --node https://allora-rpc.testnet.allora.network/
+```
+
+## Network Endpoints
+
+**Testnet:**
+- **RPC:** `https://allora-rpc.testnet.allora.network/`
+- **Alternative RPC:** `https://rpc.ankr.com/allora_testnet`
+- **Chain ID:** `allora-testnet-1`
+
+## Best Practices
+
+- **Always specify gas explicitly** - Don't use `--gas auto`
+- **Use consistent gas prices** - `10uallo` is recommended
+- **Verify topic IDs** - Topics may vary between networks
+- **Check network status** - Ensure RPC endpoints are accessible
+- **Monitor gas usage** - Adjust gas amounts based on transaction complexity
+
+## Troubleshooting
+
+**Transaction Failed:**
+- Check gas settings (use explicit `--gas` and `--gas-prices`)
+- Verify sufficient wallet balance
+- Confirm correct chain ID and RPC URL
+
+**Query Timeout:**
+- Try an alternative RPC endpoint
+- Check network connectivity
+- Verify topic ID exists
+
+## Next Steps
+
+- **Deploy a Worker:** [Worker Documentation](/devs/workers)
+- **Build Consumers:** [Consumer Guides](/devs/consumers)
+- **Validate Network:** [Validator Setup](/devs/validators)
+- **Join Competition:** [Model Forge](/devs/get-started/model-forge)
\ No newline at end of file
diff --git a/pages/devs/get-started/overview.mdx b/pages/devs/get-started/overview.mdx
deleted file mode 100644
index 353b247..0000000
--- a/pages/devs/get-started/overview.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
-import { Callout } from 'nextra/components'
-
-# Getting Started with Allora
-
-Welcome to the Allora developer's guide! This page will help you get up to speed with the Allora repositories that will be used throughout our documentation and show you how to start contributing to our ecosystem.
-
-## Overview
-
-Allora is a decentralized network that leverages the power of collective participation in tasks such as data inference, forecasting, and verification. [Contribute](/community/contribute) to any of the repositories below to help enhance and grow the Allora ecosystem.
-
-## Allora Chain
-
-The [Allora chain](https://github.com/allora-network/allora-chain) is a Cosmos Hub chain that forms the core of the Allora network. This repository contains the blockchain's codebase and is useful for validators.
-
-You can use the [`allorad` CLI tool](/devs/get-started/cli#installing-allorad) to query the chain and make transactions.
-
-
-Some subsections in the Table of Contents below require downloading and installing [`allorad`](/devs/get-started/cli#installing-allorad).
-
-As a general rule, it is suggested to download and install `allorad` for any developer looking to interact with the network.
-
-
-## [Setup Wallet](/devs/get-started/setup-wallet.mdx)
-Instructions for setting up your wallet to interact with the Allora network.
-
-### Subsections
-#### [Basic Usage](/devs/get-started/basic-usage.mdx)
-#### [Existing Topics](/devs/get-started/existing-topics.mdx)
-#### [How to Query Network Data using `allorad`](/devs/get-started/query-network-data.mdx)
-
----
-
-## [Workers](/devs/workers/workers.mdx)
-Explore how workers function in Allora, including setup and data querying.
-
-### Subsections
-#### [System Requirements](/devs/workers/nop-requirements.mdx)
-#### [Build/Deploy an Inference Worker](/devs/workers/deploy-worker.mdx)
-#### [Walkthroughs](/devs/workers/walkthroughs/index.mdx)
-#### [Build and Deploy a Forecaster](/devs/workers/deploy-forecaster.mdx)
-#### [How To Query Worker Data using `allorad`](/devs/workers/query-worker-data.mdx)
-#### [Query EMA Score of a Worker using `allorad`](/devs/workers/query-ema-score.mdx)
-
----
-
-## [Reputers](/devs/reputers/reputers.mdx)
-Understand how reputers contribute to reputation management and query operations.
-
-### Subsections
-#### [Coin Prediction Reputer](/devs/reputers/coin-prediction-reputer.mdx)
-#### [Query EMA Score for a Reputer using `allorad`](/devs/reputers/query-ema-score.mdx)
-#### [Query Reputer Data using `allorad`](/devs/reputers/query-reputer-data.mdx)
-#### [Set and Adjust Stake](/devs/reputers/set-and-adjust-stake.mdx)
-
----
-
-## [Validators](/devs/validators/validators.mdx)
-Details on how to set up and run a validator in the Allora network.
-
-### Subsections
-#### [System Requirements](/devs/validators/nop-requirements.mdx)
-#### [Deploy Allora Chain](/devs/validators/deploy-chain.mdx)
-#### [Run a Full Node](/devs/validators/run-full-node.mdx)
-#### [Stake a Validator](/devs/validators/stake-a-validator.mdx)
-#### [Validator Operations](/devs/validators/validator-operations.mdx)
-#### [Software Upgrades](/devs/validators/software-upgrades.mdx)
-
----
-
-## [Consumers](/devs/consumers/consumers.mdx)
-Resources for interacting with Allora as a consumer, including querying data and contracts.
-
-### Subsections
-#### [Allora API Endpoint](/devs/consumers/allora-api-endpoint.mdx)
-#### [Consumer Contracts](/devs/consumers/consumer-contracts/deploy-consumer.mdx)
-##### [Deploy Consumer Contracts](/devs/consumers/consumer-contracts/dev-consumers.mdx)
-#### [Existing Consumers](/devs/consumers/existing-consumers.mdx)
-#### [Walkthrough: Using a Topic Inference on-chain](/devs/consumers/walkthrough-use-topic-inference.mdx)
\ No newline at end of file
diff --git a/pages/devs/get-started/query-network-data.mdx b/pages/devs/get-started/query-network-data.mdx
deleted file mode 100644
index 687ea5f..0000000
--- a/pages/devs/get-started/query-network-data.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
-# How to Query Network Data using `allorad`
-
-
-To query network-level data on the Allora chain using the `allorad` CLI, you need to interact with various RPC methods designed to return aggregate or holistic information about the
-network. These methods enable you to pull data that is crucial for understanding the overall state and performance of the network.
-
-## Prerequisites
-
-- [`allorad` CLI](/devs/get-started/cli)
-
-## Query Functions
-
-These functions read from the appchain only and do not write. Add the **Command** value into your query to retrieve the expected data.
-
-```bash
-allorad q emissions [Command] --node
-```
-
-### Get Latest Available Network Inferences
-
-- **RPC Method:** `GetLatestAvailableNetworkInferences`
-- **Command:** `latest-available-network-inferences [topic_id]`
-- **Description:** Returns the latest network inference for a given topic, but only if all necessary information to compute the inference is present. The result is only provided when complete data from the network is available to ensure accuracy.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic for which you want to retrieve the latest available network inference.
-
-#### Use Case:
-**Why use it?**
-- This command is useful when you need to retrieve the most recent network-wide inference for a topic, ensuring that all necessary data has been collected and processed. It is ideal for situations where decision-making relies on the completeness of the data and where partial data may lead to inaccurate conclusions.
-
-**Example Scenario:**
-- If you want to make a decision based on network predictions, but only when the inference is fully computed, use this command. For example, you might want the latest ETH price prediction, but only when all worker and forecaster data is available to provide an accurate result.
-
----
-
-### Get Total Rewards to Distribute
-
-- **RPC Method:** `GetTotalRewardToDistribute`
-- **Command:** `total-rewards`
-- **Description:** Returns the total amount of rewards that will be distributed across all rewardable topics in the current block. It provides an aggregate view of the rewards available for distribution.
-
-#### Use Case:
-**Why use it?**
-- This command is useful if you want to understand the total reward pool for a given block. It helps participants gauge the potential rewards available and how they may be distributed across topics based on performance.
-
-**Example Scenario:**
-- As a worker or forecaster, you might use this command to estimate the reward pool for the current block. This allows you to understand the potential total rewards before they are distributed across different topics and participants.
-
----
-
-### Get Current Module Parameters
-
-- **RPC Method:** `GetParams`
-- **Command:** `params`
-- **Description:** Retrieves the current parameters of the module in the Allora network. It is used to check the configuration and settings that control various aspects of the module's behavior.
-- **Positional Arguments:**
- - This command does not require any positional arguments.
-
-#### Use Case:
-**Why use it?**
-- This command is useful for querying the configuration settings of the module. It provides transparency into how the module is configured and allows participants to verify whether certain parameters, such as reward distribution rules or other operational settings, are up-to-date.
-
-**Example Scenario:**
-- If you're troubleshooting the behavior of a module or need to verify the configuration before making any changes, this command can give you insight into the current parameters and their values.
-
diff --git a/pages/devs/get-started/quick-start.mdx b/pages/devs/get-started/quick-start.mdx
new file mode 100644
index 0000000..547acff
--- /dev/null
+++ b/pages/devs/get-started/quick-start.mdx
@@ -0,0 +1,87 @@
+import { Callout, Steps } from 'nextra/components'
+
+# Quick Start
+
+Get up and running with the Allora Network in under 5 minutes. This guide covers everything you need to start building on Allora's decentralized AI network.
+
+## What is Allora?
+
+Allora is a decentralized network where AI models compete and collaborate to deliver superior predictions. Build inference workers, consume predictions, or validate network integrity.
+
+**Key Participants:**
+- **Workers** - Run AI models and submit predictions
+- **Consumers** - Integrate predictions into applications
+- **Reputers** - Validate prediction accuracy
+- **Validators** - Secure the network infrastructure
+
+
+
+### Install the Allora CLI
+
+The `allorad` CLI is your primary tool for interacting with the Allora Network.
+
+**Prerequisites:** [Install Go](https://golang.org/doc/install) (required for `allorad`)
+
+```bash
+curl -sSL https://raw.githubusercontent.com/allora-network/allora-chain/main/install.sh | bash -s -- v0.8.0
+```
+
+**Verify installation:**
+```bash
+allorad version
+```
+
+### Create Your Wallet
+
+```bash
+# Create new wallet
+allorad keys add [key_name]
+
+# Or recover existing wallet
+allorad keys add [key_name] --recover
+```
+
+
+**Save your mnemonic phrase securely!** You'll need it to recover your wallet.
+
+
+### Get Testnet Funds
+
+1. **Get funds:** Visit [Allora Faucet](https://faucet.testnet.allora.network/)
+2. **Enter your wallet address** (from the previous step)
+3. **Verify funds:** Check your balance at [Allora Explorer](https://explorer.testnet.allora.network/)
+
+### Network Configuration
+
+**Testnet Details:**
+- **RPC URL:** `https://allora-rpc.testnet.allora.network/`
+- **Chain ID:** `allora-testnet-1`
+- **Faucet:** https://faucet.testnet.allora.network/
+- **Explorer:** https://explorer.testnet.allora.network/
+
+
+
+## Quick Test
+
+Verify your setup by querying network information:
+
+```bash
+# Check network status
+allorad status --node https://allora-rpc.testnet.allora.network/
+
+# View details for an existing topic (ID 1)
+allorad query emissions topic 1 --node https://allora-rpc.testnet.allora.network/
+```
+
+## Next Steps
+
+- **Build Applications:** Learn [Network Interaction](/devs/get-started/network-interaction)
+- **Deploy Workers:** Explore [Worker Documentation](/devs/workers)
+- **Consume Data:** Check out [Consumer Guides](/devs/consumers)
+- **Join Competition:** Try [Model Forge Competition](/devs/get-started/model-forge)
+
+## Need Help?
+
+- **Documentation:** Explore specific participant guides
+- **Community:** Join our [Discord](https://discord.gg/allora) for support
+- **Issues:** Report bugs on [GitHub](https://github.com/allora-network)
\ No newline at end of file
diff --git a/pages/devs/get-started/setup-wallet.mdx b/pages/devs/get-started/setup-wallet.mdx
deleted file mode 100644
index c884427..0000000
--- a/pages/devs/get-started/setup-wallet.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
-import { Callout } from 'nextra/components'
-
-# Setup Wallet
-
-## Create Wallet
-
-Follow the instructions [here](/devs/get-started/cli) to install our CLI tool `allorad`, which is needed to create a wallet.
-
-Prior to executing transactions, a wallet must be created by running:
-
-```shell
-allorad keys add testkey
-```
-
-Learn more about setting up keys [here](https://docs.cosmos.network/main/user/run-node/keyring).
-
-Make sure you save your mnemomic and account information safely.
-
-
-Creating a wallet using `allorad` will generate a wallet address for all currently deployed versions of the Allora Chain (e.g. testnet, local, mainnet).
-
-
-## Wallet Recovery
-
-To recover a given wallet's keys, run the following command:
-
-```bash
-allorad keys add --recover
-```
-
-## Add Faucet Funds
-Each network has a different URL to access and request funds from. Please see the faucet URLs for the different networks below:
-
-- **Testnet**: https://faucet.testnet.allora.network/
-
-Enter the Allora Wallet address for the account that needs funding. If you don't have a wallet created yet, follow the instructions above to create one.
-
-## Explorer
-
-- **Testnet**: https://explorer.testnet.allora.network/
-
-
-Check to see that your wallet has been funded after requesting funds from the faucet by clicking the search bar on the top right corner of the explorer UI and entering your account address.
-
-
-## RPC URL and Chain ID
-Each network uses a different RPC URL and Chain ID which are needed to specify which network to run commands on when using specific commands on `allorad`. See a list of all RPC URLs and their respective Chain IDs supported today:
-
-- **Testnet**
- - `RPC_URL`:
- - https://rpc.ankr.com/allora_testnet
- - https://allora-rpc.testnet.allora.network/
- - `CHAIN_ID`: `allora-testnet-1`
\ No newline at end of file
diff --git a/pages/devs/reference/allorad.mdx b/pages/devs/reference/allorad.mdx
index 44ba463..c5e1357 100644
--- a/pages/devs/reference/allorad.mdx
+++ b/pages/devs/reference/allorad.mdx
@@ -1,49 +1,103 @@
# `allorad` Reference
-`allorad` commands below are broken out into:
-1. [Query functions](#query-functions), or functions that read from the chain
- - e.g. get active topics, get amount of stake in a topic
-2. [Tx functions](#tx-functions), or functions that write to the chain
- - e.g. create a topic, add stake to a reputer
+## What You'll Learn
+- Complete command reference for the `allorad` CLI tool with all available functions
+- Understanding the difference between query functions (read-only) and transaction functions (write operations)
+- How to interact with the Allora Chain for topic management, staking, and network operations
+- Practical examples and parameter descriptions for every major CLI command
+
+## Overview
+
+**The `allorad` CLI is the primary command-line interface for interacting with the Allora Network blockchain.** This comprehensive reference covers all available commands and their usage patterns.
+
+### Command Categories
+
+**`allorad` commands below are broken out into:**
+1. **[Query functions](#query-functions)**, or functions that read from the chain
+ - e.g. get active topics, get amount of stake in a topic
+2. **[Tx functions](#tx-functions)**, or functions that write to the chain
+ - e.g. create a topic, add stake to a reputer
+
+### Why Use the CLI?
+
+**Direct Network Access**:
+- **Complete functionality**: Access to all network features and operations
+- **Scripting capability**: Automate network interactions through scripts (see the sketch below)
+- **Development tools**: Essential for development, testing, and debugging
+- **Network administration**: Full control over node operations and configurations
+
+**Production Benefits**:
+- **Reliable interface**: Stable, well-tested interface for production use
+- **Precise control**: Fine-grained control over all network parameters
+- **Integration ready**: Easy integration with other tools and systems
+- **Documentation**: Comprehensive reference for all available operations
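+
+As a small illustration of the scripting capability noted above, the sketch below polls the latest available network inference for a topic on a fixed interval. The topic ID and node URL are placeholder assumptions; substitute your own values.
+
+```bash
+#!/usr/bin/env bash
+# Minimal polling sketch: fetch the latest available network inference
+# for TOPIC_ID every 60 seconds. Both values below are assumptions.
+TOPIC_ID=1
+NODE="https://allora-rpc.testnet.allora.network/"
+
+while true; do
+  allorad q emissions latest-available-network-inferences "$TOPIC_ID" --node "$NODE"
+  sleep 60
+done
+```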
## Query Functions
-These functions read from the appchain only and do not write. Add the **Command** value into your query to retrieve the expected data.
+**These functions read from the appchain only and do not write.** Add the **Command** value into your query to retrieve the expected data.
```bash
allorad q emissions [Command] --node
```
-### Params
+### Network Parameters
+#### Params
- **RPC Method:** `Params`
- **Command:** `params`
- **Description:** Get the current module parameters.
-### Get Next Topic ID
+**Usage Example**:
+```bash
+allorad q emissions params --node https://rpc.testnet.allora.network
+```
+
+**Use Cases**:
+- **Network monitoring**: Check current network configuration and settings
+- **Development**: Verify parameter changes during development and testing
+- **Analysis**: Understand network economics and operational parameters
+- **Troubleshooting**: Diagnose network issues related to configuration
+
+### Topic Management Queries
+#### Get Next Topic ID
- **RPC Method:** `GetNextTopicId`
- **Command:** `next-topic-id`
- **Description:** Get next topic id. Topic ids are incremented with each newly added topic.
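+
+**Usage Example** (testnet node URL assumed, as in the `params` example above):
+```bash
+allorad q emissions next-topic-id --node https://rpc.testnet.allora.network
+```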
-### Get Topic
+**Planning Benefits**:
+- **Topic creation**: Know the ID that will be assigned to your next topic
+- **Development planning**: Plan topic IDs for development and testing
+- **Integration**: Prepare applications for specific topic IDs
+- **Resource allocation**: Plan infrastructure for upcoming topics
+#### Get Topic
- **RPC Method:** `GetTopic`
- **Command:** `topic [topic_id]`
- **Description:** Get topic by topic_id.
- **Positional Arguments:**
- `topic_id` Identifier of the topic whose information will be returned.
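+
+**Usage Example** (topic ID `1` is illustrative):
+```bash
+allorad q emissions topic 1 --node https://rpc.testnet.allora.network
+```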
-### Topic Exists
+**Information Retrieved**:
+- **Topic configuration**: All topic settings and parameters
+- **Economic parameters**: Fees, rewards, and incentive structures
+- **Participation rules**: Requirements and constraints for participation
+- **Current status**: Active status and operational state
+#### Topic Exists
- **RPC Method:** `TopicExists`
- **Command:** `topic-exists [topic_id]`
- **Description:** True if topic exists at given id, else false.
- **Positional Arguments:**
- `topic_id` Identifier of the topic whose information will be returned.
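+
+**Usage Example** (topic ID `1` is illustrative):
+```bash
+allorad q emissions topic-exists 1 --node https://rpc.testnet.allora.network
+```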
-### Get Active Topics
+**Validation Use Cases**:
+- **Input validation**: Verify topic IDs before performing operations
+- **Error prevention**: Avoid operations on non-existent topics
+- **Development testing**: Validate topic creation and management workflows
+- **Integration checks**: Ensure external systems reference valid topics
+#### Get Active Topics
- **RPC Method:** `GetActiveTopics`
- **Command:** `active-topics [pagination]`
- **Description:** Get all currently active topics, paginated.
@@ -51,22 +105,39 @@ allorad q emissions [Command] --node
- `pagination` The json key-value pair of the limit of topics outputted
- **Example:** `'{"limit":10}'`
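+
+**Usage Example** (a limit of 10 is illustrative):
+```bash
+allorad q emissions active-topics '{"limit":10}' --node https://rpc.testnet.allora.network
+```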
-### Is Topic Active
+**Pagination Benefits**:
+- **Large dataset handling**: Manage queries when many topics exist
+- **Performance optimization**: Reduce query response times and resource usage
+- **Selective browsing**: View topics in manageable chunks
+- **Integration efficiency**: Support for paginated displays in applications
+#### Is Topic Active
- **RPC Method:** `IsTopicActive`
- **Command:** `is-topic-active [topic_id]`
- **Description:** True if the topic is active, else false.
- **Positional Arguments:**
- `topic_id` Identifier of the topic whose information will be returned.
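+
+**Usage Example** (topic ID `1` is illustrative):
+```bash
+allorad q emissions is-topic-active 1 --node https://rpc.testnet.allora.network
+```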
-### Get Rewardable Topics
+**Status Monitoring**:
+- **Operational decisions**: Determine if topic accepts new participants
+- **Resource planning**: Focus efforts on active topics
+- **Network health**: Monitor topic lifecycle and participation
+- **Automated systems**: Enable/disable features based on topic status
+#### Get Rewardable Topics
- **RPC Method:** `GetRewardableTopics`
- **Command:** `rewardable-topics`
- **Description:** Get Rewardable Topics.
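+
+**Usage Example** (testnet node URL assumed):
+```bash
+allorad q emissions rewardable-topics --node https://rpc.testnet.allora.network
+```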
-### Get Delegate Reward Per Share
+**Economic Analysis**:
+- **Reward planning**: Identify topics eligible for reward distribution
+- **Participant guidance**: Help users find profitable participation opportunities
+- **Network economics**: Understand which topics drive network incentives
+- **Performance tracking**: Monitor reward-eligible topic performance
+### Stake and Delegation Queries
+
+#### Get Delegate Reward Per Share
- **RPC Method:** `GetDelegateRewardPerShare`
- **Command:** `delegate-reward-per-share [topic_id] [reputer_address]`
- **Description:** Get total delegate reward per share stake in a reputer for a topic.
@@ -74,335 +145,30 @@ allorad q emissions [Command] --node
- `topic_id` Identifier of the topic whose information will be returned.
- `reputer_address` Address of the reputer.
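+
+**Usage Example** (topic ID `1` and the reputer address are placeholders):
+```bash
+allorad q emissions delegate-reward-per-share 1 [reputer_address] --node https://rpc.testnet.allora.network
+```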
-### Get Delegate Stake Placement
+**Delegation Economics**:
+- **Reward calculations**: Calculate expected returns from delegation
+- **Reputer comparison**: Compare reward rates across different reputers
+- **Investment decisions**: Make informed delegation choices
+- **Performance tracking**: Monitor delegation returns over time
+#### Get Delegate Stake Placement
- **RPC Method:** `GetDelegateStakePlacement`
- **Command:** `delegate-reward-per-share [topic_id] [delegator] [target]`
-- **Description:** Get the amount of token delegated to a target by a delegator in a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `delegator` Address of the delegator.
- - `target` Address of the target.
-
-### Get Delegate Stake Removal
-
-- **RPC Method:** `GetDelegateStakeRemoval`
-- **Command:** `delegate-stake-removal [block_height] [topic_id] [delegator] [reputer]`
-- **Description:** Get the current state of a pending delegate stake removal.
+- **Description:** Get the amount of stake a delegator has placed on a given target in a topic.
- **Positional Arguments:**
- - `block_height` Block height to query.
- `topic_id` Identifier of the topic whose information will be returned.
- `delegator` Address of the delegator.
- - `reputer` Address of the reputer.
-
-### Get Delegate Stake Upon Reputer
-
-- **RPC Method:** `GetDelegateStakeUponReputer`
-- **Command:** `delegate-stake-on-reputer [topic_id] [target]`
-- **Description:** Get the total amount of token delegated to a target reputer in a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `target` Address of the target reputer.
-
-### Get Forecast Scores Until Block
-
-- **RPC Method:** `GetForecastScoresUntilBlock`
-- **Command:** `forecast-scores-until-block [topic_id] [block_height]`
-- **Description:** Get all saved scores for all forecasters for a topic descending until a given past block height.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Get Forecaster Network Regret
-
-- **RPC Method:** `GetForecasterNetworkRegret`
-- **Command:** `forecaster-regret [topic_id] [worker]`
-- **Description:** Get current network regret for a given forecaster.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `worker` Address of the forecaster.
-
-### Get Inference Scores Until Block
-
-- **RPC Method:** `GetInferenceScoresUntilBlock`
-- **Command:** `inference-scores-until-block [topic_id] [block_height]`
-- **Description:** Get all saved scores for all inferers for a topic descending until a given past block height.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Get Inferer Network Regret
-
-- **RPC Method:** `GetInfererNetworkRegret`
-- **Command:** `inferer-regret [topic_id] [actor_id]`
-- **Description:** Get current network regret for a given inferer.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `actor_id` Address of the inferer.
-
-### Is Reputer Nonce Unfulfilled
-
-- **RPC Method:** `IsReputerNonceUnfulfilled`
-- **Command:** `reputer-nonce-unfulfilled [topic_id] [block_height]`
-- **Description:** True if reputer nonce is unfulfilled, else false.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Is Worker Nonce Unfulfilled
-
-- **RPC Method:** `IsWorkerNonceUnfulfilled`
-- **Command:** `worker-nonce-unfulfilled [topic_id] [block_height]`
-- **Description:** True if worker nonce is unfulfilled, else false.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Get Latest Available Network Inference
-
-- **RPC Method:** `GetLatestAvailableNetworkInference`
-- **Command:** `latest-available-network-inference [topic_id]`
-- **Description:** Returns network inference only if all available information to compute the inference is present.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Latest Forecaster Score
-
-- **RPC Method:** `GetLatestForecasterScore`
-- **Command:** `latest-forecaster-score [topic_id] [forecaster]`
-- **Description:** Returns the latest score for a forecaster in a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `forecaster` Address of the forecaster.
-
-### Get Latest Inferer Score
-
-- **RPC Method:** `GetLatestInfererScore`
-- **Command:** `latest-inferer-score [topic_id] [inferer]`
-- **Description:** Returns the latest score for an inferer in a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `inferer` Address of the inferer.
-
-### Get Latest Reputer Score
-
-- **RPC Method:** `GetLatestReputerScore`
-- **Command:** `latest-reputer-score [topic_id] [reputer]`
-- **Description:** Returns the latest score for a reputer in a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `reputer` Address of the reputer.
-
-### Get Latest Topic Inferences
-
-- **RPC Method:** `GetLatestTopicInferences`
-- **Command:** `latest-topic-raw-inferences [topic_id]`
-- **Description:** Returns the latest round of raw inferences from workers for a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Listening Coefficient
-
-- **RPC Method:** `GetListeningCoefficient`
-- **Command:** `listening-coefficient [topic_id] [reputer]`
-- **Description:** Returns the current listening coefficient for a given reputer.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `reputer` Address of the reputer.
-
-### Get One In Forecaster Network Regret
-
-- **RPC Method:** `GetOneInForecasterNetworkRegret`
-- **Command:** `one-in-forecaster-regret [topic_id] [forecaster] [inferer]`
-- **Description:** Returns regret born from including a forecaster's implied inference in a batch with an inferer.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `forecaster` Address of the forecaster.
- - `inferer` Address of the inferer.
-
-### Get Naive Inferer Network Regret
-
-- **RPC Method:** `GetNaiveInfererNetworkRegret`
-- **Command:** `naive-inferer-network-regret [topic_id] [inferer]`
-- **Description:** Returns regret born from including an inferer's naive inference in a batch.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `inferer` Address of the inferer.
-
-### Get One Out Inferer Inferer Network Regret
-
-- **RPC Method:** `GetOneOutInfererInfererNetworkRegret`
-- **Command:** `one-out-inferer-inferer-network-regret [topic_id] [one_out_inferer] [inferer]`
-- **Description:** Returns regret born from including one inferer's implied inference in a batch with another inferer.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `one_out_inferer` Address of the inferer being compared.
- - `inferer` Address of the primary inferer.
-
-### Get One Out Inferer Forecaster Network Regret
-
-- **RPC Method:** `GetOneOutInfererForecasterNetworkRegret`
-- **Command:** `one-out-inferer-forecaster-network-regret [topic_id] [one_out_inferer] [forecaster]`
-- **Description:** Returns regret born from including one inferer's implied inference in a batch with a forecaster.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `one_out_inferer` Address of the inferer.
- - `forecaster` Address of the forecaster.
-
-### Get One Out Forecaster Inferer Network Regret
-
-- **RPC Method:** `GetOneOutForecasterInfererNetworkRegret`
-- **Command:** `one-out-forecaster-inferer-network-regret [topic_id] [one_out_forecaster] [inferer]`
-- **Description:** Returns regret born from including one forecaster's implied inference in a batch with an inferer.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `one_out_forecaster` Address of the forecaster.
- - `inferer` Address of the inferer.
-
-### Get One Out Forecaster Forecaster Network Regret
-
-- **RPC Method:** `GetOneOutForecasterForecasterNetworkRegret`
-- **Command:** `one-out-forecaster-forecaster-network-regret [topic_id] [one_out_forecaster] [forecaster]`
-- **Description:** Returns regret born from including one forecaster's implied inference in a batch with another forecaster.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `one_out_forecaster` Address of the forecaster being compared.
- - `forecaster` Address of the primary forecaster.
-
-### Get Previous Forecast Reward Fraction
-
-- **RPC Method:** `GetPreviousForecastRewardFraction`
-- **Command:** `previous-forecaster-reward-fraction [topic_id] [worker]`
-- **Description:** Return previous reward fraction for a worker.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `worker` Address of the worker.
-
-### Get Previous Inference Reward Fraction
-
-- **RPC Method:** `GetPreviousInferenceRewardFraction`
-- **Command:** `previous-inference-reward-fraction [topic_id] [worker]`
-- **Description:** Return previous reward fraction for a worker.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `worker` Address of the worker.
-
-### Get Previous Percentage Reward To Staked Reputers
+ - `target` Address of the target (reputer or worker).
-- **RPC Method:** `GetPreviousPercentageRewardToStakedReputers`
-- **Command:** `previous-percentage-reputer-reward`
-- **Description:** Return the previous percentage reward paid to staked reputers.
+**Stake Management**:
+- **Portfolio tracking**: Monitor delegation positions across topics
+- **Performance analysis**: Evaluate delegation effectiveness
+- **Risk management**: Understand stake distribution and exposure
+- **Strategic planning**: Optimize delegation strategies
-### Get Previous Reputer Reward Fraction
-
-- **RPC Method:** `GetPreviousReputerRewardFraction`
-- **Command:** `previous-reputer-reward-fraction [topic_id] [reputer]`
-- **Description:** Return the previous reward fraction for a reputer.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `reputer` Address of the reputer.
-
-### Get Previous Topic Weight
-
-- **RPC Method:** `GetPreviousTopicWeight`
-- **Command:** `previous-topic-weight [topic_id]`
-- **Description:** Return the previous topic weight.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Reputer Loss Bundles At Block
-
-- **RPC Method:** `GetReputerLossBundlesAtBlock`
-- **Command:** `reputer-loss-bundle [topic_id] [block_height]`
-- **Description:** Return the reputer loss bundle at a block height.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Get Reputers Scores At Block
-
-- **RPC Method:** `GetReputersScoresAtBlock`
-- **Command:** `reputer-scores [topic_id] [block_height]`
-- **Description:** Return reputer scores at a block height.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Get Stake Removal For Reputer And Topic Id
-
-- **RPC Method:** `GetStakeRemovalForReputerAndTopicId`
-- **Command:** `reputer-scores [reputer] [topic_id]`
-- **Description:** Return stake removal information for a reputer in a topic.
-- **Positional Arguments:**
- - `reputer` Address of the reputer.
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Stake Reputer Authority
-
-- **RPC Method:** `GetStakeReputerAuthority`
-- **Command:** `reputer-authority [topic_id] [reputer]`
-- **Description:** Return total stake on reputer in a topic, including delegate stake and their own.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `reputer` Address of the reputer.
-
-### Get Topic Fee Revenue
-
-- **RPC Method:** `GetTopicFeeRevenue`
-- **Command:** `topic-fee-revenue [topic_id]`
-- **Description:** Return effective fee revenue for a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Topic Reward Nonce
-
-- **RPC Method:** `GetTopicRewardNonce`
-- **Command:** `topic-reward-nonce [topic_id]`
-- **Description:** Return the reward nonce for a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Topic Stake
-
-- **RPC Method:** `GetTopicStake`
-- **Command:** `topic-stake [topic_id]`
-- **Description:** Return total stake in a topic, including delegate stake.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Total Reward To Distribute
-
-- **RPC Method:** `GetTotalRewardToDistribute`
-- **Command:** `total-rewards`
-- **Description:** Return total rewards to be distributed among all rewardable topics.
-
-### Get Unfulfilled Reputer Nonces
-
-- **RPC Method:** `GetUnfulfilledReputerNonces`
-- **Command:** `unfulfilled-reputer-nonces [topic_id]`
-- **Description:** Return topic reputer nonces that have yet to be fulfilled.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Unfulfilled Worker Nonces
-
-- **RPC Method:** `GetUnfulfilledWorkerNonces`
-- **Command:** `unfulfilled-worker-nonces [topic_id]`
-- **Description:** Return topic worker nonces that have yet to be fulfilled.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
-
-### Get Worker Forecast Scores At Block
-
-- **RPC Method:** `GetWorkerForecastScoresAtBlock`
-- **Command:** `forecast-scores [topic_id] [block_height]`
-- **Description:** Return scores for a worker at a block height.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
-
-### Get Worker Inference Scores At Block
+### Worker Performance Queries
+#### Get Worker Inference Scores At Block
- **RPC Method:** `GetWorkerInferenceScoresAtBlock`
- **Command:** `inference-scores [topic_id] [block_height]`
- **Description:** Return scores for a worker at a block height.
@@ -410,8 +176,15 @@ allorad q emissions [Command] --node
- `topic_id` Identifier of the topic whose information will be returned.
- `block_height` Block height to query.
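+
+As a concrete sketch, querying inference scores for topic `1` at block height `1000000` might look like the following; the `--node` endpoint is a placeholder, not an official URL:
+
+```bash
+allorad q emissions inference-scores 1 1000000 \
+  --node https://your-rpc-endpoint:443 \
+  --output json
+```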
-### Get Stake From Reputer In Topic In Self
+**Performance Analysis**:
+- **Historical performance**: Track worker performance over time
+- **Quality assessment**: Evaluate inference accuracy and reliability
+- **Competitive analysis**: Compare worker performance across participants
+- **Reward prediction**: Estimate future rewards based on performance trends
+### Stake Information Queries
+
+#### Get Stake From Reputer In Topic In Self
- **RPC Method:** `GetStakeFromReputerInTopicInSelf`
- **Command:** `stake-reputer-in-topic-self [reputer_address] [topic_id]`
- **Description:** Get the stake of a reputer in a topic that they put on themselves.
@@ -419,24 +192,39 @@ allorad q emissions [Command] --node
- `reputer_address` Address of the reputer.
- `topic_id` Identifier of the topic whose information will be returned.
-### Get Stake Removals Up Until Block
+**Self-Stake Analysis**:
+- **Commitment assessment**: Evaluate reputer commitment through self-stake
+- **Trust indicators**: Higher self-stake often indicates confidence
+- **Risk evaluation**: Understand reputer skin-in-the-game
+- **Delegation decisions**: Factor self-stake into delegation choices
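+
+For example, checking a reputer's self-stake in topic `1` might look like this; the reputer address and endpoint are placeholders:
+
+```bash
+allorad q emissions stake-reputer-in-topic-self <reputer_address> 1 \
+  --node https://your-rpc-endpoint:443
+```
+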
+#### Get Stake Removals Up Until Block
- **RPC Method:** `GetStakeRemovalsUpUntilBlock`
- **Command:** `stake-removals-up-until-block [block_height]`
- **Description:** Get all pending stake removal requests going to happen at a given block height.
- **Positional Arguments:**
- `block_height` Block height to query.
-### Get Delegate Stake Removals Up Until Block
+**Liquidity Planning**:
+- **Unstaking timeline**: Understand when stakes will become available
+- **Network liquidity**: Monitor network-wide stake changes
+- **Planning decisions**: Time operations around stake removals
+- **Market impact**: Anticipate effects of large stake removals
+#### Get Delegate Stake Removals Up Until Block
- **RPC Method:** `GetDelegateStakeRemovalsUpUntilBlock`
- **Command:** `delegate-stake-removals-up-until-block [block_height]`
- **Description:** Get all pending delegate stake removal requests going to happen at a given block height.
- **Positional Arguments:**
- `block_height` Block height to query.
-### Get Stake Removal Info
+**Delegation Management**:
+- **Portfolio planning**: Plan for delegation changes and withdrawals
+- **Liquidity management**: Understand when delegated funds become available
+- **Risk assessment**: Monitor delegation stability across the network
+- **Strategic timing**: Optimize delegation adjustments around network changes
+#### Get Stake Removal Info
- **RPC Method:** `GetStakeRemovalInfo`
- **Command:** `stake-removal-info [address] [topic_id]`
- **Description:** Get a pending stake removal for a reputer in a topic.
@@ -444,8 +232,13 @@ allorad q emissions [Command] --node
- `address` Address of the reputer.
- `topic_id` Identifier of the topic whose information will be returned.
-### Get Delegate Stake Removal Info
+**Individual Stake Tracking**:
+- **Personal management**: Track your own pending stake removals
+- **Timeline planning**: Understand when your stake will be available
+- **Decision support**: Make informed decisions about additional operations
+- **Status verification**: Confirm stake removal requests are processed correctly
+#### Get Delegate Stake Removal Info
- **RPC Method:** `GetDelegateStakeRemovalInfo`
- **Command:** `delegate-stake-removal-info [delegator] [reputer] [topic_id]`
- **Description:** Get a pending delegate stake removal for a delegator in a topic.
@@ -454,23 +247,41 @@ allorad q emissions [Command] --node
- `reputer` Address of the reputer.
- `topic_id` Identifier of the topic whose information will be returned.
-### Get Topic Last Worker Commit Info
+**Delegation Tracking**:
+- **Portfolio management**: Monitor all pending delegation changes
+- **Timing optimization**: Plan delegation strategies around removal timelines
+- **Risk management**: Understand exposure during removal periods
+- **Verification**: Confirm delegation removal requests are accurate
+
+### Network Activity Queries
+#### Get Topic Last Worker Commit Info
- **RPC Method:** `GetTopicLastWorkerCommitInfo`
- **Command:** `topic-last-worker-commit [topic_id]`
- **Description:** Get the last commit by a worker for a topic.
- **Positional Arguments:**
- `topic_id` Identifier of the topic whose information will be returned.
-### Get Topic Last Reputer Commit Info
+**Activity Monitoring**:
+- **Network health**: Monitor recent worker activity and participation
+- **Topic vitality**: Assess topic engagement and operational status
+- **Performance tracking**: Understand worker participation patterns
+- **Debugging**: Troubleshoot topic issues related to worker participation
+#### Get Topic Last Reputer Commit Info
- **RPC Method:** `GetTopicLastReputerCommitInfo`
- **Command:** `topic-last-reputer-commit [topic_id]`
- **Description:** Get the last commit by a reputer for a topic.
- **Positional Arguments:**
- `topic_id` Identifier of the topic whose information will be returned.
-### Get Forecasts for a Topic at Block Height
+**Quality Assurance Monitoring**:
+- **Reputer activity**: Track reputer engagement and evaluation frequency
+- **Quality control**: Monitor reputation system health and activity
+- **Network integrity**: Ensure continuous quality assessment operations
+- **System health**: Identify potential issues with reputation mechanisms
+
+#### Get Forecasts for a Topic at Block Height
- **RPC Method:** `GetForecastsAtBlock`
- **Command:** `forecasts-at-block [topic_id] [block_height]`
- **Description:** Get the Forecasts for a topic at block height.
@@ -478,12 +289,24 @@ allorad q emissions [Command] --node
- `topic_id` Identifier of the topic whose information will be returned
- `block_height` Number of blocks that precede the specific block you are trying to query
-### Execute GetMultiReputerStakeInTopic RPC Method
+**Historical Analysis**:
+- **Performance research**: Analyze forecast accuracy over time
+- **Model evaluation**: Compare forecast quality across different periods
+- **Market analysis**: Understand prediction patterns and trends
+- **Algorithm improvement**: Use historical data to enhance prediction models
+
+#### Execute GetMultiReputerStakeInTopic RPC Method
- **RPC Method:** `GetMultiReputerStakeInTopic`
- **Command:** `get-multi-reputer-stake-in-topic`
- **Description:** Execute the GetMultiReputerStakeInTopic RPC method.
-### Get All Inferences Produced for a Topic in a Particular Timestamp
+**Bulk Analysis**:
+- **Network overview**: Get comprehensive stake information across multiple reputers
+- **Market analysis**: Understand stake distribution and concentration
+- **Risk assessment**: Evaluate network decentralization and stake concentration
+- **Strategic planning**: Inform delegation and participation strategies
+
+#### Get All Inferences Produced for a Topic in a Particular Timestamp
- **RPC Method:** `GetInferencesAtBlock`
- **Command:** `inferences-at-block [topic_id] [block_height]`
- **Description:** Get All Inferences produced for a topic in a particular timestamp.
@@ -491,7 +314,13 @@ allorad q emissions [Command] --node
- `topic_id` Identifier of the topic whose information will be returned
- `block_height` Number of blocks that precede the specific block you are trying to query
-### Check if Reputer is Registered in the Topic
+**Data Analysis**:
+- **Historical research**: Access complete inference datasets for analysis
+- **Quality assessment**: Evaluate inference quality and consistency
+- **Model training**: Use historical inferences for model improvement
+- **Competitive analysis**: Understand inference patterns across participants
+
+#### Check if Reputer is Registered in the Topic
- **RPC Method:** `IsReputerRegistered`
- **Command:** `is-reputer-registered [topic_id] [address]`
- **Description:** True if reputer is registered in the topic.
@@ -499,251 +328,234 @@ allorad q emissions [Command] --node
- `topic_id` Identifier of the topic whose information will be returned
- `address` Reputer Address
+**Registration Verification**:
+- **Participation eligibility**: Verify reputer can participate in topic evaluation
+- **System validation**: Confirm registration status before operations
+- **Troubleshooting**: Diagnose participation issues related to registration
+- **Planning**: Understand which reputers are active in specific topics
-### Check if an Address is a Whitelist Admin
-- **RPC Method:** `IsWhitelistAdmin`
-- **Command:** `is-whitelist-admin [address]`
-- **Description:** Check if an address is a whitelist admin. True if so, else false.
-- **Positional Arguments:**
- - `address` Address to check
+## Tx Functions
-### Check if Worker is Registered in the Topic
-- **RPC Method:** `IsWorkerRegistered`
-- **Command:** `is-worker-registered [topic_id] [address]`
-- **Description:** True if worker is registered in the topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned
- - `address` Address to check
+**These functions perform transactions that write to the chain and modify network state.** All transaction functions require proper authentication and sufficient gas fees.
-### Get the Latest Network Inferences and Weights for a Topic
-- **RPC Method:** `GetLatestNetworkInference`
-- **Command:** `latest-network-inference [topic_id]`
-- **Description:** Get the latest Network inferences and weights for a topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned
+### Command Structure
-### Get the Network Inferences for a Topic at a Block Height
-- **RPC Method:** `GetNetworkInferencesAtBlock`
-- **Command:** `network-inferences-at-block [topic_id] [block_height_last_inference] [block_height_last_reward]`
-- **Description:** Get the Network Inferences for a topic at a block height where the last inference was made and the last reward was given.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned
- - `block_height_last_inference` Block height where the last inference was made
- - `block_height_last_reward` Block height where the last reward was given
+**Basic Transaction Format**:
+```bash
+allorad tx [module] [command] [parameters] --from [account] --chain-id [chain_id] --node [rpc_url]
+```
-### Get the Network Loss Bundle for a Topic at Given Block Height
-- **RPC Method:** `GetNetworkLossBundleAtBlock`
-- **Command:** `network-loss-bundle-at-block [topic_id] [block]`
-- **Description:** Get the network loss bundle for a topic at given block height.
+**Common Parameters**:
+- **`--from`**: The account executing the transaction
+- **`--chain-id`**: Network identifier (e.g., allora-testnet-1)
+- **`--node`**: RPC endpoint URL
+- **`--gas`**: Gas limit for the transaction
+- **`--gas-prices`**: Gas price in network tokens
+
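+A fully assembled invocation might look like the following sketch; the account name, endpoint, and `uallo` gas denomination are placeholder assumptions rather than official values:
+
+```bash
+allorad tx emissions [command] [parameters] \
+  --from my-account \
+  --chain-id allora-testnet-1 \
+  --node https://your-rpc-endpoint:443 \
+  --gas auto \
+  --gas-adjustment 1.4 \
+  --gas-prices 10uallo
+```
+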
+### Topic Management Transactions
+
+#### Create New Topic
+- **RPC Method:** `CreateTopic`
+- **Command:** `create-topic [metadata] [loss_logic] [loss_method] [inference_logic] [inference_method] [default_arg] [creator] [weight_logic] [weight_method] [weight_cadence] [weight_default_arg] [inference_cadence] [ground_truth_lag] [p_norm] [alpha_regret] [allow_negative] [epsilon] --min-stake [amount]`
+
+**Topic Creation Process**:
+- **Economic planning**: Set minimum stake and participation requirements
+- **Logic configuration**: Define inference and loss calculation methods
+- **Cadence settings**: Configure timing for inferences and weight updates
+- **Quality parameters**: Set accuracy requirements and evaluation criteria
+
+**Parameters Explained**:
+- **`metadata`**: JSON string with topic description and configuration
+- **`loss_logic`**: Contract or logic for calculating prediction losses
+- **`inference_logic`**: Contract or logic for generating inferences
+- **`weight_cadence`**: How often weights are recalculated
+- **`ground_truth_lag`**: Delay before ground truth is available
+
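+Because of the large number of positional arguments, a skeleton invocation helps clarify their ordering. Every angle-bracketed value below is a placeholder that must be replaced with topic-specific configuration:
+
+```bash
+allorad tx emissions create-topic \
+  '{"description":"example topic"}' \
+  <loss_logic> <loss_method> \
+  <inference_logic> <inference_method> \
+  <default_arg> <creator_address> \
+  <weight_logic> <weight_method> <weight_cadence> <weight_default_arg> \
+  <inference_cadence> <ground_truth_lag> \
+  <p_norm> <alpha_regret> <allow_negative> <epsilon> \
+  --min-stake 100 \
+  --from my-account \
+  --chain-id allora-testnet-1
+```
+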
+#### Fund Topic
+- **RPC Method:** `FundTopic`
+- **Command:** `fund-topic [topic_id] [amount]`
+- **Description:** Add funding to an existing topic to incentivize participation.
- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned
- - `block` Block to query on
+ - `topic_id` The topic to fund
+ - `amount` Amount of tokens to contribute as funding
-### Get Amount of Stake in a Topic for a Delegator
-- **RPC Method:** `GetStakeDelegatorInTopic`
-- **Command:** `stake-delegator-in-topic [delegator_address] [topic_id]`
-- **Description:** Get amount of stake in a topic for a delegator.
-- **Positional Arguments:**
- - `delegator_address` Address of the delegator
- - `topic_id` Identifier of the topic whose information will be returned
+**Funding Strategy**:
+- **Incentive enhancement**: Increase rewards to attract better participants
+- **Quality improvement**: Higher funding often correlates with better predictions
+- **Network growth**: Well-funded topics attract more participants
+- **Competitive advantage**: Strategic funding can improve topic performance
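+
+As an illustrative sketch (topic ID, amount, and account name are placeholders):
+
+```bash
+allorad tx emissions fund-topic 1 1000000 \
+  --from my-account \
+  --chain-id allora-testnet-1 \
+  --node https://your-rpc-endpoint:443
+```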
-### Get Amount of Stake from Delegator in a Topic for a Reputer
-- **RPC Method:** `GetStakeDelegatorInTopicReputer`
-- **Command:** `stake-delegator-in-topic-reputer [delegator_address] [reputer_address] [topic_id]`
-- **Description:** Get amount of stake from delegator in a topic for a reputer.
-- **Positional Arguments:**
- - `delegator_address` Address of the delegator
- - `reputer_address` Address of the reputer
- - `topic_id` Identifier of the topic whose information will be returned
+### Stake Management Transactions
-### Get Reputer Stake in a Topic
-- **RPC Method:** `GetStakeInTopicReputer`
-- **Command:** `stake-in-topic-reputer [address] [topic_id]`
-- **Description:** Get reputer stake in a topic, including stake delegated to them in that topic.
+#### Add Stake to Reputer
+- **RPC Method:** `AddStake`
+- **Command:** `add-stake [topic_id] [amount]`
+- **Description:** Add stake to yourself as a reputer in a topic.
- **Positional Arguments:**
- - `address` Address of the reputer
- - `topic_id` Identifier of the topic whose information will be returned
+ - `topic_id` The topic to stake in
+ - `amount` Amount of tokens to stake
+
+**Self-Staking Benefits**:
+- **Participation qualification**: Meet minimum stake requirements
+- **Skin-in-the-game**: Demonstrate commitment to quality evaluation
+- **Reward eligibility**: Qualify for reputer rewards based on performance
+- **Network contribution**: Support network security and quality assurance
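+
+A hypothetical staking flow, pairing the transaction with a follow-up query to verify the new position (all values are placeholders):
+
+```bash
+# Stake 5000000 base units on yourself as a reputer in topic 1
+allorad tx emissions add-stake 1 5000000 \
+  --from my-account --chain-id allora-testnet-1
+
+# Verify the resulting self-stake using the query documented above
+allorad q emissions stake-reputer-in-topic-self $(allorad keys show my-account -a) 1
+```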
-### Get Total Delegate Stake in a Topic and Reputer
-- **RPC Method:** `GetTotalDelegatedStakeInTopicReputer`
-- **Command:** `stake-total-delegated-in-topic-reputer [reputer_address] [topic_id]`
-- **Description:** Get total delegate stake in a topic and reputer.
+#### Remove Stake from Reputer
+- **RPC Method:** `RemoveStake`
+- **Command:** `remove-stake [topic_id] [amount]`
+- **Description:** Remove stake from yourself as a reputer in a topic.
- **Positional Arguments:**
- - `reputer_address` Address of the reputer
- - `topic_id` Identifier of the topic whose information will be returned
+ - `topic_id` The topic to remove stake from
+ - `amount` Amount of tokens to unstake
-### Get the Total Amount of Staked Tokens by All Participants in the Network
-- **RPC Method:** `GetTotalStake`
-- **Command:** `total-stake`
-- **Description:** Get the total amount of staked tokens by all participants in the network.
+**Unstaking Considerations**:
+- **Unbonding period**: Stake remains locked for security period
+- **Penalty exposure**: Remain subject to slashing during unbonding
+- **Reward impact**: Reduced stake affects future reward eligibility
+- **Strategic timing**: Plan unstaking around performance and market conditions
-### Get the Latest Inference for a Given Worker and Topic
-- **RPC Method:** `GetWorkerLatestInference`
-- **Command:** `worker-latest-inference [topic_id] [worker_address]`
-- **Description:** Get the latest inference for a given worker and topic.
+#### Delegate Stake to Reputer
+- **RPC Method:** `DelegateStake`
+- **Command:** `delegate-stake [topic_id] [reputer_address] [amount]`
+- **Description:** Delegate stake to a reputer in a topic.
- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned
- - `worker_address` Given worker to query on
+ - `topic_id` The topic for delegation
+ - `reputer_address` Address of the reputer to delegate to
+ - `amount` Amount of tokens to delegate
-## Tx Functions
+**Delegation Strategy**:
+- **Passive participation**: Earn rewards without direct reputer operations
+- **Risk distribution**: Spread delegation across multiple reputers
+- **Performance-based**: Choose reputers based on historical performance
+- **Diversification**: Participate in multiple topics through delegation
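+
+For instance, delegating to a single reputer in topic `1` might look like this sketch (address and amount are placeholders):
+
+```bash
+allorad tx emissions delegate-stake 1 <reputer_address> 2000000 \
+  --from my-account --chain-id allora-testnet-1
+```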
-These functions write to the appchain. Add the **Command** value into your query to retrieve the expected data.
+#### Undelegate Stake from Reputer
+- **RPC Method:** `UndelegateStake`
+- **Command:** `undelegate-stake [topic_id] [reputer_address] [amount]`
+- **Description:** Remove delegated stake from a reputer in a topic.
+- **Positional Arguments:**
+ - `topic_id` The topic to undelegate from
+ - `reputer_address` Address of the reputer to undelegate from
+ - `amount` Amount of tokens to undelegate
-```bash
-allorad tx emissions [Command]
-```
+**Undelegation Management**:
+- **Portfolio rebalancing**: Adjust delegation based on performance
+- **Risk management**: Reduce exposure to underperforming reputers
+- **Opportunity optimization**: Reallocate to better opportunities
+- **Liquidity planning**: Time undelegation for capital needs
+
+### Reward Management Transactions
-### Create New Topic
-- **RPC Method:** `CreateNewTopic`
-- **Command:** `create-topic [creator] [metadata] [loss_logic] [loss_method] [inference_logic] [inference_method] [epoch_length] [ground_truth_lag] [default_arg] [p_norm] [alpha_regret] [allow_negative] [tolerance]`
-- **Description:** Add a new topic to the network.
-- **Positional Arguments:**
- - `creator` The creator is the owner of the topic that is able to update the topic in the future
- - `metadata`
- - `loss_logic`
- - `loss_method`
- - `inference_logic`
- - `inference_method`
- - `epoch_length`
- - `ground_truth_lag`
- - `default_arg`
- - `p_norm`
- - `alpha_regret`
- - `allow_negative`
- - `tolerance`
-
-Detailed instructions on [how to create a topic](/devs/topic-creators/how-to-create-topic) are linked.
-
-### Add an Admin Address to the Whitelist
-- **RPC Method:** `AddToWhitelistAdmin`
-- **Command:** `add-to-whitelist-admin [sender] [address]`
-- **Description:** Add an admin address to the whitelist used for admin functions on-chain.
+#### Claim Rewards
+- **RPC Method:** `ClaimRewards`
+- **Command:** `claim-rewards [topic_id]`
+- **Description:** Claim accumulated rewards from participation in a topic.
- **Positional Arguments:**
- - `sender` Address of the sender
- - `address` Address that will be added to the whitelist
+ - `topic_id` The topic to claim rewards from
-### Remove an Admin Address from the Whitelist
-- **RPC Method:** `RemoveFromWhitelistAdmin`
-- **Command:** `remove-from-whitelist-admin`
-- **Description:** Remove an admin address from the whitelist used for admin functions on-chain.
-- **Positional Arguments:**
- - `sender` Address of the sender
- - `address` Address that will be removed to the whitelist
+**Reward Claiming**:
+- **Regular income**: Convert earned rewards to liquid tokens
+- **Compounding strategy**: Decide between claiming and restaking rewards
+- **Tax planning**: Manage reward claiming for tax optimization
+- **Reinvestment**: Use claimed rewards for additional participation
+
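+A minimal example, assuming rewards have accrued in topic `1`:
+
+```bash
+allorad tx emissions claim-rewards 1 \
+  --from my-account --chain-id allora-testnet-1
+```
+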
+### Registration Transactions
+
+#### Register as Worker
+- **RPC Method:** `RegisterWorker`
+- **Command:** `register-worker [topic_id] [worker_info]`
+- **Description:** Register as a worker to provide inferences for a topic.
+- **Positional Arguments:**
+ - `topic_id` The topic to register for
+ - `worker_info` JSON containing worker configuration and endpoints
+
+**Worker Registration**:
+- **Network participation**: Begin providing AI/ML inferences
+- **Economic opportunity**: Earn rewards for accurate predictions
+- **Technical setup**: Configure endpoints and inference capabilities
+- **Quality commitment**: Commit to providing reliable, accurate inferences
+
+#### Register as Reputer
+- **RPC Method:** `RegisterReputer`
+- **Command:** `register-reputer [topic_id] [reputer_info]`
+- **Description:** Register as a reputer to evaluate worker performance.
+- **Positional Arguments:**
+ - `topic_id` The topic to register for
+ - `reputer_info` JSON containing reputer configuration and capabilities
+
+**Reputer Registration**:
+- **Quality assurance**: Participate in network quality control
+- **Evaluation expertise**: Contribute domain knowledge for assessment
+- **Economic participation**: Earn rewards for accurate evaluations
+- **Network health**: Support network integrity through reputation management
+
+## Best Practices
+
+### Query Optimization
+
+**Efficient Querying**:
+- **Pagination**: Use pagination for large datasets to improve performance (see the sketch after this list)
+- **Specific queries**: Query specific information rather than broad datasets
+- **Caching**: Cache frequently accessed data to reduce API calls
+- **Batch operations**: Group related queries to minimize network overhead
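+
+Where a query supports the standard Cosmos SDK pagination flags, requests can be split into pages; whether a given emissions query is paginated is an assumption to verify against the CLI help:
+
+```bash
+allorad q emissions [command] [parameters] \
+  --limit 50 --page 2 \
+  --node https://your-rpc-endpoint:443
+```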
+
+### Transaction Management
+
+**Transaction Best Practices**:
+- **Gas estimation**: Estimate gas requirements before submitting transactions (see the sketch after this list)
+- **Fee optimization**: Balance transaction speed with cost considerations
+- **Error handling**: Implement retry logic for failed transactions
+- **Confirmation tracking**: Monitor transaction status and confirmations
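+
+One hedged pattern for gas estimation uses the standard Cosmos SDK simulation flags; the command and parameters below are placeholders:
+
+```bash
+# Simulate first to print a gas estimate without broadcasting
+allorad tx emissions [command] [parameters] --from my-account --dry-run
+
+# Then submit with automatic estimation plus a safety buffer
+allorad tx emissions [command] [parameters] \
+  --from my-account --gas auto --gas-adjustment 1.5 --gas-prices 10uallo
+```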
+
+### Security Considerations
+
+**Safe Operations**:
+- **Key management**: Secure storage and handling of private keys
+- **Amount verification**: Double-check amounts before submitting transactions
+- **Address validation**: Verify recipient addresses to prevent loss
+- **Network verification**: Confirm you're on the correct network
+
+## Common Workflows
+
+### Topic Participation Workflow
-### Register Network Actor
-- **RPC Method:** `Register`
-- **Command:** `register [sender] [lib_p2p_key] [multi_address] [topic_ids] [initial_stake] [owner] [is_reputer]`
-- **Description:** Register a new reputer or worker for a topic.
-- **Positional Arguments:**
- - `sender`
- - `lib_p2p_key`
- - `multi_address`
- - `topic_ids`
- - `owner`
- - `is_reputer`
-
-### Remove a Reputer or Worker from a Topic
-- **RPC Method:** `RemoveRegistration`
-- **Command:** `remove-registration [creator] [owner] [is_reputer]`
-- **Description:** Remove a reputer or worker from a topic.
-- **Positional Arguments:**
- - `creator` Address of the creator
- - `owner` Address of the owner of the reputer/worker
- - `is_reputer` Set to `true` if the network participant to remove is a reputer
-
-### Insert Bulk Reputer Payload
-- **RPC Method:** `InsertBulkReputerPayload`
-- **Command:** `insert-bulk-reputer-payload [reputer_value_bundles]`
-- **Description:** Insert bulk reputer payload.
-- **Positional Arguments:**
- - `reputer_value_bundles` Reputer payload to insert
+**Getting Started**:
+1. **Query active topics**: Find topics with good participation opportunities
+2. **Check requirements**: Verify minimum stake and participation requirements
+3. **Register**: Register as worker or reputer based on capabilities
+4. **Stake**: Add necessary stake to begin participation
+5. **Monitor**: Track performance and adjust strategy as needed
+
+### Delegation Workflow
-### Insert Bulk Worker Payload
-- **RPC Method:** `InsertBulkWorkerPayload`
-- **Command:** `insert-bulk-worker-payload [worker_value_bundles]`
-- **Description:** Insert bulk worker payload.
-- **Positional Arguments:**
- - `worker_value_bundles` Worker payload to insert
+**Delegation Strategy**:
+1. **Research reputers**: Analyze reputer performance and track records
+2. **Evaluate topics**: Choose topics with good economic potential
+3. **Delegate stake**: Distribute stake across selected reputers and topics
+4. **Monitor performance**: Track delegation returns and reputer performance
+5. **Rebalance**: Adjust delegations based on performance and opportunities
-### Add Stake
-- **RPC Method:** `AddStake`
-- **Command:** `add-stake [sender] [topic_id] [amount]`
-- **Description:** Add stake [amount] to one's self sender [reputer or worker] for a topic.
-- **Positional Arguments:**
- - `sender` The staker. This is the address of the transaction sender.
- - `topic_id` Identifier of the topic to add stake to
- - `amount` The stake
+### Stake Management Workflow
-### Remove Stake from a Topic
-- **RPC Method:** `RemoveStake`
-- **Command:** `remove-stake [sender] [topic_id] [amount]`
-- **Description:** Modify sender's [reputer] stake position by removing [amount] stake from a topic [topic_id].
-- **Positional Arguments:**
- - `sender` The staker. This is the address of the transaction sender.
- - `topic_id` Identifier of the topic to remove stake from
- - `amount` The amount staked
+**Lifecycle Management**:
+1. **Initial staking**: Add stake to begin network participation
+2. **Performance monitoring**: Track rewards and network participation
+3. **Strategic adjustments**: Add or remove stake based on performance
+4. **Reward claiming**: Regularly claim rewards or compound them
+5. **Exit planning**: Plan stake removal timing for optimal returns
-### Delegate Stake to a Reputer for a Topic
-- **RPC Method:** `DelegateStake`
-- **Command:** `delegate-stake [sender] [topic_id] [reputer] [amount]`
-- **Description:** Delegate stake [amount] to a reputer for a topic.
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `topic_id` Identifier of the topic to remove stake from
- - `reputer` Address of the reputer
- - `amount` The amount to add to stake
-
-### Remove Delegate Stake from a Topic
-- **RPC Method:** `RemoveDelegateStake`
-- **Command:** `remove-delegate-stake [sender] [topic_id] [reputer] [amount]`
-- **Description:** Modify sender's [reputer] delegate stake position by removing [amount] stake from a topic [topic_id] from a reputer [reputer].
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `topic_id` Identifier of the topic to remove stake from
- - `reputer` Address of the reputer
- - `amount` The amount to remove from stake
-
-### Cancel Removing Delegate Stake
-- **RPC Method:** `CancelRemoveDelegateStake`
-- **Command:** `cancel-remove-delegate-stake [sender] [topic_id] [reputer]`
-- **Description:** Cancel the removal of delegated stake for a delegator staking on a reputer in a topic
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `topic_id` Identifier of the topic
- - `reputer` Address of the reputer
-
-### Cancel Removing Stake
-- **RPC Method:** `CancelRemoveStake`
-- **Command:** `cancel-remove-stake [sender] [topic_id]`
-- **Description:** Cancel the removal of stake for a reputer in a topic
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `topic_id` Identifier of the topic
+## Prerequisites
-### Send Funds to a Topic to Pay for Inferences
-- **RPC Method:** `FundTopic`
-- **Command:** `fund-topic [sender] [topic_id] [amount] [extra_data]`
-- **Description:** Send funds to a topic to pay for inferences.
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `topic_id` Identifier of the topic
- - `amount` The amount to send
- - `extra_data`
-
-### Get Reward for Delegator for a Topic
-- **RPC Method:** `RewardDelegateStake`
-- **Command:** `reward-delegate-stake [sender] [topic_id] [reputer]`
-- **Description:** Get Reward for Delegator [sender] for a topic.
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `topic_id` Identifier of the topic
- - `reputer` Address of the reputer
-
-### Update Network Parameters
-- **RPC Method:** `UpdateParams`
-- **Command:** `update-params [sender] [params]`
-- **Description:** Update parameters of the network.
-- **Positional Arguments:**
- - `sender` This is the address of the transaction sender
- - `params` Params to be updated
+- **Network access**: Connectivity to Allora Network RPC endpoints
+- **Account setup**: Properly configured account with sufficient tokens
+- **CLI installation**: Installed and configured `allorad` binary
+- **Basic understanding**: Knowledge of blockchain operations and command-line interfaces
+
+## Next Steps
+
+- [Learn about wallet setup](/devs/get-started/quick-start#create-your-wallet) for account configuration
+- [Study network parameters](/devs/reference/params/chain) for understanding network configuration
+- [Explore topic creation](/devs/topic-creators/how-to-create-topic) for launching prediction markets
+- [Review staking guides](/devs/reputers/set-and-adjust-stake) for participation strategies
diff --git a/pages/devs/reference/module-accounts.mdx b/pages/devs/reference/module-accounts.mdx
index ff4aa86..d2642f4 100644
--- a/pages/devs/reference/module-accounts.mdx
+++ b/pages/devs/reference/module-accounts.mdx
@@ -1,46 +1,166 @@
# Allora Module Accounts
-The Allora Chain uses [Cosmos SDK module accounts](https://docs.cosmos.network/main/build/modules/bank#module-accounts) to hold tokens belonging to various different actors on the network. This page describes the various places that module accounts hold funds are held, and the flow of money through the network.
+## What You'll Learn
+- Understanding how Allora Chain manages tokens through Cosmos SDK module accounts
+- Complete overview of actors who earn rewards and sources of token rewards
+- Detailed explanation of module accounts and their roles in fund management
+- How block execution order affects payment flows and reward distribution
+
+## Overview
+
+**The Allora Chain uses [Cosmos SDK module accounts](https://docs.cosmos.network/main/build/modules/bank#module-accounts) to hold tokens belonging to various different actors on the network.** This page describes the module accounts in which funds are held and the flow of money through the network.
+
+### Why Module Accounts Matter
+
+**System Architecture Benefits**:
+- **Transparent fund management**: Clear separation of different token pools and their purposes
+- **Automated operations**: Smart contract-like behavior without custom smart contract complexity
+- **Audit capability**: Easy tracking of token flows and balances across network functions
+- **Security isolation**: Separate accounts prevent unauthorized access to different fund pools
+
+**Economic Transparency**:
+- **Clear attribution**: Every token has a clear owner and purpose within the system
+- **Predictable flows**: Defined rules for how tokens move between different network participants
+- **Fair distribution**: Algorithmic distribution of rewards based on network contribution
+- **Economic sustainability**: Balanced incentive structure for long-term network health
+
+## Network Economics Overview
### Actors that Earn Token Rewards
-There are three actors in the Allora network that earn token rewards:
+**There are three actors in the Allora network that earn token rewards:**
-- **Cosmos Validators**: For the service of running the cosmos blockchain powering Allora.
-- **Reputers**: For providing ground truth to each topic, and maintaining a reputation system scoring the quality of worker outputs.
-- **Workers**: For creating the actual AI/ML Inferences that the system provides for each topic.
+#### 1. Cosmos Validators
+- **Service provided**: For the service of running the cosmos blockchain powering Allora.
+- **Core function**: Maintain blockchain consensus, validate transactions, and secure network
+- **Reward source**: Block rewards, transaction fees, and inflation distribution
+- **Economic role**: Foundation layer of network security and operation
+
+#### 2. Reputers
+- **Service provided**: For providing ground truth to each topic, and maintaining a reputation system scoring the quality of worker outputs.
+- **Core function**: Quality assurance, data verification, and worker performance evaluation
+- **Reward source**: Share of network rewards based on accuracy and contribution quality
+- **Economic role**: Ensure data integrity and maintain network reliability standards
+
+#### 3. Workers
+- **Service provided**: For creating the actual AI/ML Inferences that the system provides for each topic.
+- **Core function**: Generate predictions, run models, and provide inference data
+- **Reward source**: Performance-based rewards for accurate and valuable predictions
+- **Economic role**: Primary value creators delivering the core network service
### Sources of Token Rewards
-There are also three sources of tokens rewards, that pay the three actors who earn them:
+**There are also three sources of token rewards that pay the three actors who earn them:**
+
+#### 1. Cosmos Network Transaction Fees
+**Transaction fees on Allora are optional, at least at the time of this writing.** However, the Cosmos SDK does support an optional transaction fee paid by the creator of a transaction, denominated in units of computational steps taken (like gas, for those familiar with the EVM). If the creator of a transaction chooses to add a fee (say, to get a higher priority of being added to a block), that fee will be paid out as token rewards.
+
+**Fee Structure Benefits**:
+- **Optional participation**: Users can choose fee levels based on urgency and priority
+- **Computational fairness**: Fees correlate with actual network resource consumption
+- **Economic incentives**: Higher fees can provide faster transaction processing
+- **Network sustainability**: Transaction fees contribute to validator and network operator rewards
+
+#### 2. Inference Request Fees
+**When making an inference request, the requestor (inference data consumer) will bid a price they are willing to pay for that request.** In that bid, they must send that amount of tokens to the network. If and when an inference is fulfilled, the Allora network will pay out the fee collected for that request as rewards.
+
+**Request Fee Mechanism**:
+- **Market-driven pricing**: Consumers bid based on value and urgency of inference requests
+- **Escrow protection**: Funds are held securely until inference delivery is confirmed
+- **Performance incentives**: Higher quality inferences can command higher prices
+- **Direct value exchange**: Clear connection between consumer payment and worker/reputer rewards
-- **Cosmos network transaction fees**: Transaction fees on Allora are optional, at least at the time of this writing. However Cosmos SDK does support an optional transaction fee to be paid by the creator of a transaction, paid in units of computational steps taken (like gas for those familiar with the EVM). If the creator of a transaction chooses to add a fee (say, for get a higher priority of being added to a block), that fee will be paid out as token rewards.
-- **Inference request fees**: When making an inference request, the requestor (inference data consumer) will bid a price they are willing to pay for that request. In that bid, they must send that amount of tokens to the network. If and when an inference is fulfilled, the upshot network will pay out the fee collected for that request as rewards.
-- **Token inflationary rewards**: Allora has an inflationary token emissions schedule that halves on regular intervals, similar to Bitcoin. Newly minted tokens are paid out each block as rewards.
+#### 3. Token Inflationary Rewards
+**Allora has an inflationary token emissions schedule that halves on regular intervals, similar to Bitcoin.** Newly minted tokens are paid out each block as rewards.
+
+**Inflation Schedule Benefits**:
+- **Predictable supply**: Known emission schedule provides economic certainty
+- **Network bootstrap**: Initial higher inflation attracts early participants and secures network
+- **Long-term sustainability**: Halving schedule ensures controlled long-term token supply
+- **Fair distribution**: Block-by-block distribution ensures proportional reward allocation
+
+## Module Account Architecture
### Module Accounts Used by Allora
-The following represents the list of module accounts that are changed or important in the flow of funds across the Allora Appchain. We do not discuss the standard module accounts used in cosmos-sdk validator staking, as they are unmodified from the Cosmos SDK. Note that the actual string used for the module name is the name in (`monospace`) below:
+**The following represents the list of module accounts that are changed or important in the flow of funds across the Allora Appchain.** We do not discuss the standard module accounts used in cosmos-sdk validator staking, as they are unmodified from the Cosmos SDK. Note that the actual string used for each module name is shown in `monospace` below:
+
+#### Core Financial Modules
+
+##### Mint (`mint`)
+**The Allora mint module account is the only account allowed to create new tokens.** It creates new tokens during its `BeginBlock` according to the [Allora inflation schedule](./params/chain#halving_interval) and then immediately sends those tokens to the Fee Collector account.
+
+**Mint Module Functions**:
+- **Token creation**: Sole authority for creating new tokens on the network
+- **Inflation control**: Implements predetermined inflation schedule and halving events
+- **Supply management**: Maintains accurate total supply accounting
+- **Distribution initiation**: Transfers newly minted tokens to fee collector for distribution
+
+##### Fee Collector (`fee_collector`)
+**This module account collects all transaction fees on the network (this happens in the `auth` module during transaction execution).**
+
+**Fee Collection Process**:
+- **Transaction fee aggregation**: Collects fees from all network transactions
+- **Inference payment processing**: Receives payments from inference requests
+- **Distribution preparation**: Accumulates funds for periodic reward distribution
+- **System integration**: Works with auth module for seamless fee processing
+
+##### Distribution (`distribution`)
+**The distribution module holds the tokens and does the balance accounting for cosmos validator staking.** It takes funds from the fee collector account. Cosmos validators can withdraw their staked tokens and receive validator rewards from this module's RPC functions. The Allora codebase does not change this standard cosmos module, but we do frontrun it (described below).
+
+**Distribution Module Features**:
+- **Validator reward management**: Handles all validator staking rewards and withdrawals
+- **Standard Cosmos functionality**: Unmodified implementation from Cosmos SDK
+- **Balance accounting**: Maintains accurate records of validator stakes and rewards
+- **Withdrawal processing**: Enables validators to claim rewards and unstake tokens
+
+#### Allora-Specific Modules
+
+##### Allora Rewards (`allorarewards`)
+**The Allora Rewards module account holds the tokens earned by reputers and workers for their services to the network.** Reputers and workers share the collected transaction fees and inflationary rewards on the network with cosmos validators at a [percentage rate](./params/chain#percent_rewards_reputers_workers) set in the chain parameters. When rewards are paid out each block, the Allora Rewards module account pays the Allora Staking module, which then also increments the reputer or worker's stake appropriately.
-- **Mint** (`mint`): The Allora mint module account is the only account allowed to create new tokens. It creates new tokens during its `BeginBlock` according to the [Allora inflation schedule](./params/chain#halving_interval) and then immediately sends those token to the Fee Collector account.
-- **Fee Collector** (`fee_collector`): This module account collects all transaction fees on the network (this happens in the `auth` module during transaction execution).
-- **Distribution** (`distribution`): The distribution module holds the tokens and does the balance accounting for cosmos validator staking. It takes funds from the fee collector account. Cosmos validators can withdraw their staked tokens and receive validator rewards from this module's RPC functions. The Allora codebase does not change this standard cosmos module, but we do frontrun it (described below).
-- **Allora Rewards**(`allorarewards`): The Allora Rewards module account holds the tokens earned by reputers and workers for their services to the network. Reputers and workers share the collected transaction fees and inflationary rewards on the network with cosmos validators at a [percentage rate](./params/chain#percent_rewards_reputers_workers) set in the chain parameters. When rewards are paid out each block, the Allora Rewards module account pays the Allora Staking module, which then also increments the reputer or worker's stake appropriately.
-- **Allora Staking** (`allorastaking`): Separate from the standard cosmos validator staking modules and workflow, Allora supports staking for our Reputer and Workers actor roles. The Allora Staking module account is our analog to the distribution module. It holds the tokens that stakers send to network when they deposit stake, and it also holds the rewards that stakers receive from transaction fees, newly minted token inflation, and inference request fees. When reputers or workers go to withdraw their stake, the rewards are automatically combined with their stake and automatically claimed.
-- **Allora Requests** (`allorarequests`): The Allora Requests module account holds the tokens that are paid by Inference Consumers when they make an inference request. This module holds escrowed funds for subscriptions and only pays out when actual inferences are made upon that subscription. That means this module can hold funds for long periods of time before the transaction fees for a given subscription are actually paid out. It pays the Fee Collector account.
+**Allora Rewards Functions**:
+- **Worker/Reputer reward pool**: Dedicated fund management for AI/ML participants
+- **Proportional distribution**: Shares network rewards based on configured percentages
+- **Performance-based allocation**: Distributes rewards based on contribution quality and accuracy
+- **Stake integration**: Works with Allora Staking module for seamless reward compounding
+
+##### Allora Staking (`allorastaking`)
+**Separate from the standard cosmos validator staking modules and workflow, Allora supports staking for our Reputer and Worker actor roles.** The Allora Staking module account is our analog to the distribution module. It holds the tokens that stakers send to the network when they deposit stake, and it also holds the rewards that stakers receive from transaction fees, newly minted token inflation, and inference request fees. When reputers or workers withdraw their stake, the rewards are automatically combined with their stake and claimed.
+
+**Allora Staking Features**:
+- **Specialized staking system**: Custom staking implementation for workers and reputers
+- **Reward compounding**: Automatic reinvestment of rewards into stake positions
+- **Integrated withdrawals**: Combined stake and reward withdrawal in single transaction
+- **Role-specific logic**: Tailored staking rules for different network participant types
+
+##### Allora Requests (`allorarequests`)
+**The Allora Requests module account holds the tokens that are paid by Inference Consumers when they make an inference request.** This module holds escrowed funds for subscriptions and only pays out when actual inferences are made upon that subscription. That means this module can hold funds for long periods of time before the transaction fees for a given subscription are actually paid out. It pays the Fee Collector account.
+
+**Request Module Functions**:
+- **Escrow management**: Secure holding of consumer payments until inference delivery
+- **Subscription support**: Manages ongoing subscription-based inference requests
+- **Payment processing**: Releases payments only upon successful inference completion
+- **Consumer protection**: Ensures consumers only pay for delivered services
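+
+Module account balances can typically be inspected with the standard Cosmos SDK `auth` and `bank` queries; the availability of these exact subcommands in `allorad` is an assumption:
+
+```bash
+# Look up a module account's address and metadata
+allorad q auth module-account allorarequests --node https://your-rpc-endpoint:443
+
+# Then inspect the tokens it holds
+allorad q bank balances <module_account_address> --node https://your-rpc-endpoint:443
+```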
+
+## Block Execution and Payment Flow
### Module Execution Order in a Block and the Impact on Payment Flows
-In Cosmos SDK, before the transactions of a block are processed, modules are able to specify `BeginBlock` and `EndBlock` functions that run at the beginning and end of a block, respectively. Below you can see a snippet from Allora Chain's app.yaml file, which specifies the order that these functions should be run:
+**In Cosmos SDK, before the transactions of a block are processed, modules are able to specify `BeginBlock` and `EndBlock` functions that run at the beginning and end of a block, respectively.** Below is a snippet from Allora Chain's app.yaml file, which specifies the order in which these functions are run:
```yaml app.yaml
begin_blockers: [emissions, distribution, staking, mint]
end_blockers: [staking, emissions]
```
-The Cosmos SDK distribution module works by implementing a `BeginBlock` function that takes the money deposited in the Fee Collector account from the _previous block_. After that, the Mint module mints new tokens to the Fee Collector account. In the middle, transactions run, and pay their transaction fees, as well as inference request fees to the Fee Collector account.
+#### Execution Order Strategy
+
+**The Cosmos SDK distribution module works by implementing a `BeginBlock` function that takes the money deposited in the Fee Collector account from the _previous block_.** After that, the Mint module mints new tokens to the Fee Collector account. In between, transactions run and pay their transaction fees, as well as inference request fees, to the Fee Collector account.
-The app.yaml places the Allora emissions module in front of the distribution module. This is where the `percent_rewards_reputers_workers` [chain parameter](./params/chain#percent_rewards_reputers_workers) takes some percentage of the Fee Collector's token balance, and sends it to the Allora Rewards module account. So basically, the Allora emissions module frontruns the distribution module to steal funds that it otherwise would have gotten, in order to take the percentage cut of rewards that belong to reputers and workers.
+**The app.yaml places the Allora emissions module in front of the distribution module. This is where the `percent_rewards_reputers_workers` [chain parameter](./params/chain#percent_rewards_reputers_workers) takes some percentage of the Fee Collector's token balance, and sends it to the Allora Rewards module account.** In effect, the Allora emissions module frontruns the distribution module, diverting funds the distribution module would otherwise have received in order to take the percentage cut of rewards that belongs to reputers and workers.
+
+### Chronological Payment Flow
```Text Chronological Order of Payments
New block starts. Call BeginBlock:
@@ -55,10 +175,72 @@ Block about to end. Call EndBlock:
New block starts. Call BeginBlock...
```
+#### Flow Analysis
+
+**Strategic Execution Order**:
+- **Priority allocation**: Allora-specific rewards are allocated before standard Cosmos rewards
+- **Fair distribution**: Ensures workers and reputers receive their designated share
+- **System integration**: Seamless integration with standard Cosmos SDK modules
+- **Continuous operation**: Cyclical process ensures consistent reward distribution
+
+**Economic Implications**:
+- **Guaranteed allocation**: Workers and reputers receive rewards before validator distribution
+- **Predictable percentages**: Chain parameters define exact allocation percentages
+- **Network sustainability**: Balanced reward distribution maintains all participant types
+- **Growth incentives**: Proper incentives for network growth and quality improvement
+
+## Reward Distribution Mechanics
+
### Rewards Epochs
-Cosmos Validators can use the distribution module and staking module standard cosmos functions to manipulate their validator stake and claim their validator rewards.
+**Cosmos Validators can use the distribution module and staking module standard cosmos functions to manipulate their validator stake and claim their validator rewards.**
+
+**For Reputer and Worker rewards, the `epoch_length` [chain parameter](./params/chain#epoch_length) controls how often reputer and worker rewards are paid out.** Every `epoch_length` blocks, the rewards calculation runs in the emissions `EndBlock`, which causes the Allora Rewards module account to pay the Allora Staking module account directly. The Allora Staking module then increases the staking balances of all actors who are paid rewards as part of this procedure. In this way Allora is able to autocompound stake positions.
+
+**Finally, when a Reputer or Worker wishes to withdraw their stake, the accumulated rewards are automatically combined with their stake and claimed by the reputer or worker in one lump sum.**
+
+### Epoch-Based Distribution Benefits
+
+**Automated Compounding**:
+- **Reward reinvestment**: Automatic compounding of rewards increases stake positions
+- **Gas efficiency**: Batch processing reduces individual transaction costs
+- **Simplified management**: Users don't need to manually claim and restake rewards
+- **Enhanced returns**: Compounding effect increases long-term earnings potential
+
+**Withdrawal Efficiency**:
+- **Unified process**: Single transaction combines stake withdrawal and reward claiming
+- **Reduced complexity**: Simplified user experience for fund management
+- **Cost optimization**: Fewer transactions reduce overall network fees
+- **Immediate liquidity**: Direct access to both stake and accumulated rewards
+
+## System Integration
+
+### Cross-Module Coordination
+
+**Module Interaction Benefits**:
+- **Seamless operation**: All modules work together without user intervention
+- **Consistent state**: Synchronized updates across all relevant modules
+- **Economic coherence**: Balanced incentives across all network participants
+- **Operational efficiency**: Automated processes reduce manual management requirements
+
+### Network Health Indicators
+
+**Financial Health Metrics**:
+- **Module balance tracking**: Monitor fund levels across all module accounts
+- **Reward distribution ratios**: Verify correct percentage allocations
+- **Stake growth patterns**: Track network participation and engagement
+- **Economic sustainability**: Ensure long-term viability of incentive structures
+
+## Prerequisites
+
+- **Blockchain fundamentals**: Understanding of blockchain architecture and token economics
+- **Cosmos SDK knowledge**: Familiarity with Cosmos SDK module system and architecture
+- **Economic principles**: Basic understanding of incentive mechanisms and token distribution
+- **Technical literacy**: Ability to interpret technical documentation and system specifications
-For Reputer and Worker rewards, the `epoch_length` [chain parameter](./params/chain#epoch_length) controls how often the reputer and staker rewards are paid out. Every `epoch_length` epochs, the rewards calculation will run in the emissions `EndBlock`, which will cause the Allora Rewards module account to pay the Allora Staking module account directly. The Allora Staking module will then increase the staking balances of all actors who are paid rewards as part of this procedure. In this way Allora is able to autocompound stake positions.
+## Next Steps
-Finally when a Reputer or Worker wishes to withdraw their stake, they do so, and the rewards are returned together with the original balance staked by the reputer or worker in one lump sum.
+- [Explore chain parameters](/devs/reference/params/chain) for detailed configuration settings
+- [Study emission mechanisms](/devs/reference/params/mint) for inflation and reward calculations
+- [Learn about staking parameters](/devs/reference/params/stake) for network participation rules
+- [Review consensus parameters](/devs/reference/params/consensus) for network operation details
diff --git a/pages/devs/reference/params/chain.mdx b/pages/devs/reference/params/chain.mdx
index 5ac4ea9..66d929c 100644
--- a/pages/devs/reference/params/chain.mdx
+++ b/pages/devs/reference/params/chain.mdx
@@ -1,163 +1,357 @@
# Chain Parameters
-> A glossary and description of chain-level parameters
+## What You'll Learn
+- Complete glossary of all chain-level parameters that control network behavior
+- Understanding of mint module parameters that govern token inflation and supply
+- Overview of Allora-specific parameters for network operations and economics
+- How parameter values affect network performance, security, and participant incentives
-## Mint Module and Token Inflation Parameters
+## Overview
-#### inflation_rate_change
+> **A glossary and description of chain-level parameters**
-The maximum permitted annual change in inflation rate. The mint module will throw an error if the inflation rate exceeds this value.
+**Chain parameters are configurable values that control various aspects of the Allora Network's behavior and economics.** These parameters can be adjusted through governance proposals to optimize network performance, security, and user experience.
-Default Value: 357.3582624
+### Why Chain Parameters Matter
-#### inflation_max
+**Network Configuration**:
+- **Economic tuning**: Fine-tune incentive structures and reward distributions
+- **Performance optimization**: Adjust timing and resource allocation for optimal efficiency
+- **Security enhancement**: Configure parameters to maintain network security and integrity
+- **Governance control**: Enable community-driven adjustments to network behavior
-The maximum inflation rate. The mint module will throw an error if the inflation rate exceeds this value.
+**Dynamic Management**:
+- **Adaptability**: Modify network behavior without code changes
+- **Community governance**: Allow token holders to influence network direction
+- **Economic balance**: Maintain sustainable tokenomics and participant incentives
+- **Risk management**: Adjust parameters to address emerging challenges and opportunities
-Default Value: 357.3582624
+## Token Economics and Inflation
-#### inflation_min
+### Mint Module and Token Inflation Parameters
-The minimum permitted inflation rate. The mint module will throw an error if the inflation rate goes below this value
+#### Core Inflation Controls
-Default Value: 0
+##### inflation_rate_change
+**The maximum permitted annual change in inflation rate.** The mint module will throw an error if the inflation rate change exceeds this value.
-#### goal_bonded
+**Default Value:** 357.3582624
-The goal used to target the percentage of bonded staking cosmos validators.
+**Parameter Purpose**:
+- **Rate stability**: Prevent dramatic inflation fluctuations that could destabilize economics
+- **Predictable supply**: Provide predictability for long-term economic planning
+- **Governance safety**: Protect against potentially harmful governance proposals
+- **Economic modeling**: Enable accurate forecasting of token supply changes
-Default Value: 0.67
+##### inflation_max
+**The maximum inflation rate.** The mint module will throw an error if the inflation rate exceeds this value.
-#### blocks_per_year
+**Default Value:** 357.3582624
-The amount of blocks that the inflation schedule believes will happen each year.
+**Economic Impact**:
+- **Supply ceiling**: Set upper bound on token inflation to protect value
+- **Investment confidence**: Provide certainty about maximum dilution rates
+- **Network bootstrapping**: Allow higher initial inflation for network growth
+- **Long-term sustainability**: Balance growth incentives with token value preservation
-Default Value: 6311520
+##### inflation_min
+**The minimum permitted inflation rate.** The mint module will throw an error if the inflation rate goes below this value.
-#### max_supply
+**Default Value:** 0
-The capped amount of tokens that will ever be allowed to exist.
+**Minimum Rate Benefits**:
+- **Reward continuity**: Ensure ongoing rewards for network participants
+- **Economic floor**: Prevent deflationary spirals that could harm participation
+- **Incentive maintenance**: Keep minimum incentives for validators and participants
+- **Network security**: Maintain baseline rewards for network security providers
-Default Value: 1 Billion Allo \* 1e18 (for base unit uAllo): 1e28 uAllo
+#### Staking and Economics
-#### halving_interval
+##### goal_bonded
+**The target ratio of tokens bonded (staked) with cosmos validators relative to the total token supply.**
-The number of blocks at which to halve the inflation rate of newly minted tokens, like Bitcoin's emissions schedule:
+**Default Value:** 0.67
-Default Value: 25246080
+**Target Bonding Strategy**:
+- **Security optimization**: Encourage adequate stake bonding for network security
+- **Liquidity balance**: Maintain balance between staked and liquid tokens
+- **Inflation adjustment**: Influence inflation rates based on staking participation
+- **Economic equilibrium**: Target optimal staking ratio for network health
-#### current_block_provision
+##### blocks_per_year
+**The number of blocks that the inflation schedule assumes will be produced each year.**
-Number of tokens that will be minted every block during a halving interval. This chain parameter controls the first value set for the first block. Afterwards, each halvening this value will be divided by two.
+**Default Value:** 6311520
-Default Value: 2831000000000000000000
+**Block Timing Importance**:
+- **Inflation calculation**: Basis for calculating annual inflation and reward distribution
+- **Economic planning**: Enable accurate economic modeling and projections
+- **Reward scheduling**: Determine timing and frequency of reward distributions
+- **Network performance**: Align inflation calculations with actual network performance
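+
+The default corresponds to an assumed block time of about 5 seconds, as this quick check shows:
+
+```python
+# Implied block time from blocks_per_year (default value from this page).
+seconds_per_year = 365.25 * 24 * 60 * 60  # 31,557,600
+blocks_per_year = 6_311_520
+print(seconds_per_year / blocks_per_year)  # 5.0 seconds per block
+```
+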
-## Allora Specific Parameters
+#### Supply Management
-#### reward_cadence
+##### max_supply
+**The capped amount of tokens that will ever be allowed to exist.**
-The duration of a reward epoch in blocks. Every `reward_cadence` seconds, rewards are recomputed within `EndBlock`.
+**Default Value:** 1 Billion Allo * 1e18 (for base unit uAllo): 1e27 uAllo
-Default Value: 600 blocks
+**Supply Cap Benefits**:
+- **Scarcity assurance**: Guarantee finite token supply for long-term value
+- **Economic predictability**: Provide certainty about maximum token dilution
+- **Investment appeal**: Create scarcity dynamics that may support token value
+- **Inflation control**: Eventually eliminate inflation as supply cap is approached
-Shorter epochs can lead to more frequent reward updates and responsiveness. This is advantageous for rapidly reacting to changes in the network (eg new topics, models, incentives, etc) and make the rewards available earlier. However small values they also have an impact on network efficiency.
+##### halving_interval
+**The number of blocks between halvings of the block reward for newly minted tokens, following Bitcoin's emissions schedule.**
-#### min_topic_unmet_demand
+**Default Value:** 25246080
-The minimum unmet demand on a topic to consider it active, and thus enter rounds of inference solicitation and weight adjustment.
+**Halving Schedule Impact**:
+- **Predictable reduction**: Systematic decrease in inflation over time
+- **Bitcoin model**: Proven emissions schedule that balances growth and scarcity
+- **Long-term planning**: Enable participants to plan for known inflation changes
+- **Economic sustainability**: Gradually reduce inflation for sustainable tokenomics
-Default Value: 100 allo
+##### current_block_provision
+**Number of tokens that will be minted every block during a halving interval.** This chain parameter sets the initial value for the first block. Afterwards, this value is divided by two at each halving.
-The value provides a minimum amount of demand in order to trigger inference and weight adjustment rounds, to protect the network against activity of little to no added value. It is also kept small enough to not represent a barrier of entry for participation.
+**Default Value:** 2831000000000000000000
-#### max_topics_per_block
+**Block Reward Structure**:
+- **Consistent rewards**: Predictable rewards per block for participants
+- **Halving mechanism**: Automatic reduction aligned with halving schedule
+- **Bootstrap incentives**: Higher initial rewards to bootstrap network participation
+- **Economic transition**: Smooth transition between halving periods
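+
+At the ~5-second block time assumed by `blocks_per_year`, 25,246,080 blocks works out to roughly four years between halvings. A sketch of the resulting emissions schedule, using the defaults from this page:
+
+```python
+# Sketch of the halving emissions schedule (defaults from this page).
+HALVING_INTERVAL = 25_246_080                      # blocks between halvings
+INITIAL_PROVISION = 2_831_000_000_000_000_000_000  # uallo minted per block
+
+def block_provision(height):
+    """Per-block provision after applying all elapsed halvings."""
+    halvings = height // HALVING_INTERVAL
+    return INITIAL_PROVISION // (2 ** halvings)
+
+print(block_provision(0))                 # 2831 allo worth of uallo
+print(block_provision(HALVING_INTERVAL))  # half the initial provision
+```
+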
-Maximum number of active topics to run inference and weight adjustment rounds for on each block.
+## Network Operations
-Default Value: 2048 topics
+### Allora Specific Parameters
-This value is high enough to allow a reasonable number of active topics to coexist, while also protecting the network against too much activity per block, preventing congestion and ensuring a more predictable block processing time.
+#### Core Network Timing
-#### min_request_unmet_demand
+##### reward_cadence
+**The duration of a reward epoch in blocks.** Every `reward_cadence` blocks, rewards are recomputed within `EndBlock`.
-Threshold under which the inference requests will be deleted or prevented from being created.
+**Default Value:** 600 blocks
-Default Value: 1 allo
+**Shorter epochs lead to more frequent reward updates and greater responsiveness.** This is advantageous for rapidly reacting to changes in the network (e.g. new topics, models, incentives) and makes rewards available earlier. However, small values also have an impact on network efficiency.
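+
+Assuming the ~5-second block time used elsewhere in these docs, the default epoch length translates to wall-clock time as follows:
+
+```python
+# Approximate wall-clock length of a reward epoch (assuming ~5s blocks).
+reward_cadence = 600    # blocks per epoch (default from this page)
+block_time_seconds = 5  # assumed average block time
+print(reward_cadence * block_time_seconds / 60)  # 50.0 minutes per epoch
+```
+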
-The purpose is to allow to prevent unnecessary processing of requests with minimal impact, keeping the state of the chain tidy, while at the same time be conservative with partially exhausted inference requests.
+**Cadence Strategy**:
+- **Responsive rewards**: Quick adaptation to network changes and performance
+- **Computational efficiency**: Balance responsiveness with network resource usage
+- **Participant engagement**: Frequent updates maintain participant interest and motivation
+- **Network optimization**: Tune for optimal balance of responsiveness and efficiency
-#### max_missing_inference_percent
+#### Activity Thresholds
-The percentage of inferences rounds missed by a worker, over which the worker gets penalized.
+##### min_topic_unmet_demand
+**The minimum unmet demand on a topic to consider it active, and thus enter rounds of inference solicitation and weight adjustment.**
-Default Value: 20 %
+**Default Value:** 100 allo
-Penalizing workers for missing inferences encourages reliability and accountability in the AI inference process. However, setting this value too low may lead to frequent penalties, potentially discouraging worker participation. A value that strikes a balance between both has been set.
+**The value requires a minimum amount of demand before inference and weight adjustment rounds are triggered, protecting the network against activity of little to no added value.** It is also kept small enough not to represent a barrier to entry for participation.
-#### required_minimum_stake
+**Demand Threshold Benefits**:
+- **Quality control**: Ensure topics have meaningful demand before resource allocation
+- **Resource efficiency**: Prevent waste on low-value activities
+- **Accessibility**: Keep threshold low enough for legitimate participation
+- **Network focus**: Direct resources toward high-value inference opportunities
-Sets the minimum stake to be considered as a reputer in good standing. If a reputer has less than this stake, than their contribution to reputation scoring will be ignored, and they will not receive any rewards from the system.
+##### max_topics_per_block
+**Maximum number of active topics to run inference and weight adjustment rounds for on each block.**
-Default Value: 100 allo
+**Default Value:** 2048 topics
-Setting a minimum stake helps ensure that participants have a vested interest in the network's success and are not simply sybils, enhancing security and commitment, while at the same time not being too high so that it may limit the accessibility of the network and discourage potential legitimate participants.
+**This value is high enough to allow a reasonable number of active topics to coexist, while also protecting the network against too much activity per block, preventing congestion and ensuring a more predictable block processing time.**
-#### remove_stake_delay_window
+**Throughput Management**:
+- **Scalability**: Support large numbers of active topics simultaneously
+- **Performance protection**: Prevent network congestion from excessive activity
+- **Predictable timing**: Ensure consistent block processing times
+- **Resource allocation**: Balance topic participation with network efficiency
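+
+A minimal sketch of how this cap could interact with `min_topic_unmet_demand` when selecting topics for a block; the data model and the preference for higher-demand topics are illustrative assumptions, not the chain's actual selection logic:
+
+```python
+# Sketch: choose which topics run in a block (hypothetical data model).
+MIN_TOPIC_UNMET_DEMAND = 100  # allo
+MAX_TOPICS_PER_BLOCK = 2_048
+
+def topics_for_block(topics):
+    """Drop topics below the demand threshold, then cap the count,
+    here preferring topics with the most unmet demand (an assumption)."""
+    active = [t for t in topics if t["unmet_demand"] >= MIN_TOPIC_UNMET_DEMAND]
+    active.sort(key=lambda t: t["unmet_demand"], reverse=True)
+    return active[:MAX_TOPICS_PER_BLOCK]
+
+print(topics_for_block([{"id": 1, "unmet_demand": 250},
+                        {"id": 2, "unmet_demand": 40}]))  # only topic 1 runs
+```
+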
-Sets the duration, in seconds, during which a staker's tokens remain staked after initiating the unstaking process. This protects against flash-type attacks.
+##### min_request_unmet_demand
+**Threshold under which the inference requests will be deleted or prevented from being created.**
-Default Value: 172800 (1 day)
+**Default Value:** 1 allo
-A fair delay in unstaking, which can ensure stability in the network by preventing sudden fluctuations in staked tokens and discourage malicious actors, while keeping it low enough so it is not very inconvenient to users who want to unstake their tokens promptly.
+**The purpose is to prevent unnecessary processing of low-value requests with minimal impact, keeping the state of the chain tidy, while remaining conservative with partially exhausted inference requests.**
-#### min_request_cadence
+**Request Management**:
+- **State efficiency**: Keep blockchain state clean and manageable
+- **Resource conservation**: Prevent processing of minimal-value requests
+- **Conservative approach**: Protect legitimate requests from premature deletion
+- **Network tidiness**: Maintain organized and efficient chain state
-Sets the minimum allowed time interval, in seconds, between consecutive AI calls from an inference request.
+#### Performance and Quality Control
-Default Value: 10 seconds
+##### max_missing_inference_percent
+**The percentage of inference rounds missed by a worker, above which the worker gets penalized.**
-Imposing a minimum cadence ensures a reasonable pacing of inference requests, preventing potential abuse or unnecessary strain on the network. Adjusted based on the expected frequency of AI inference requests and the network's capacity, balanced between responsiveness and resource efficiency.
+**Default Value:** 20%
-#### min_weight_cadence
+**Penalizing workers for missing inferences encourages reliability and accountability in the AI inference process.** However, setting this value too low may lead to frequent penalties, potentially discouraging worker participation. The default strikes a balance between the two.
-Sets the minimum allowed time interval, in seconds, between consecutive calls for topic weight adjustment.
+**Reliability Incentives**:
+- **Accountability**: Encourage consistent worker performance and participation
+- **Quality assurance**: Maintain high standards for inference delivery
+- **Balanced penalties**: Avoid excessive penalties that discourage participation
+- **Network reliability**: Ensure dependable inference availability for consumers
-Default Value: 3600 seconds (1 hour)
+##### required_minimum_stake
+**Sets the minimum stake to be considered a reputer in good standing.** If a reputer has less than this stake, then their contribution to reputation scoring will be ignored, and they will not receive any rewards from the system.
-Imposing a minimum cadence ensures a reasonable pacing of loss-calculation, preventing potential abuse or unnecessary strain on the network. That being said, it need not occur too frequently, because weights accrue over many inferences anyway, and these calls are relatively expensive involving off-chain communication.
+**Default Value:** 100 allo
-#### max_inference_request_validity
+**Setting a minimum stake helps ensure that participants have a vested interest in the network's success and are not simply sybils, enhancing security and commitment, while remaining low enough not to limit the accessibility of the network or discourage legitimate participants.**
-Sets the maximum allowable time, in seconds, for an AI inference request to remain valid before expiration.
+**Stake Requirements Strategy**:
+- **Sybil resistance**: Prevent fake accounts and malicious actors through economic commitment
+- **Quality incentives**: Ensure reputers have skin in the game for honest evaluation
+- **Accessibility balance**: Keep requirements reasonable for legitimate participants
+- **Network security**: Enhance overall network security through economic incentives
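+
+In code form, the good-standing check amounts to a simple threshold comparison; a sketch:
+
+```python
+# Sketch: reputer good-standing check (default from this page).
+REQUIRED_MINIMUM_STAKE = 100  # allo
+
+def in_good_standing(reputer_stake):
+    """Reputers below the minimum are ignored for reputation scoring
+    and receive no rewards."""
+    return reputer_stake >= REQUIRED_MINIMUM_STAKE
+
+print(in_good_standing(250))  # True
+print(in_good_standing(50))   # False: contribution ignored, no rewards
+```
+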
-Default Value: 29030400 seconds (1year)
+#### Timing and Cadence Controls
-Setting a maximum validity time ensures that AI inference requests are processed within a reasonable timeframe, preventing outdated requests, while at the same time allowing inference requests to be planned and executed at the designed cadence within a generous timeframe, especially where time-dependent effects (e.g. seasonal effects) can happen.
+##### remove_stake_delay_window
+**Sets the duration, in seconds, during which a staker's tokens remain staked after initiating the unstaking process.** This protects against flash-type attacks.
-#### max_request_cadence
+**Default Value:** 172800 (2 days)
-Sets the maximum allowable time, in seconds, for an AI inference request to remain valid before expiration.
+**A fair delay in unstaking ensures stability in the network by preventing sudden fluctuations in staked tokens and discouraging malicious actors, while remaining short enough not to inconvenience users who want to unstake their tokens promptly.**
-Default Value: 29030400 seconds (1 year)
+**Unstaking Delay Benefits**:
+- **Attack prevention**: Protect against rapid stake manipulation attacks
+- **Network stability**: Prevent sudden stake withdrawals that could destabilize security
+- **User convenience**: Balance security with reasonable withdrawal timeframes
+- **Economic stability**: Maintain predictable stake levels for network planning
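+
+A sketch of when unstaked tokens become withdrawable under the default window:
+
+```python
+# Sketch: earliest withdrawal time after initiating unstaking.
+from datetime import datetime, timedelta
+
+REMOVE_STAKE_DELAY_WINDOW = 172_800  # seconds (2 days, default from this page)
+
+def withdrawable_at(unstake_initiated: datetime) -> datetime:
+    return unstake_initiated + timedelta(seconds=REMOVE_STAKE_DELAY_WINDOW)
+
+print(withdrawable_at(datetime(2024, 1, 1)))  # 2024-01-03 00:00:00
+```
+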
-A shorter validity period ensures that AI inference requests are designed to be processed more quickly and with up-to-date information. However, because lowering this value may lead to the rejection of legitimate requests if they take longer to process, the maximum allowed equals the max inference request, which is a conservative and flexible decision to allow inference requests creators for maximal planning ahead.
+##### min_request_cadence
+**Sets the minimum allowed time interval, in seconds, between consecutive AI calls from an inference request.**
+
+**Default Value:** 10 seconds
+
+**Imposing a minimum cadence ensures reasonable pacing of inference requests, preventing potential abuse or unnecessary strain on the network.** The default is adjusted based on the expected frequency of AI inference requests and the network's capacity, balancing responsiveness and resource efficiency.
+
+**Request Pacing Strategy**:
+- **Abuse prevention**: Prevent rapid-fire requests that could overwhelm the network
+- **Resource management**: Ensure efficient use of network computational resources
+- **Quality maintenance**: Allow adequate time for quality inference generation
+- **Network health**: Balance responsiveness with sustainable resource usage
+
+##### min_weight_cadence
+**Sets the minimum allowed time interval, in seconds, between consecutive calls for topic weight adjustment.**
+
+**Default Value:** 3600 seconds (1 hour)
+
+**Imposing a minimum cadence ensures reasonable pacing of loss calculation, preventing potential abuse or unnecessary strain on the network.** That said, it need not occur too frequently: weights accrue over many inferences anyway, and these calls are relatively expensive, involving off-chain communication.
+
+**Weight Adjustment Timing**:
+- **Computational efficiency**: Prevent expensive calculations from occurring too frequently
+- **Network optimization**: Balance accuracy with computational cost
+- **Abuse prevention**: Prevent manipulation through rapid weight adjustments
+- **Economic efficiency**: Optimize off-chain communication costs
+
+#### Request Management
+
+##### max_inference_request_validity
+**Sets the maximum allowable time, in seconds, for an AI inference request to remain valid before expiration.**
+
+**Default Value:** 29030400 seconds (1 year)
+
+**Setting a maximum validity time ensures that AI inference requests are processed within a reasonable timeframe and prevents outdated requests, while still allowing requests to be planned and executed at the designed cadence within a generous window, especially where time-dependent effects (e.g. seasonal effects) can occur.**
+
+**Request Validity Benefits**:
+- **Data freshness**: Ensure inferences are based on reasonably current requests
+- **Planning flexibility**: Allow long-term planning for seasonal or cyclical patterns
+- **Resource management**: Prevent indefinite accumulation of old requests
+- **Quality assurance**: Balance timeliness with planning requirements
+
+##### max_request_cadence
+**Sets the maximum allowed time interval, in seconds, between consecutive AI calls from an inference request.**
+
+**Default Value:** 29030400 seconds (1 year)
+
+**A shorter validity period ensures that AI inference requests are processed more quickly and with up-to-date information.** However, because lowering this value may lead to the rejection of legitimate requests that take longer to process, the maximum allowed equals the maximum inference request validity, a conservative and flexible choice that gives inference request creators maximal room to plan ahead.
+
+**Maximum Cadence Strategy**:
+- **Conservative approach**: Avoid rejecting legitimate long-term requests
+- **Flexibility**: Allow various request patterns and timing strategies
+- **Planning support**: Enable long-term inference planning and scheduling
+- **Balance**: Compromise between freshness and planning flexibility
+
+## Economic Distribution
+
+### Reward Allocation
#### percent_rewards_reputers_workers
+**Cosmos validators, Allora Reputers, and Allora Workers all deserve to be paid rewards from token inflation as well as from the transaction fees collected for using the Allora network.** In Allora, we have two [payment flows](/devs/reference/module-accounts) for paying out rewards. Cosmos validators use the standard cosmos-sdk staking flows to get their rewards, while reputers and workers get their rewards separately from the Allora-specific algorithm and code. This parameter controls the ratio of rewards between cosmos validators on one side, and reputers and workers on the other.
+
+**Default Value:** 50%
-Cosmos validators, Allora Reputers, and Allora Workers all deserve to be paid out rewards from token inflation as well as collected transaction fees for using the Allora network. In Allora, we have two [payment flows](/devs/reference/module-accounts) for paying out rewards. Cosmos validators use the standard cosmos-sdk staking flows to get their rewards, and reputers and workers separately get their rewards from the Allora specific algorithm and code. This parameter controls the ratio of rewards between cosmos validators on one side, and reputers and workers on the other.
+**A higher percentage would pay more transaction fees to reputers and workers, at the expense of giving less rewards to cosmos validators. A lower percentage value would give more rewards to cosmos validators, but pay out less rewards to reputers and workers for their services to the network.**
-Default Value: 50%
+**Reward Distribution Strategy**:
+- **Balanced incentives**: Fair distribution between different types of network participants
+- **Economic alignment**: Align rewards with value provided to the network
+- **Governance control**: Allow community to adjust distribution based on network needs
+- **Dual-layer rewards**: Separate but coordinated reward systems for different roles
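+
+A sketch of the split this parameter controls, applied to a single epoch's emissions (illustrative only; the on-chain calculation operates on module account balances):
+
+```python
+# Sketch: splitting emissions between the two payment flows.
+PERCENT_REWARDS_REPUTERS_WORKERS = 0.50  # default from this page
+
+def split_rewards(total_emission):
+    reputer_worker_share = total_emission * PERCENT_REWARDS_REPUTERS_WORKERS
+    validator_share = total_emission - reputer_worker_share
+    return validator_share, reputer_worker_share
+
+print(split_rewards(1_000))  # (500.0, 500.0)
+```
+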
-A higher percentage would pay more transaction fees to reputers and workers, at the expense of giving less rewards to cosmos validators. A lower percentage value would give more rewards to cosmos validators, but pay out less rewards to reputers and workers for their services to the network.
+## Technical Parameters
+
+### System Limits and Safety
#### epsilon_safe_div
+**A small tolerance quantity used to guard against division by zero.**
-A small tolerance quantity used to cap division by zero.
+**Default Value:** 0.0000001
-Default Value: 0.0000001
+**Mathematical Safety**:
+- **Numerical stability**: Prevent division by zero errors in calculations
+- **System reliability**: Ensure robust mathematical operations
+- **Edge case handling**: Manage extreme values that could cause system failures
+- **Precision balance**: Small enough to maintain accuracy, large enough to prevent errors
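+
+A sketch of how such an epsilon is typically applied (assuming non-negative denominators):
+
+```python
+# Sketch: flooring a denominator with epsilon_safe_div.
+EPSILON_SAFE_DIV = 0.0000001  # default from this page
+
+def safe_div(numerator, denominator):
+    """Avoid division by zero by flooring the (non-negative) denominator."""
+    return numerator / max(denominator, EPSILON_SAFE_DIV)
+
+print(safe_div(1.0, 0.0))  # ~1e7 instead of a ZeroDivisionError
+```
+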
#### max_string_length
+**The maximum length of a string allowed to be stored on the chain.** For example, used to limit metadata length when creating new topics.
+
+**Default Value:** 255
+
+**Data Management**:
+- **Storage efficiency**: Prevent excessive on-chain data storage
+- **Performance optimization**: Maintain fast transaction processing
+- **Abuse prevention**: Prevent spam through large string submissions
+- **Resource conservation**: Optimize blockchain storage usage
+
+## Parameter Governance
+
+### Modification Process
+
+**Governance-Controlled Changes**:
+- **Proposal submission**: Any token holder can propose parameter changes
+- **Community discussion**: Public debate and analysis of proposed changes
+- **Voting process**: Token holders vote on parameter modification proposals
+- **Implementation**: Approved changes are automatically implemented by the network
+
+### Best Practices
+
+**Parameter Adjustment Guidelines**:
+- **Gradual changes**: Make incremental adjustments to avoid disruption
+- **Data-driven decisions**: Base changes on network performance data and analysis
+- **Community input**: Seek broad community feedback before proposing changes
+- **Impact assessment**: Carefully analyze potential effects of parameter changes
+
+## Prerequisites
+
+- **Blockchain economics**: Understanding of tokenomics and incentive mechanisms
+- **Network operations**: Knowledge of blockchain network functionality and performance
+- **Governance systems**: Familiarity with decentralized governance and voting processes
+- **Technical parameters**: Ability to understand technical specifications and their implications
-The maximum length of a string to allow to store on the chain. For example, used in limiting metadata for the creation of new topics.
+## Next Steps
-Default Value: 255
\ No newline at end of file
+- [Explore mint parameters](/devs/reference/params/mint) for detailed inflation control mechanisms
+- [Study staking parameters](/devs/reference/params/stake) for network participation rules
+- [Review consensus parameters](/devs/reference/params/consensus) for network operation details
+- [Learn about module accounts](/devs/reference/module-accounts) for understanding token flow and distribution
\ No newline at end of file
diff --git a/pages/devs/reference/params/consensus.mdx b/pages/devs/reference/params/consensus.mdx
index 5b9791e..ab5555f 100644
--- a/pages/devs/reference/params/consensus.mdx
+++ b/pages/devs/reference/params/consensus.mdx
@@ -1,57 +1,220 @@
# Consensus Parameters
-> Parameters that uniquely affect validators of the Allora Chain
+## What You'll Learn
+- Understanding consensus parameters that uniquely affect validators of the Allora Chain
+- How block size and gas limit parameters control network throughput and performance
+- Evidence parameters that maintain network security through proof validation
+- Validator configuration requirements and supported cryptographic key types
-**block.max_bytes**
+## Overview
-Sets the maximum size of a block in bytes.
+> **Parameters that uniquely affect validators of the Allora Chain**
-Value: `22020096`
+**Consensus parameters control the fundamental operational characteristics of the Allora blockchain network.** These parameters define block properties, evidence handling, and validator requirements that are essential for network security, performance, and reliability.
-Standard value.
+### Why Consensus Parameters Matter
+
+**Network Performance**:
+- **Throughput control**: Block size and gas limits determine transaction processing capacity
+- **Resource management**: Parameters prevent network congestion and excessive resource usage
+- **Predictable operation**: Consistent parameters enable reliable network performance
+- **Scalability planning**: Parameters can be adjusted to support network growth
+
+**Security and Integrity**:
+- **Evidence validation**: Parameters ensure proper handling of validator misbehavior evidence
+- **Network protection**: Limits prevent abuse and maintain network stability
+- **Validator standards**: Requirements ensure all validators meet security specifications
+- **Consensus reliability**: Parameters support robust consensus mechanism operation
+
+## Block Configuration
+
+### Block Size Management
+
+#### block.max_bytes
+**Sets the maximum size of a block in bytes.**
+
+**Value:** `22020096`
+
+**Standard value.**
This parameter limits the block size, preventing excessive network load. However, setting it too low may restrict the number of transactions in a block. The current value strikes a balance between controlling block size and allowing for sufficient transaction throughput.
-**block.max_gas**
+**Block Size Strategy**:
+- **Network efficiency**: Prevent blocks from becoming too large for efficient network propagation
+- **Transaction capacity**: Allow adequate space for meaningful transaction volume
+- **Resource balance**: Balance between network performance and transaction throughput
+- **Propagation time**: Ensure blocks can be transmitted quickly across the network
+
+**Performance Implications**:
+- **Network bandwidth**: Larger blocks require more bandwidth for propagation
+- **Validation time**: Bigger blocks take longer to validate and process
+- **Storage requirements**: Block size affects long-term storage needs
+- **Sync performance**: Impacts initial sync time for new nodes
+
+### Gas Limit Configuration
-Sets the maximum amount of gas that can be used in a block.
+#### block.max_gas
+**Sets the maximum amount of gas that can be used in a block.**
-Value: `-1`
+**Value:** `-1`
-Standard value.
+**Standard value.**
The current setting allows for flexibility by indicating no limit on the maximum gas usage in a block. While this offers freedom for transactions, careful monitoring is needed to prevent potential abuse. This approach acknowledges the need for adaptability in a dynamic network environment.
-**evidence.max_age_num_blocks**
+**Gas Limit Strategy**:
+- **Unlimited flexibility**: No hard cap allows for adaptive gas usage based on network needs
+- **Market-driven**: Gas prices naturally limit excessive usage through economic incentives
+- **Network evolution**: Allows network to adapt to changing computational requirements
+- **Performance monitoring**: Requires ongoing observation to prevent abuse
-Sets the maximum age (in blocks) of evidence that can be included in a block.
+**Operational Considerations**:
+- **Abuse prevention**: Monitor for excessive gas usage that could slow the network
+- **Economic balance**: Rely on gas pricing to manage computational resource usage
+- **Network health**: Track block processing times and validator performance
+- **Future planning**: May need to implement limits based on network growth
-Value: `100000`
+## Evidence Management
-Standard value.
+### Evidence Age Limits
+
+#### evidence.max_age_num_blocks
+**Sets the maximum age (in blocks) of evidence that can be included in a block.**
+
+**Value:** `100000`
+
+**Standard value.**
By limiting the age of evidence, this parameter maintains network security by preventing the inclusion of outdated evidence. The chosen value strikes a reasonable balance between retaining relevant evidence and ensuring integrity of the network.
-**evidence.max_age_duration**
+**Block-Based Age Control**:
+- **Relevance maintenance**: Ensure evidence remains relevant to current network state
+- **Security focus**: Prevent old evidence from being used inappropriately
+- **Network integrity**: Maintain consistency in evidence validation across time
+- **Storage efficiency**: Limit retention of outdated information
-Sets the maximum age (in nanoseconds) of evidence that can be included in a block.
+#### evidence.max_age_duration
+**Sets the maximum age (in nanoseconds) of evidence that can be included in a block.**
-Value: `172800000000000`
+**Value:** `172800000000000`
-Standard value.
+**Standard value.**
This parameter complements `max_age_num_blocks` by providing an additional measure to limit the inclusion of outdated evidence. The current setting aligns with the need for a comprehensive yet controlled approach to evidence inclusion.
-**evidence.max_bytes**
+**Time-Based Age Control**:
+- **Dual protection**: Works with block-based limits for comprehensive age management
+- **Temporal relevance**: Ensure evidence remains temporally relevant
+- **Cross-validation**: Provide multiple mechanisms for evidence age verification
+- **System robustness**: Multiple age checks improve overall system reliability
+
+**Duration Analysis**:
+- **Nanosecond precision**: High precision timing for accurate age calculation
+- **48-hour window**: Approximately 48 hours (172,800 seconds) maximum age
+- **Reasonable timeframe**: Sufficient time for evidence collection and validation
+- **Security balance**: Balance between evidence utility and staleness prevention
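+
+Under CometBFT's rule (which, to our understanding, Cosmos-based chains inherit), evidence expires only once it is older than both bounds; a sketch:
+
+```python
+# Sketch: evidence expiry combining both limits (CometBFT-style rule).
+MAX_AGE_NUM_BLOCKS = 100_000
+MAX_AGE_DURATION_NS = 172_800_000_000_000  # 48 hours in nanoseconds
+
+def is_expired(age_in_blocks, age_in_ns):
+    return age_in_blocks > MAX_AGE_NUM_BLOCKS and age_in_ns > MAX_AGE_DURATION_NS
+
+print(is_expired(150_000, 3_600_000_000_000))    # False: still recent in time
+print(is_expired(150_000, 200_000_000_000_000))  # True: too old on both counts
+```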
+
+### Evidence Size Control
-Sets the maximum size of evidence in bytes.
+#### evidence.max_bytes
+**Sets the maximum size of evidence in bytes.**
-Value: `1048576`
+**Value:** `1048576`
-Standard value.
+**Standard value.**
Controlling the size of evidence prevents potential abuse and ensures efficient network operation. While too low a value may restrict the inclusion of legitimate evidence, the current setting finds a suitable compromise between limiting size and maintaining the effectiveness of the evidence mechanism.
-**validator.pub_key_types**
+**Size Management Benefits**:
+- **Network efficiency**: Prevent large evidence from slowing block processing
+- **Storage optimization**: Control blockchain storage requirements
+- **Abuse prevention**: Limit potential for evidence spam or DoS attacks
+- **Processing speed**: Ensure evidence can be validated quickly
+
+**Size Specifications**:
+- **1MB limit**: 1,048,576 bytes maximum evidence size
+- **Reasonable capacity**: Sufficient space for comprehensive evidence data
+- **Performance balance**: Large enough for effectiveness, small enough for efficiency
+- **System scalability**: Manageable size for long-term network growth
+
+## Validator Configuration
+
+### Cryptographic Standards
+
+#### validator.pub_key_types
+**Defines the supported public key types for validators.**
+
+**Value:** `["ed25519"]`
+
+**Supported Types**:
+- **Ed25519**: Primary cryptographic standard for validator signing keys
+- **Security properties**: Proven cryptographic security and performance characteristics
+- **Industry standard**: Widely adopted in blockchain and distributed systems
+- **Implementation maturity**: Well-tested and reliable cryptographic implementation
+
+**Key Type Strategy**:
+- **Standardization**: Ensure all validators use compatible cryptographic standards
+- **Security assurance**: Require proven and secure cryptographic algorithms
+- **Interoperability**: Maintain compatibility with Cosmos SDK standards
+- **Future flexibility**: Framework supports additional key types if needed
+
+## Parameter Interactions
+
+### Integrated System Design
+
+**Holistic Approach**:
+- **Block constraints**: Size and gas limits work together to control block properties
+- **Evidence coordination**: Multiple age and size limits provide comprehensive evidence control
+- **Validator standards**: Key type requirements ensure consistent security across all validators
+- **Performance optimization**: All parameters contribute to overall network performance
+
+### Security Framework
+
+**Multi-Layer Protection**:
+- **Resource limits**: Block and evidence size limits prevent resource exhaustion
+- **Temporal controls**: Evidence age limits maintain relevance and prevent stale data
+- **Cryptographic standards**: Key type requirements ensure strong validator security
+- **Economic incentives**: Gas mechanisms provide market-based resource allocation
+
+## Operational Impact
+
+### Network Performance
+
+**Throughput Characteristics**:
+- **Transaction capacity**: Block size limits affect maximum transactions per block
+- **Processing speed**: Gas limits and evidence sizes impact block processing time
+- **Network propagation**: Block size affects how quickly blocks spread across the network
+- **Validator efficiency**: Parameters impact validator resource requirements and performance
+
+### Security Implications
+
+**Network Protection**:
+- **Evidence integrity**: Age and size limits prevent evidence manipulation
+- **Validator accountability**: Proper evidence handling ensures validator misbehavior can be proven
+- **Network stability**: Resource limits prevent network degradation from excessive usage
+- **Cryptographic security**: Key type standards ensure strong validator authentication
+
+## Best Practices
+
+### Parameter Monitoring
+
+**Performance Tracking**:
+- **Block utilization**: Monitor average block size and gas usage patterns
+- **Evidence processing**: Track evidence inclusion rates and validation times
+- **Network health**: Observe block propagation times and validator performance
+- **Resource usage**: Monitor system resource utilization across validators
+
+### Optimization Strategies
+
+**Network Tuning**:
+- **Capacity planning**: Adjust parameters based on network growth and usage patterns
+- **Performance optimization**: Balance throughput with network stability and security
+- **Evidence management**: Ensure evidence parameters support effective validator accountability
+- **Future planning**: Consider parameter adjustments for anticipated network evolution
+
+## Prerequisites
-Specifies the supported public key types for validators.
+- **Blockchain architecture**: Understanding of block structure and consensus mechanisms
+- **Cryptography basics**: Knowledge of digital signatures and key management
+- **Network operations**: Familiarity with blockchain network performance and security
+- **Validator operations**: Understanding of validator roles and responsibilities
-Value: `["ed25519"]`
+## Next Steps
-Standard value.
-This parameter enhances security by explicitly specifying the supported public key type for validators.
+- [Study chain parameters](/devs/reference/params/chain) for comprehensive network configuration
+- [Explore staking parameters](/devs/reference/params/stake) for validator participation mechanics
+- [Review mint parameters](/devs/reference/params/mint) for token economics
+- [Learn about validator operations](/devs/validators/validator-operations) for practical validator management
diff --git a/pages/devs/reference/params/mint.mdx b/pages/devs/reference/params/mint.mdx
index b04f706..432fb2f 100644
--- a/pages/devs/reference/params/mint.mdx
+++ b/pages/devs/reference/params/mint.mdx
@@ -1,69 +1,211 @@
# Mint Parameters
-> Parameters from the minting module on Allora Network
+## What You'll Learn
+- Understanding all minting module parameters that control token inflation and supply
+- How these parameters balance economic incentives with network security and sustainability
+- The rationale behind parameter values and their impact on network economics
+- How minting parameters work together to create a stable and predictable token economy
-**mint_denom**
+## Overview
-The mint denomination for the blockchain is `uallo`.
+> **Parameters from the minting module on Allora Network**
-**inflation_rate_change**
+**The mint module parameters control the creation of new tokens and inflation mechanics on the Allora Network.** These parameters are crucial for maintaining economic balance, incentivizing network participation, and ensuring long-term sustainability.
-Determines the maximum annual rate at which the inflation rate can change.
+### Why Mint Parameters Matter
-Value: `"0.130000000000000000"`
+**Economic Foundation**:
+- **Inflation control**: Manage token supply growth to balance incentives and value preservation
+- **Network incentives**: Provide rewards for validators, workers, and reputers
+- **Economic predictability**: Create stable and predictable economic conditions
+- **Long-term sustainability**: Balance short-term growth with long-term token value
-Standard value.
+**Network Health**:
+- **Security incentives**: Ensure adequate rewards for network security providers
+- **Participation rewards**: Maintain incentives for quality network participation
+- **Supply management**: Control total token supply to prevent hyperinflation
+- **Economic stability**: Provide stable foundation for network economic activity
+
+## Core Minting Parameters
+
+### Base Configuration
+
+#### mint_denom
+**The mint denomination for the blockchain is `uallo`.**
+
+**Denomination Benefits**:
+- **Standard unit**: Base unit for all network transactions and rewards
+- **Precision handling**: Micro-denomination allows for precise value calculations
+- **System consistency**: Uniform denomination across all network operations
+- **Economic clarity**: Clear unit system for users and developers
+
+### Inflation Rate Controls
+
+#### inflation_rate_change
+**Determines the maximum annual rate at which the inflation rate can change.**
+
+**Value:** `"0.130000000000000000"`
+
+**Standard value.**
Balances the rate of change to adapt to economic conditions while preventing sudden shocks. It will be regularly evaluated and adjusted based on economic dynamics.
-**inflation_max**
+**Rate Change Strategy**:
+- **Stability protection**: Prevent dramatic inflation changes that could destabilize the economy
+- **Adaptive capability**: Allow inflation adjustments in response to network conditions
+- **Shock prevention**: Avoid sudden economic disruptions from rapid rate changes
+- **Dynamic management**: Enable responsive economic policy while maintaining predictability
-Inflation max sets the maximum allowable annual inflation rate.
+#### inflation_max
+**Inflation max sets the maximum allowable annual inflation rate.**
-Value: `"0.200000000000000000"`
+**Value:** `"0.200000000000000000"`
-Standard value.
+**Standard value.**
It may be adjusted based on the balance between controlling token supply growth and incentivizing network participants.
-**inflation_min**
+**Maximum Rate Benefits**:
+- **Value protection**: Set ceiling on token dilution to protect holder value
+- **Investment confidence**: Provide certainty about maximum inflation exposure
+- **Network growth**: Allow sufficient inflation for network bootstrapping and growth
+- **Economic balance**: Balance growth incentives with value preservation
-Inflation min sets the minimum allowable annual inflation rate.
+#### inflation_min
+**Inflation min sets the minimum allowable annual inflation rate.**
-Value: `"0.070000000000000000"`
+**Value:** `"0.070000000000000000"`
-Standard value.
+**Standard value.**
It provides adequate incentives while avoiding undue token supply inflation.
-**goal_bonded**
+**Minimum Rate Strategy**:
+- **Continuous incentives**: Ensure ongoing rewards for network participants
+- **Security maintenance**: Maintain minimum rewards for network security providers
+- **Participation encouragement**: Keep minimum incentives for quality participation
+- **Economic floor**: Prevent deflationary conditions that could harm network growth
+
+### Network Economics
-Represents the target ratio of bonded (staked) tokens to the total token supply.
+#### goal_bonded
+**Represents the target ratio of bonded (staked) tokens to the total token supply.**
-Current value: `"0.670000000000000000"`
+**Current value:** `"0.670000000000000000"`
-Standard value.
+**Standard value.**
It provides adequate incentives while avoiding undue token supply inflation.
-**max_supply**
+**Bonding Target Strategy**:
+- **Security optimization**: Target optimal staking ratio for network security
+- **Liquidity balance**: Balance between network security and token liquidity
+- **Inflation adjustment**: Influence inflation rates based on staking participation
+- **Economic equilibrium**: Maintain healthy balance between staked and liquid tokens
+
+**Bonding Ratio Impact**:
+- **Above target**: Lower inflation rewards to reduce bonding incentives
+- **Below target**: Higher inflation rewards to encourage more staking
+- **Dynamic adjustment**: Automatic balancing mechanism for optimal security
+- **Market efficiency**: Allow market forces to determine optimal staking levels
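+
+These parameters mirror the standard Cosmos SDK mint formula, in which inflation drifts toward the bonding target each block; a sketch of that update (the actual Allora implementation may differ in detail):
+
+```python
+# Sketch of the standard Cosmos SDK per-block inflation update.
+GOAL_BONDED = 0.67
+INFLATION_RATE_CHANGE = 0.13  # maximum annual change
+INFLATION_MAX = 0.20
+INFLATION_MIN = 0.07
+BLOCKS_PER_YEAR = 6_311_520
+
+def next_inflation(inflation, bonded_ratio):
+    yearly_change = (1 - bonded_ratio / GOAL_BONDED) * INFLATION_RATE_CHANGE
+    inflation += yearly_change / BLOCKS_PER_YEAR  # one block's worth of drift
+    return min(max(inflation, INFLATION_MIN), INFLATION_MAX)
+
+# Below the bonding target, inflation drifts upward to encourage staking.
+print(next_inflation(0.10, bonded_ratio=0.50) > 0.10)  # True
+```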
+
+### Supply Management
+
+#### max_supply
+**Maximum total supply of `uallo`**
+
+**Current value:** `"1000000000000000000000000000"`
+
+**Supply Cap Benefits**:
+- **Scarcity assurance**: Guarantee finite token supply for long-term value preservation
+- **Economic predictability**: Provide certainty about maximum token dilution
+- **Investment appeal**: Create scarcity dynamics that may support token appreciation
+- **Inflation endpoint**: Eventually eliminate inflation as supply cap approaches
+
+**Mathematical Representation**:
+- **Denomination**: 1 billion ALLO tokens in base uALLO units
+- **Precision**: 18 decimal places for micro-denomination precision
+- **Total cap**: Absolute maximum tokens that can ever exist
+- **Future planning**: Long-term economic planning based on known supply limit
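+
+A quick arithmetic check of the value (1 billion ALLO at 18 decimal places):
+
+```python
+# max_supply equals 1e9 ALLO expressed in uallo (1e18 uallo per ALLO).
+max_supply_uallo = 1_000_000_000_000_000_000_000_000_000
+print(max_supply_uallo == 10**9 * 10**18)  # True
+```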
+
+## Parameter Interactions
+
+### Economic Equilibrium
+
+**Balanced Ecosystem**:
+- **Inflation bounds**: Min and max inflation create stable operating range
+- **Bonding targets**: Goal bonded ratio influences inflation adjustments
+- **Supply limits**: Maximum supply provides long-term scarcity guarantee
+- **Rate management**: Inflation rate changes allow adaptive economic policy
+
+### Dynamic Adjustments
+
+**Responsive Mechanisms**:
+- **Staking incentives**: Inflation adjusts based on actual bonding ratios
+- **Economic adaptation**: Parameters enable response to changing network conditions
+- **Market efficiency**: Allow economic forces to influence network participation
+- **Stability maintenance**: Prevent extreme economic conditions through parameter bounds
+
+## Implementation Details
+
+### Inflation Calculation
+
+**Algorithmic Approach**:
+- **Target-based adjustment**: Inflation adjusts toward bonding target
+- **Gradual changes**: Rate changes are limited to prevent economic shocks
+- **Predictable formula**: Mathematical approach to inflation determination
+- **Market responsive**: Reacts to actual staking behavior and participation
+
+### Token Distribution
+
+**Reward Allocation**:
+- **Validator rewards**: Primary recipients of newly minted tokens
+- **Network participants**: Workers and reputers receive allocated portions
+- **Economic incentives**: Distribution aligns with network value contribution
+- **Sustainable growth**: Balanced approach to token creation and distribution
+
+## Economic Impact Analysis
+
+### Incentive Structure
+
+**Participation Rewards**:
+- **Security providers**: Validators receive consistent rewards for network security
+- **Quality contributors**: Workers and reputers earn based on performance
+- **Long-term holders**: Staking rewards encourage long-term network commitment
+- **Economic alignment**: Rewards align with network value and security needs
-Maximum total supply of `uallo`
+### Market Dynamics
-Current value: `"1000000000000000000000000000"`
+**Supply and Demand**:
+- **Predictable inflation**: Known parameters enable accurate economic modeling
+- **Scarcity timeline**: Finite supply creates long-term scarcity value proposition
+- **Participation incentives**: Staking rewards encourage network participation
+- **Economic stability**: Balanced parameters promote sustainable network growth
-**halving_interval **
+## Best Practices
-The block interval for the halving of the block reward.
+### Parameter Monitoring
-Current value: `25246080`
+**Ongoing Assessment**:
+- **Economic indicators**: Track inflation impact on network participation and token value
+- **Participation rates**: Monitor staking ratios and network security metrics
+- **Market conditions**: Assess broader cryptocurrency market impact on network economics
+- **Community feedback**: Incorporate stakeholder input on economic policy effectiveness
-**current_block_provision**
+### Governance Considerations
-The initial value of provisions per block. This value is recalculated and updated with each block.
+**Democratic Management**:
+- **Community proposals**: Allow token holders to propose parameter adjustments
+- **Evidence-based changes**: Base modifications on data and economic analysis
+- **Impact assessment**: Carefully evaluate potential effects of parameter changes
+- **Gradual implementation**: Make incremental adjustments to minimize disruption
-Current value: `2831000000000000000000`
+## Prerequisites
-**blocks_per_year**
+- **Economic principles**: Understanding of inflation, supply-demand dynamics, and monetary policy
+- **Blockchain economics**: Knowledge of tokenomics and network incentive structures
+- **Mathematical concepts**: Ability to understand percentage calculations and economic formulas
+- **Network operations**: Understanding of staking, validation, and network participation
-Value: `6311520` (a block every ~5 seconds)
+## Next Steps
-In Cronos, Treasurenet, Celestia, and many other Cosmos-based networks, the blocks per second are 5. This is the most common value for Cosmos-based blockchains.
-In Bittensor this is 12 seconds. We are allowing a higher number of topics (suggested 2048) than there are max subnets (32), so it is expected to have a higher level of activity, thus higher number of transactions - which could benefit from faster blocks.
-The standard value of a block every ~5 seconds is kept, for consistency with other blockchains and for allowing quicker block times and faster transactions than in Bittensor for the aforementioned reasons.
+- [Study chain parameters](/devs/reference/params/chain) for comprehensive network configuration
+- [Explore staking parameters](/devs/reference/params/stake) for participation mechanics
+- [Review consensus parameters](/devs/reference/params/consensus) for network operation details
+- [Learn about module accounts](/devs/reference/module-accounts) for understanding token flow and distribution
diff --git a/pages/devs/reference/params/stake.mdx b/pages/devs/reference/params/stake.mdx
index 1273592..d01ed93 100644
--- a/pages/devs/reference/params/stake.mdx
+++ b/pages/devs/reference/params/stake.mdx
@@ -1,76 +1,240 @@
# Stake Parameters
-> Parameters that affect both kinds of staking featured by Allora
+## What You'll Learn
+- Understanding both validation staking and reputational staking mechanisms on Allora Network
+- How different staking types serve different network functions and participant roles
+- Complete overview of staking parameters and their impact on network security and economics
+- The relationship between staking parameters and network decentralization
-There are two types of staking in Allora Network which run through different staking mechanisms: Validation staking and Reputational staking.
+## Overview
+
+> **Parameters that affect both kinds of staking featured by Allora**
+
+**There are two types of staking in Allora Network which run through different staking mechanisms: Validation staking and Reputational staking.**
+
+### Dual Staking Architecture
**Validation staking** comes from the popular `staking` module on Cosmos SDK. It is used when staking into Validator nodes.
**Reputational staking** is specific to Allora Network, and it is used to stake into Worker and Reputer nodes.
-The parameters for the two types are specified below.
+**The parameters for the two types are specified below.**
+
+#### Why Dual Staking Systems?
+
+**Specialized Functions**:
+- **Network security**: Validation staking secures the blockchain consensus layer
+- **Quality assurance**: Reputational staking ensures AI/ML inference quality
+- **Role separation**: Different staking types serve distinct network functions
+- **Economic optimization**: Separate systems allow optimized incentives for each role
+
+**System Benefits**:
+- **Flexible participation**: Users can stake in validators, workers, or reputers based on expertise
+- **Risk distribution**: Multiple staking types distribute network security across functions
+- **Specialized rewards**: Different reward structures optimize for different contribution types
+- **Network resilience**: Multiple staking systems provide redundant security mechanisms
## Reputational Staking
-Parameters from the reputational-staking module on Allora Network. These are parameters for staking into reputers and workers.
+**Parameters from the reputational-staking module on Allora Network.** These are parameters for staking into reputers and workers.
-These parameters are defined as "Chain Parameters" and can be found [here](./chain).
+**These parameters are defined as "Chain Parameters" and can be found [here](./chain).**
-The parameters of concern to reputers in particular are:
+### Key Reputational Parameters
+**The parameters of concern to reputers in particular are:**
- **required_minimum_stake**
- **remove_stake_delay_window**
+#### Parameter Integration
+
+**Chain Parameter Reference**:
+- **Centralized management**: Reputational staking parameters are managed as chain-wide settings
+- **Governance control**: Parameters can be adjusted through network governance proposals
+- **Unified policy**: Consistent application across all reputers and workers
+- **Dynamic adjustment**: Parameters can evolve with network needs and conditions
+
+**Economic Design**:
+- **Quality incentives**: Minimum stake requirements ensure serious participation
+- **Security delays**: Stake removal delays protect against rapid manipulation
+- **Risk management**: Parameters balance accessibility with security requirements
+- **Network health**: Settings optimize for long-term network stability and growth
+
## Validation Staking
-Parameters from the validator-based staking module on Allora Network
+**Parameters from the validator-based staking module on Allora Network**
-**unbonding_time**
+### Core Validation Parameters
-Sets the duration for which tokens remain bonded after initiating the unbonding process.
+#### unbonding_time
+**Sets the duration for which tokens remain bonded after initiating the unbonding process.**
-Value: `1814400s` (3 weeks)
+**Value:** `1814400s` (3 weeks)
-Standard value.
+**Standard value.**
A longer unbonding time enhances security by discouraging malicious actors and stabilizes token supply dynamics, but too long a period may inconvenience users who want to unstake their tokens promptly. This setting achieves a reasonable trade-off.
-**max_validators**
+**Unbonding Period Strategy**:
+- **Security enhancement**: Long unbonding period deters malicious validator behavior
+- **Economic stability**: Prevents rapid stake withdrawals that could destabilize network
+- **Attack prevention**: Makes it expensive to execute certain types of network attacks
+- **User consideration**: Balance security needs with reasonable user experience
-Sets the maximum number of validators allowed in the network.
+**Duration Analysis**:
+- **3-week period**: 21 days provides substantial security buffer
+- **Industry standard**: Comparable to other Cosmos-based networks
+- **Economic impact**: Sufficient time for slash conditions to be discovered and applied
+- **User planning**: Allows users to plan for capital lockup periods
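+
+To inspect these values on a running network, the standard Cosmos SDK staking query can be used. A minimal sketch; `<RPC_URL>` is a placeholder for your network's RPC endpoint:
+
+```bash
+# Query the validation staking parameters. The response should include
+# unbonding_time, plus the other parameters documented in this section
+# (max_validators, max_entries, historical_entries, bond_denom).
+allorad q staking params --node <RPC_URL>
+```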
-Value: `100`
+#### max_validators
+**Sets the maximum number of validators allowed in the network.**
-Standard value.
+**Value:** `100`
+
+**Standard value.**
It balances decentralization with network scalability. It will be regularly assessed and adjusted based on the network's growth and decentralization.
-**max_entries**
+**Validator Limit Strategy**:
+- **Decentralization balance**: Sufficient validators to ensure network decentralization
+- **Performance optimization**: Manageable number for efficient consensus operations
+- **Entry opportunity**: Reasonable limit that allows new validators to join
+- **Network evolution**: Can be adjusted as network grows and matures
+
+**Network Implications**:
+- **Consensus efficiency**: Smaller validator sets can reach consensus more quickly
+- **Security distribution**: 100 validators provide strong security through distribution
+- **Competition dynamics**: Limited slots create competitive environment for validator quality
+- **Geographic distribution**: Enough slots to encourage global validator distribution
+
+#### max_entries
+**Sets the maximum number of unbonding delegation or redelegation entries allowed per delegator-validator pair.**
+
+**Value:** `7`
+
+**Standard value.**
+This parameter limits the number of concurrent unbonding and redelegation entries for each delegator-validator pair, maintaining network efficiency while still allowing reasonable flexibility for staking activities.
+
+**Entry Management**:
+- **Concurrency control**: Caps simultaneous unbonding and redelegation entries to bound staking state growth
+- **User flexibility**: Still allows multiple pending operations per delegator-validator pair
+- **Resource management**: Controls the memory and processing requirements of staking state
+- **Network efficiency**: Balances functionality with performance requirements
+
+**Operational Impact**:
+- **Pending operations**: Determines how many unbonding or redelegation entries can be open at once
+- **User experience**: Affects a user's ability to queue multiple staking actions
+- **Network performance**: Influences validator resource requirements
+- **System stability**: Prevents excessive concurrent entries from overwhelming the system
+
+### Additional Validation Parameters
+
+#### historical_entries
+**Controls the number of historical entries maintained for staking operations.**
+
+**Value:** `10000`
+
+**Standard value.**
+This parameter balances data availability for queries and analysis with storage efficiency, ensuring that sufficient historical data is maintained without excessive storage requirements.
+
+**Historical Data Strategy**:
+- **Query support**: Sufficient history for analysis and verification needs
+- **Storage efficiency**: Limited retention prevents excessive blockchain storage growth
+- **Audit capability**: Historical entries support network analysis and troubleshooting
+- **Performance balance**: Adequate data without impacting node performance
+
+#### bond_denom
+**Specifies the denomination used for validator staking.**
+
+**Value:** `"uallo"`
+
+**Network Consistency**:
+- **Standard denomination**: Use network's base denomination for all staking operations
+- **Economic integration**: Align staking currency with network token economics
+- **System simplicity**: Single denomination reduces complexity and confusion
+- **Network coherence**: Consistent currency across all network functions
+
+#### min_commission_rate
+**Sets the minimum commission rate a validator can charge.**
+
+**Value:** `0.000000000000000000`
+
+**Standard value.**
+No minimum commission rate is set. This incentivizes staking with validators by offering delegators higher rewards, and also serves as part of a community-building strategy.
+
+## Parameter Interactions
+
+### Cross-System Coordination
+
+**Integrated Design**:
+- **Dual staking support**: Parameters accommodate both validation and reputational staking
+- **Economic alignment**: Different staking types contribute to overall network security
+- **Resource optimization**: Parameters balance different types of network participation
+- **Security layering**: Multiple staking systems provide comprehensive network protection
+
+### Economic Balance
+
+**Incentive Structure**:
+- **Validator rewards**: Validation staking parameters influence validator economics
+- **Quality incentives**: Reputational staking parameters drive AI/ML quality
+- **Participation balance**: Parameters encourage appropriate participation in each system
+- **Network sustainability**: Settings support long-term network health and growth
+
+## Staking Strategy Considerations
+
+### Validation Staking Strategy
+
+**Validator Participation**:
+- **Long-term commitment**: Unbonding period requires serious commitment to validation
+- **Infrastructure investment**: Validation requires significant technical infrastructure
+- **Network responsibility**: Validators bear responsibility for blockchain security and consensus
+- **Competitive environment**: Limited validator slots create competitive dynamics
+
+### Reputational Staking Strategy
+
+**AI/ML Participation**:
+- **Specialized knowledge**: Reputational staking rewards AI/ML expertise
+- **Quality focus**: Staking parameters incentivize high-quality inference provision
+- **Flexible participation**: Multiple roles (worker, reputer) provide participation options
+- **Performance-based rewards**: Staking success depends on AI/ML performance quality
-Determines the maximum number of entries in the staking transaction pool.
+## Security Implications
-Value: `7`
+### Network Protection
-Standard value.
-It balances the transaction pool size based on expected network demand. It will be regularly assessed and adjusted as the network evolves.
+**Multi-Layer Security**:
+- **Consensus security**: Validation staking secures blockchain consensus layer
+- **Quality security**: Reputational staking ensures AI/ML inference quality
+- **Economic security**: Both staking types require economic commitment
+- **Behavioral incentives**: Parameters incentivize honest and quality behavior
-**historical_entries**
+### Risk Management
-Sets the maximum number of historical entries stored in the staking module.
+**Attack Prevention**:
+- **Economic cost**: Both staking types make attacks expensive
+- **Time delays**: Unbonding periods prevent rapid attack execution
+- **Quality control**: Minimum stakes ensure serious participation
+- **Diversified risk**: Multiple staking systems distribute security risk
-Value: `10000`
+## Best Practices
-Standard value.
-It balances historical data retention with storage efficiency. It will be regularly assessed and adjusted based on storage capabilities and network requirements.
+### Parameter Monitoring
-**bond_denom**
+**Performance Tracking**:
+- **Validator distribution**: Monitor geographic and ownership distribution of validators
+- **Staking participation**: Track participation rates in both staking systems
+- **Network security**: Assess overall network security through staking metrics
+- **Economic health**: Monitor staking economics and participant satisfaction
-Specifies the denomination of the bonded tokens.
+### Optimization Strategies
-Value: `10000`
+**Network Tuning**:
+- **Validator limits**: Adjust maximum validators based on network growth
+- **Unbonding periods**: Balance security needs with user convenience
+- **Entry limits**: Optimize transaction pool sizes for network efficiency
+- **Historical retention**: Balance query needs with storage requirements
-**min_commission_rate**
+## Prerequisites
-Sets the minimum commission rate a validator can charge.
+- **Staking concepts**: Understanding of proof-of-stake mechanisms and validator economics
+- **Network security**: Knowledge of blockchain security models and attack vectors
+- **Economic principles**: Understanding of token economics and incentive structures
+- **Technical operations**: Familiarity with validator and network node operations
-Value: `0.000000000000000000`
+## Next Steps
-Standard value. No minimum commission rate needs to be set. This incentivizes stakers into the validators, offering them higher rewards, and also as part of a community-building strategy.
+- [Study chain parameters](/devs/reference/params/chain) for reputational staking parameter details
+- [Explore mint parameters](/devs/reference/params/mint) for understanding staking reward economics
+- [Review consensus parameters](/devs/reference/params/consensus) for validator operational requirements
+- [Learn about validator operations](/devs/validators/stake-a-validator) for practical staking implementation
diff --git a/pages/devs/reputers.mdx b/pages/devs/reputers.mdx
index 5ab5f8e..212bd7c 100644
--- a/pages/devs/reputers.mdx
+++ b/pages/devs/reputers.mdx
@@ -1,32 +1,168 @@
# Reputers
-Reputers ensure the accuracy and reliability of worker inferences and the overall integrity of topics.
+## What You'll Learn
+- Understanding the critical role of reputers in network quality assurance
+- How reputers source ground truth data and calculate loss functions
+- The staking mechanism and reward structure for reputer participation
+- Complete workflow from ground truth sourcing to consensus-based rewards
-## What do Reputers do?
+## Overview
-### Source Ground Truth
+**Reputers ensure the accuracy and reliability of worker inferences and the overall integrity of topics.**
-Reputers source the ground truth as specified by the [topic metadata](/devs/topic-creators/how-to-create-topic#creating-your-first-topic).
-For example, they might retrieve the actual price of ETH at a specific moment in time.
+### Why Reputers Are Essential
-This ground truth is essential for evaluating the accuracy of inferences made by workers.
+**Quality Assurance Functions**:
+- **Accuracy validation**: Verify worker predictions against actual outcomes
+- **Network integrity**: Maintain trust and reliability across all network operations
+- **Consensus building**: Create shared understanding of prediction quality
+- **Incentive alignment**: Ensure workers are rewarded for accurate contributions
-### Calculate Loss
+## Core Reputer Functions
-Reputers calculate the loss of worker inferences and forecast-implied inferences relative to the ground truth.
+### 1. Source Ground Truth
-For instance, if a topic's [loss function](/devs/topic-creators/create-deploy-loss-calculation-function) is an L1-norm, reputers apply this norm to each worker's inference and the actual price of ETH in 10 days.
-They then respond with a [`ValueBundle` of losses](https://github.com/allora-network/allora-chain/blob/1d56c50c8d0f43446d770cf387dbd43bb3613e8c/x/emissions/proto/emissions/v1/reputer.proto#L28), detailing the calculated losses for each inference.
+**Reputers source the ground truth as specified by the [topic metadata](/devs/topic-creators/how-to-create-topic#creating-your-first-topic).**
-### Secure Topics with Stake
+**Ground Truth Process**:
+- **Data retrieval**: Access authoritative sources for actual outcomes
+- **Verification standards**: Ensure data accuracy and authenticity
+- **Timing precision**: Capture ground truth at exactly specified moments
+- **Format consistency**: Structure data according to network standards
-Reputers secure topics with their [stake](/devs/reputers/set-and-adjust-stake). The more a reputer stakes in a topic, the greater their influence on the consensus of losses.
+**Example Application**:
+For example, they might retrieve the actual price of ETH at a specific moment in time.
-Additionally, reputers can be [delegated to](/devs/reference/allorad#delegate-stake-to-a-reputer-for-a-topic), increasing their ability to secure the topic further. This delegated stake enhances the extent to which reputers secure the topic
-as opposed to the broader chain security.
+**This ground truth is essential for evaluating the accuracy of inferences made by workers.**
-### Receive Rewards
+**Ground Truth Sources**:
+- **Market data providers**: Reliable financial data feeds and exchanges
+- **Official sources**: Government agencies, central banks, or regulatory bodies
+- **Consensus sources**: Multiple data providers for cross-validation
+- **Real-time feeds**: Live data streams for immediate verification
-Reputers [receive rewards](/home/layers/consensus/reputers) based on how close their reported losses are to the consensus. A stake-weighted average of each reported loss is taken among reputers per topic per epoch. The closer a reputer's values are to this average, the more they are rewarded.
+### 2. Calculate Loss
-This system incentivizes reputers to provide accurate and reliable loss calculations, contributing to the network's overall integrity and reliability.
\ No newline at end of file
+**Reputers calculate the loss of worker inferences and forecast-implied inferences relative to the ground truth.**
+
+**Loss Calculation Process**:
+- **Function application**: Apply topic-specified loss functions to worker predictions
+- **Accuracy measurement**: Quantify the difference between predictions and reality
+- **Comparative analysis**: Evaluate relative performance across all workers
+- **Data structuring**: Format results for network consensus processing
+
+**For instance, if a topic's [loss function](/devs/topic-creators/create-deploy-loss-calculation-function) is an L1-norm, reputers apply this norm to the difference between each worker's inference and the actual price of ETH in 10 days.**
+
+**They then respond with a [`ValueBundle` of losses](https://github.com/allora-network/allora-chain/blob/1d56c50c8d0f43446d770cf387dbd43bb3613e8c/x/emissions/proto/emissions/v1/reputer.proto#L28), detailing the calculated losses for each inference.**
+
+**Technical Implementation**:
+- **Mathematical precision**: Apply loss functions with high numerical accuracy
+- **Batch processing**: Handle multiple worker inferences efficiently
+- **Error handling**: Manage edge cases and invalid data gracefully
+- **Protocol compliance**: Format outputs according to network specifications
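+
+For a single worker inference under an L1 loss, the calculation reduces to the absolute error between the inference and the ground truth:
+
+$$
+\mathcal{L}_i = \lvert \hat{y}_i - y \rvert
+$$
+
+where $\hat{y}_i$ is worker $i$'s inference and $y$ is the ground truth (e.g. the realized ETH price).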
+
+### 3. Secure Topics with Stake
+
+**Reputers secure topics with their [stake](/devs/reputers/set-and-adjust-stake). The more a reputer stakes in a topic, the greater their influence on the consensus of losses.**
+
+**Staking Mechanism**:
+- **Economic commitment**: Put financial resources at risk to participate
+- **Influence weighting**: Higher stakes provide greater consensus influence
+- **Security provision**: Financial backing ensures topic reliability
+- **Aligned incentives**: Economic risk encourages accurate evaluations
+
+**Additionally, reputers can be [delegated to](/devs/reference/allorad#delegate-stake-to-a-reputer-for-a-topic), increasing their ability to secure the topic further. This delegated stake enhances the extent to which reputers secure the topic as opposed to the broader chain security.**
+
+**Delegation Benefits**:
+- **Increased capacity**: Handle larger volumes of inferences and topics
+- **Community trust**: Attract delegations from network participants
+- **Enhanced influence**: Greater weight in consensus decisions
+- **Shared responsibility**: Community involvement in topic security
+
+### 4. Receive Rewards
+
+**Reputers [receive rewards](/home/layers/consensus/reputers) based on how close their reported losses are to the consensus. A stake-weighted average of each reported loss is taken among reputers per topic per epoch. The closer a reputer's values are to this average, the more they are rewarded.**
+
+**Reward Mechanism**:
+- **Consensus alignment**: Rewards increase with proximity to network consensus
+- **Stake weighting**: Higher stakes contribute more to consensus calculations
+- **Accuracy incentives**: Better loss calculations result in higher rewards
+- **Continuous evaluation**: Performance assessed across multiple epochs
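+
+As a simple formalization of the stake-weighted average described above, for reputers $j$ with stakes $s_j$ reporting losses $L_j$ on a given inference:
+
+$$
+\bar{L} = \frac{\sum_j s_j L_j}{\sum_j s_j}
+$$
+
+The closer a reputer's reported $L_j$ is to $\bar{L}$, the larger their reward for that epoch.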
+
+**This system incentivizes reputers to provide accurate and reliable loss calculations, contributing to the network's overall integrity and reliability.**
+
+**Long-term Benefits**:
+- **Reputation building**: Consistent accuracy enhances network standing
+- **Compound rewards**: Success leads to more delegations and higher stakes
+- **Network health**: Quality evaluations improve overall system reliability
+- **Economic sustainability**: Balanced reward system ensures continued participation
+
+## Reputer Workflow
+
+### Complete Process Overview
+
+**Step-by-Step Operations**:
+1. **Monitor topics**: Track active topics requiring evaluation
+2. **Gather ground truth**: Collect authoritative data for comparison
+3. **Calculate losses**: Apply mathematical functions to worker predictions
+4. **Submit evaluations**: Report loss calculations to the network
+5. **Participate in consensus**: Contribute to aggregate loss determinations
+6. **Earn rewards**: Receive compensation based on consensus alignment
+
+### Performance Optimization
+
+**Excellence Strategies**:
+- **Data source quality**: Use the most reliable and accurate ground truth sources
+- **Calculation precision**: Implement mathematical functions with high accuracy
+- **Timing optimization**: Submit evaluations promptly within epoch windows
+- **Consensus awareness**: Understand network patterns and participant behavior
+
+## Getting Started as a Reputer
+
+### Prerequisites Assessment
+
+**Required Capabilities**:
+- **Data access**: Ability to obtain reliable ground truth information
+- **Technical skills**: Understanding of loss functions and mathematical calculations
+- **Financial resources**: Sufficient funds for staking requirements
+- **Network knowledge**: Familiarity with blockchain operations and consensus
+
+### Setup and Operations
+
+**Implementation Steps**:
+- **Stake allocation**: Determine appropriate stake amounts for chosen topics
+- **Infrastructure setup**: Establish reliable data feeds and calculation systems
+- **Monitoring systems**: Implement tracking for topic activity and performance
+- **Community engagement**: Build relationships with potential delegators
+
+## Strategic Considerations
+
+### Topic Selection
+
+**Evaluation Criteria**:
+- **Data availability**: Ensure reliable access to ground truth sources
+- **Competition level**: Assess existing reputer participation and stakes
+- **Topic activity**: Consider worker participation and inference volume
+- **Reward potential**: Evaluate economic opportunity relative to stake requirements
+
+### Risk Management
+
+**Operational Safety**:
+- **Stake diversification**: Spread stakes across multiple topics
+- **Performance monitoring**: Track accuracy and consensus alignment
+- **Data validation**: Verify ground truth sources for reliability
+- **Network updates**: Stay informed about protocol changes and improvements
+
+## Prerequisites
+
+- **Technical expertise**: Understanding of mathematical functions and data analysis
+- **Financial resources**: Adequate funds for staking and operational costs
+- **Data access**: Reliable sources for ground truth information
+- **Network familiarity**: Basic understanding of blockchain consensus and rewards
+
+## Next Steps
+
+- [Learn to deploy a reputer node](/devs/reputers/reputers) for hands-on implementation
+- [Understand stake management](/devs/reputers/set-and-adjust-stake) for optimal participation
+- [Explore reputer data querying](/devs/reputers/query-reputer-data) for performance monitoring
+- [Study the coin prediction reputer example](/devs/reputers/coin-prediction-reputer) for practical guidance
\ No newline at end of file
diff --git a/pages/devs/reputers/coin-prediction-reputer.mdx b/pages/devs/reputers/coin-prediction-reputer.mdx
index 74f343f..d26dd00 100644
--- a/pages/devs/reputers/coin-prediction-reputer.mdx
+++ b/pages/devs/reputers/coin-prediction-reputer.mdx
@@ -2,41 +2,94 @@ import { Callout } from 'nextra/components'
# Deploy a Coin Prediction Reputer
-This is an example of a setup for running an Allora Network reputer node for providing ground truth and reputation, where the Allora Network node defers the requests to another container which is responsible for providing the ground truth, which is run in a separate container. It also provides a means of updating the internal database of the ground truth provider.
+## What You'll Learn
+- Complete setup process for a coin prediction reputer node using Docker
+- Understanding the three-component architecture: Reputer, Truth, and Updater containers
+- Configuration management for wallet settings and reputer parameters
+- Real-world implementation for ETH price prediction topics
-## Components
+## Overview
-- **Reputer**: The node that responds to reputer requests from the Allora Network.
-- **Truth**: A container that performs reputation tasks, maintains the state of the model, and responds to reputation requests via a simple Flask application. It fetches data from CoinGecko.
-- **Updater**: A cron-like container designed to periodically trigger the Truth node's data updates.
+**This is an example setup for running an Allora Network reputer node that provides ground truth and reputation. The Allora node defers requests to a separate container responsible for providing the ground truth.** It also provides a means of updating the internal database of the ground truth provider.
-A full working example for a reputer node for ETH price prediction topics is provided in the [`docker-compose.yml` file](https://github.com/allora-network/coin-prediction-reputer/blob/main/docker-compose.yml) of our example repo. Simply run:
+### Architecture Benefits
-## Explainer Video
+**Why This Design Works**:
+- **Separation of concerns**: Each component handles specific responsibilities
+- **Scalability**: Individual containers can be scaled independently
+- **Maintainability**: Components can be updated without affecting others
+- **Reliability**: Isolated failures don't compromise the entire system
-Please see the video below to get a full deep-dive on how to deploy a reputer:
+## System Components
+
+### Component Architecture
+
+**Reputer**: The node that responds to reputer requests from the Allora Network.
+- **Primary function**: Interface between Allora Network and ground truth system
+- **Network communication**: Handles all blockchain interactions and consensus
+- **Request routing**: Forwards evaluation requests to the Truth container
+
+**Truth**: A container that performs reputation tasks, maintains the state of the model, and responds to reputation requests via a simple Flask application. It fetches data from CoinGecko.
+- **Data source**: CoinGecko API integration for reliable price data
+- **Ground truth provision**: Supplies authoritative data for loss calculations
+- **State management**: Maintains historical data and current market information
+
+**Updater**: A cron-like container designed to periodically trigger the Truth node's data updates.
+- **Automation**: Scheduled updates to maintain data freshness
+- **Data integrity**: Ensures continuous availability of ground truth
+- **System reliability**: Reduces manual intervention requirements
+
+### Example Implementation
+
+**A full working example for a reputer node for ETH price prediction topics is provided in the [`docker-compose.yml` file](https://github.com/allora-network/coin-prediction-reputer/blob/main/docker-compose.yml) of our example repo.** The setup and deployment steps are walked through below.
+
+**Repository Benefits**:
+- **Complete implementation**: Ready-to-deploy configuration
+- **Best practices**: Proven architecture and setup patterns
+- **Documentation**: Comprehensive examples and explanations
+- **Community support**: Maintained by the Allora Network team
+
+## Educational Resources
+
+### Video Walkthrough
+
+**Please see the video below to get a full deep-dive on how to deploy a reputer:**
-## Setup a Reputer Node with `docker-compose`
+**Video Learning Benefits**:
+- **Visual guidance**: Step-by-step demonstration of the entire process
+- **Troubleshooting**: Common issues and their solutions
+- **Best practices**: Expert recommendations for optimal setup
+- **Real-time examples**: Live deployment walkthrough
-### Download the Repository
+## Setup Process
+
+### Step 1: Download the Repository
```bash
git clone https://github.com/allora-network/coin-prediction-reputer.git
cd coin-prediction-reputer
```
-## Configure Your Environment
+**Repository Structure**:
+- **Complete codebase**: All necessary files for reputer deployment
+- **Configuration templates**: Pre-built examples for quick setup
+- **Documentation**: Detailed instructions and troubleshooting guides
+- **Docker integration**: Containerized deployment for easy management
+
+### Step 2: Configure Your Environment
1. Copy `config.example.json` and name the copy `config.json`.
2. Open `config.json` and **update** the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:
-### `wallet` Sub-object
+#### Wallet Configuration
+
+**`wallet` Sub-object**:
-1. `nodeRpc`: The [RPC URL](/devs/get-started/setup-wallet#rpc-url-and-chain-id) for the corresponding network the node will be deployed on
-2. `addressKeyName`: The name you gave your wallet key when [setting up your wallet](/devs/get-started/setup-wallet)
-3. `addressRestoreMnemonic`: The mnemonic that was outputted when setting up a new key
+1. **`nodeRpc`**: The [RPC URL](/devs/get-started/quick-start#network-configuration) for the corresponding network the node will be deployed on
+2. **`addressKeyName`**: The name you gave your wallet key when [setting up your wallet](/devs/get-started/quick-start#create-your-wallet)
+3. **`addressRestoreMnemonic`**: The mnemonic that was outputted when setting up a new key
{/*
`addressKeyName` and `addressRestoreMnemonic` are optional parameters. If you did not previously generate keys, keys will be generated for you when [running the node](/devs/workers/deploy-worker/using-docker#generate-keys-and-export-variables).
@@ -44,19 +97,28 @@ cd coin-prediction-reputer
If you have existing keys that you wish to use, you will need to provide these variables.
*/}
-### `reputer` Config
+**Configuration Security**:
+- **Secure storage**: Keep your mnemonic phrase in a safe location
+- **Access control**: Limit file permissions for configuration files
+- **Backup strategy**: Maintain secure backups of wallet information
+
+#### Reputer Configuration
+
+**`reputer` Config**:
-1. `topicId`: The specific topic ID you created the reputer for.
-2. `SourceOfTruthEndpoint`: The endpoint exposed by your source of truth server to provide the truth data to the network.
-3. `Token`: The token for the specific topic you are verifying truth data for. This token should be included in the source of truth endpoint for retrieval.
+1. **`topicId`**: The specific topic ID you created the reputer for.
+2. **`SourceOfTruthEndpoint`**: The endpoint exposed by your source of truth server to provide the truth data to the network.
+3. **`Token`**: The token for the specific topic you are verifying truth data for. This token should be included in the source of truth endpoint for retrieval.
- The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is **not** related to any blockchain parameter and is only locally specific.
-4. `minStake`: The minimum stake required to participate as a reputer. This stake will be deducted from the reputer's wallet balance.
-5. `loopSeconds`: The amount of seconds to wait between attempts to get the next reputer [nonce](devs/topic-creators/topic-life-cycle#nonce)
+4. **`minStake`**: The minimum stake required to participate as a reputer. This stake will be deducted from the reputer's wallet balance.
+5. **`loopSeconds`**: The number of seconds to wait between attempts to get the next reputer [nonce](/devs/topic-creators/topic-life-cycle#nonce)
When placing your minimum stake, the system will verify the amount of funds you have already staked in the topic. If your staked amount is insufficient, it will automatically pull the necessary funds from your wallet to meet the required minimum.
+#### Multi-Topic Support
+
The `reputer` config is an array of sub-objects, each representing a different topic ID. This structure allows you to manage multiple topic IDs, each within its own sub-object.
@@ -88,11 +150,19 @@ To deploy a reputer that provides inferences for multiple topics, you can duplic
```
-## Running the Node
+**Multi-Topic Strategy**:
+- **Resource allocation**: Distribute stakes strategically across topics
+- **Performance monitoring**: Track individual topic performance
+- **Risk management**: Diversify across different prediction categories
+- **Scaling considerations**: Plan for increased computational requirements
+
+## Deployment Process
+
+### Step 3: Running the Node
Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:
-### Export Variables
+#### Export Variables
Execute the following command from the root directory:
@@ -115,11 +185,22 @@ before proceeding.
-### Request from Faucet
+**Variable Export Benefits**:
+- **Environment isolation**: Secure separation of configuration data
+- **Docker integration**: Seamless container environment setup
+- **Security enhancement**: Prevents credential exposure in command line
+- **Configuration validation**: Ensures all required parameters are present
+
+#### Request from Faucet
Copy your Allora address and request some tokens from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) to register your reputer in the next step successfully.
-### Deploy the Node
+**Funding Requirements**:
+- **Minimum stake**: Sufficient tokens for your configured minimum stake
+- **Transaction fees**: Additional tokens for network operations
+- **Operational buffer**: Extra funds for ongoing network participation
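+
+Once the faucet transaction completes, you can verify your balance with a standard bank query (a sketch; the address and RPC URL are placeholders):
+
+```bash
+# Check that the faucet tokens arrived before registering the reputer.
+allorad q bank balances <YOUR_ALLORA_ADDRESS> --node <RPC_URL>
+```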
+
+#### Deploy the Node
```
docker compose up --build
@@ -127,15 +208,33 @@ docker compose up --build
Both the offchain node and the source services will be started. They will communicate through endpoints attached to the internal DNS.
+**Deployment Process**:
+- **Container orchestration**: All components start in coordinated fashion
+- **Internal networking**: Secure communication between containers
+- **Service discovery**: Automatic endpoint resolution and connection
+- **Health monitoring**: Built-in checks for service availability
+
+## Verification and Monitoring
+
+### Successful Deployment
+
A **successful** response from your Reputer should display:
```bash
{"level":"debug","msg":"Send Reputer Data to chain","txHash":"","time":"","message":"Success"}
```
-Congratulations! You've successfully deployed and registered your node on Allora.
+**Congratulations! You've successfully deployed and registered your node on Allora.**
+
+**Success Indicators**:
+- **Transaction confirmation**: Valid transaction hash in logs
+- **Network registration**: Node appears in network participant lists
+- **Data flow**: Truth service responding to requests properly
+- **Consensus participation**: Active involvement in reputation calculations
-### Keep it updated
+### Data Maintenance
+
+#### Keep it updated
You can keep the state updated by hitting the url:
@@ -147,16 +246,24 @@ where:
- `token-from`: the name of the token on Coingecko naming, e.g. ethereum
- `token-to`: the name of the token on Coingecko naming, e.g. usd
-It is expected that this endpoint is hit periodically, as this is crucial for maintaining the accuracy of the provided ground truth.
+**It is expected that this endpoint is hit periodically, as this is crucial for maintaining the accuracy of the provided ground truth.**
+
+**Update Strategy**:
+- **Scheduled updates**: Implement regular data refresh cycles
+- **Real-time monitoring**: Track data freshness and accuracy
+- **Error handling**: Manage API failures and network issues
+- **Performance optimization**: Balance update frequency with resource usage
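+
+If you want scheduled refreshes outside the bundled Updater container, a plain cron entry works as a sketch (the token pair mirrors the example URL above; the 10-minute interval is illustrative and should match your topic's cadence):
+
+```bash
+# Refresh the ETH/USD ground truth every 10 minutes (illustrative interval).
+*/10 * * * * curl -s http://localhost:8000/update/ETHUSD/ethereum/usd
+```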
+
+## Testing and Validation
-## Testing the Truth Service
+### Testing the Truth Service
-Here we'll setup a reputer with only the "truth service", which fetches the ground truth.
+**Here we'll set up a reputer with only the "truth service", which fetches the ground truth.**
-To only test the truth service, you can simply follow these steps:
+**To only test the truth service, you can simply follow these steps:**
-- Run `docker compose up --build truth` and wait for the initial data load.
-- Requests can now be sent, e.g. ETH price ground truths can be fetched with:
+- **Run `docker compose up --build truth`** and wait for the initial data load.
+- **Requests can now be sent**, e.g. ETH price ground truths can be fetched with:
```
$ curl http://localhost:8000/gt/ETHUSD/1719565747
{"value":"3431.440268842158"}
@@ -164,4 +271,42 @@ To only test the truth service, you can simply follow these steps:
or you can trigger an update to the current ETH price:
```
$ curl http://localhost:8000/update/ETHUSD/ethereum/usd
- ```
\ No newline at end of file
+ ```
+
+**Testing Benefits**:
+- **Component isolation**: Test truth service independently
+- **Data validation**: Verify ground truth accuracy before full deployment
+- **Performance assessment**: Measure response times and reliability
+- **Debugging facilitation**: Identify issues before network integration
+
+## Best Practices
+
+### Operational Considerations
+
+**Reliability Strategies**:
+- **Regular monitoring**: Track all component health and performance
+- **Data backup**: Maintain copies of critical configuration and state data
+- **Update procedures**: Establish processes for software and configuration updates
+- **Error recovery**: Implement robust error handling and recovery mechanisms
+
+### Security Guidelines
+
+**Protection Measures**:
+- **Access control**: Limit network access to reputer endpoints
+- **Credential management**: Secure storage of wallet keys and API credentials
+- **Network security**: Use firewalls and secure communication protocols
+- **Regular updates**: Keep all software components current with security patches
+
+## Prerequisites
+
+- **Docker and Docker Compose**: Container orchestration platform
+- **Network connectivity**: Stable internet connection for API access and blockchain communication
+- **Wallet setup**: Configured Allora wallet with sufficient funds
+- **Technical understanding**: Basic knowledge of containerized applications and blockchain operations
+
+## Next Steps
+
+- [Learn about reputer data querying](/devs/reputers/query-reputer-data) for performance monitoring
+- [Understand stake management](/devs/reputers/set-and-adjust-stake) for optimal network participation
+- [Explore general reputer deployment](/devs/reputers/reputers) for broader implementation options
+- [Study the reputer role](/devs/reputers) for comprehensive understanding of network functions
\ No newline at end of file
diff --git a/pages/devs/reputers/query-reputer-data.mdx b/pages/devs/reputers/query-reputer-data.mdx
index af38d5d..1a08dcc 100644
--- a/pages/devs/reputers/query-reputer-data.mdx
+++ b/pages/devs/reputers/query-reputer-data.mdx
@@ -1,213 +1,351 @@
# How to Query Reputer Data using `allorad`
-Below is a list of commands to understand how to pull information about reputers via [`allorad`](/devs/get-started/cli#installing-allorad):
+## What You'll Learn
+- Essential commands for querying reputer registration, stakes, and performance data
+- How to check delegate stake placements and removal status
+- Understanding reputer authority, listening coefficients, and EMA scores
+- Bulk querying techniques for analyzing multiple reputers
+
+## Overview
+
+**Below is a list of commands to understand how to pull information about reputers via [`allorad`](/devs/get-started/quick-start#install-the-allora-cli):**
+
+### Why Query Reputer Data?
+
+Reputer data queries help you:
+- **Monitor performance**: Track reputer scores and activity levels
+- **Manage delegations**: Evaluate reputer effectiveness before staking
+- **Analyze network**: Understand stake distribution and reputer authority
+- **Track operations**: Monitor stake placements, removals, and rewards
## Prerequisites
-- [`allorad` CLI](/devs/get-started/cli)
-- A basic understanding of the Allora Network
+- **[`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli)**: Command-line interface for Allora Network
+- **A basic understanding of the Allora Network**: Familiarity with reputers, stakes, and topics
+- **Access to RPC endpoints**: Network connectivity to query blockchain data
+- **Reputer addresses**: Know the specific reputer addresses you want to query
-## Query Functions
+## Query Command Structure
These functions read from the appchain only and do not write. Add the **Command** value into your query to retrieve the expected data.
+**Base Query Format**:
```bash
allorad q emissions [Command] --node <RPC_URL>
```
-## Check if Reputer is Registered in a Topic
+**Command Components**:
+- **`allorad q emissions`**: Base query structure for emissions module
+- **`[Command]`**: Specific function to execute (see individual commands below)
+- **`--node <RPC_URL>`**: RPC endpoint for network connection
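+
+For example, the registration check described below substitutes its command name and positional arguments into this pattern. The topic ID, address, and RPC URL here are placeholders:
+
+```bash
+# Check whether a reputer address is registered in topic 1.
+allorad q emissions is-reputer-registered 1 <REPUTER_ADDRESS> --node <RPC_URL>
+```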
+
+## Registration and Status Commands
+### Check if Reputer is Registered in a Topic
+
+**Query Details**:
- **RPC Method:** `IsReputerRegisteredInTopicId`
- **Command:** `is-reputer-registered [topic_id] [address]`
- **Description:** Checks whether a reputer is registered in a specific topic. Returns `true` if the reputer is registered in the given topic, and `false` otherwise.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic where you want to check the reputer’s registration status.
- - `address`: The address of the reputer you want to check.
-### Use Case:
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic where you want to check the reputer's registration status.
+- **`address`**: The address of the reputer you want to check.
+
+#### Use Case
+
**Why use it?**
- This command is essential for verifying whether a reputer is properly registered in a specific topic before submitting reputation-related data or participating in topic-related activities.
**Example Scenario:**
- Before a reputer attempts to evaluate workers or participate in consensus, you can confirm if they are registered to the relevant topic, ensuring their eligibility for participation.
-## Check Reputer Stake in a Topic
+**Practical Applications**:
+- Pre-delegation verification for delegators
+- Reputer eligibility validation before participation
+- Network compliance checking
+
+## Stake Analysis Commands
+### Check Reputer Stake in a Topic
+
+**Query Details**:
- **RPC Method:** `GetReputerStakeInTopic`
- **Command:** `stake-in-topic-reputer [address] [topic_id]`
- **Description:** Retrieves the stake a reputer has in a specific topic, including any stake that has been delegated to them.
-- **Positional Arguments:**
- - `address`: The address of the reputer whose stake is being queried.
- - `topic_id`: The identifier of the topic.
-### Use Case:
+**Positional Arguments:**
+- **`address`**: The address of the reputer whose stake is being queried.
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- This command is essential for understanding the total stake a reputer holds in a specific topic, including delegated stake, which is important for determining their influence.
**Example Scenario:**
- Before delegating more stake, you may want to check how much stake a reputer already has in a particular topic.
----
+**Practical Applications**:
+- Stake distribution analysis
+- Reputer influence assessment
+- Delegation strategy planning
-## Get Total Delegate Stake in a Reputer for a Topic
+### Get Total Delegate Stake in a Reputer for a Topic
+**Query Details**:
- **RPC Method:** `GetDelegateStakeInTopicInReputer`
- **Command:** `stake-total-delegated-in-topic-reputer [reputer_address] [topic_id]`
- **Description:** Retrieves the total amount of stake delegated to a reputer for a specific topic.
-- **Positional Arguments:**
- - `reputer_address`: The address of the reputer.
- - `topic_id`: The identifier of the topic.
-### Use Case:
+**Positional Arguments:**
+- **`reputer_address`**: The address of the reputer.
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- This command provides insight into how much stake has been delegated to a reputer for a given topic, which can impact their role in network consensus.
**Example Scenario:**
- As a delegator, you may want to see how much stake has already been delegated to a reputer before deciding to contribute more.
----
+**Practical Applications**:
+- Delegation saturation analysis
+- Reputer popularity assessment
+- Risk distribution evaluation
+
+## Delegation Management Commands
-## Get Stake Delegated to a Reputer
+### Get Stake Delegated to a Reputer
+**Query Details**:
- **RPC Method:** `GetDelegateStakePlacement`
- **Command:** `delegate-stake-placement [topic_id] [delegator] [target]`
- **Description:** Retrieves the amount of tokens delegated to a specific reputer by a given delegator for a topic.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `delegator`: The address of the delegator.
- - `target`: The address of the target reputer.
-### Use Case:
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic.
+- **`delegator`**: The address of the delegator.
+- **`target`**: The address of the target reputer.
+
+#### Use Case
+
**Why use it?**
- Use this command to track how much stake a delegator has assigned to a particular reputer in a specific topic.
**Example Scenario:**
- A delegator can check the exact amount of tokens they have staked on a specific reputer within a topic.
----
+**Practical Applications**:
+- Personal delegation tracking
+- Portfolio management for delegators
+- Stake allocation verification
-## Get Removed Delegated Stake from a Reputer
+### Get Removed Delegated Stake from a Reputer
+**Query Details**:
- **RPC Method:** `GetDelegateStakeRemoval`
- **Command:** `delegate-stake-removal [block_height] [topic_id] [delegator] [reputer]`
- **Description:** Retrieves the current state of a pending delegate stake removal for a delegator in a topic.
-- **Positional Arguments:**
- - `block_height`: The block height at which the removal is pending.
- - `topic_id`: The identifier of the topic.
- - `delegator`: The address of the delegator.
- - `reputer`: The address of the target reputer.
-#### Use Case:
+**Positional Arguments:**
+- **`block_height`**: The block height at which the removal is pending.
+- **`topic_id`**: The identifier of the topic.
+- **`delegator`**: The address of the delegator.
+- **`reputer`**: The address of the target reputer.
+
+#### Use Case
+
**Why use it?**
- This command helps track pending removals of delegated stake, ensuring visibility into the process of un-staking tokens from a reputer.
**Example Scenario:**
- A delegator can check the status of their pending delegate stake removal request.
----
+**Practical Applications**:
+- Unstaking process monitoring
+- Liquidity planning for delegators
+- Pending operation tracking
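+
+Because this query takes four positional arguments, the full invocation looks like the following sketch (all values are placeholders):
+
+```bash
+# Inspect a pending delegate stake removal scheduled at a given block height.
+allorad q emissions delegate-stake-removal <BLOCK_HEIGHT> <TOPIC_ID> <DELEGATOR_ADDRESS> <REPUTER_ADDRESS> --node <RPC_URL>
+```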
-## Get Total Stake Delegated to a Reputer
+### Get Total Stake Delegated to a Reputer
+**Query Details**:
- **RPC Method:** `GetDelegateStakeUponReputer`
- **Command:** `delegate-stake-on-reputer [topic_id] [target]`
- **Description:** Retrieves the total amount of tokens delegated to a reputer in a specific topic.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `target`: The address of the target reputer.
-### Use Case:
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic.
+- **`target`**: The address of the target reputer.
+
+#### Use Case
+
**Why use it?**
- This command provides insight into the total delegated stake a reputer has accumulated in a given topic, which impacts their standing in the network.
**Example Scenario:**
- You may want to know how much stake has been assigned to a reputer before deciding to interact with them in the topic.
----
+**Practical Applications**:
+- Reputer evaluation for delegators
+- Network influence analysis
+- Consensus participation assessment
-## Get Reputer's Latest Score in a Topic
+## Performance Analysis Commands
+### Get Reputer's Latest Score in a Topic
+
+**Query Details**:
- **RPC Method:** `GetReputerScoreEma`
- **Command:** `reputer-score-ema [topic_id] [reputer]`
- **Description:** Returns the latest Exponential Moving Average (EMA) score for a reputer in a specific topic.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer.
-### Use Case:
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer.
+
+#### Use Case
+
**Why use it?**
- This command allows you to track the latest performance score of a reputer, giving insight into their effectiveness within the network.
**Example Scenario:**
- Before delegating stake, you may want to see how well a reputer is performing in terms of their most recent EMA score.
----
+**Practical Applications**:
+- Performance-based delegation decisions
+- Reputer effectiveness tracking
+- Quality assessment for network participation
+
+### Get Listening Coefficient for a Reputer
+
+**Query Details**:
+- **RPC Method:** `GetListeningCoefficient`
+- **Command:** `listening-coefficient [topic_id] [reputer]`
+- **Description:** Returns the current [listening coefficient](/home/layers/consensus/reputers#solution-adjusted-stake) for a given reputer in a specific topic. The coefficient measures how much a reputer is "listening" or interacting with the network. If no coefficient exists, it defaults to `1`.
+
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer whose listening coefficient is being queried.
+
+#### Use Case
+
+**Why use it?**
+- This command is useful to determine how actively a reputer is interacting with a topic. The listening coefficient reflects how engaged the reputer is in the network's consensus and decision-making process.
+
+**Example Scenario:**
+- As a delegator, you may want to check the listening coefficient of a reputer before deciding to delegate stake to them, ensuring they are actively participating in the topic.
+
+**Practical Applications**:
+- Activity level assessment
+- Network engagement verification
+- Delegation quality control
-## Get Reputer's Stake Removal Information
+## Authority and Stake Management Commands
+### Get Reputer's Stake Removal Information
+
+**Query Details**:
- **RPC Method:** `GetStakeRemovalForReputerAndTopicId`
- **Command:** `stake-removal [reputer] [topic_id]`
- **Description:** Retrieves information about a pending stake removal request for a reputer in a specific topic.
-- **Positional Arguments:**
- - `reputer`: The address of the reputer.
- - `topic_id`: The identifier of the topic.
-### Use Case:
+**Positional Arguments:**
+- **`reputer`**: The address of the reputer.
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to check the details of any pending stake removal for a reputer in a topic.
**Example Scenario:**
-- You can track the status of a reputer’s pending stake removal request in the network.
+- You can track the status of a reputer's pending stake removal request in the network.
----
+**Practical Applications**:
+- Stake operation monitoring
+- Reputer commitment tracking
+- Network participation planning
-## Get Total Stake Delegated to a Reputer
+### Get a Reputer's Total Stake Authority
+**Query Details**:
- **RPC Method:** `GetStakeReputerAuthority`
- **Command:** `reputer-authority [topic_id] [reputer]`
- **Description:** Retrieves the total stake a reputer holds in a topic, including both their own stake and delegated stake.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer.
-### Use Case:
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer.
+
+#### Use Case
+
**Why use it?**
- This command provides a complete view of a reputer's stake in a topic, combining both self-stake and delegated stake, which influences their standing in the network.
**Example Scenario:**
- Before interacting with a reputer in a topic, you may want to see their total stake, including how much has been delegated to them.
----
-
-## Get Listening Coefficient for a Reputer
+**Practical Applications**:
+- Complete authority assessment
+- Network influence evaluation
+- Consensus weight calculation
-- **RPC Method:** `GetListeningCoefficient`
-- **Command:** `listening-coefficient [topic_id] [reputer]`
-- **Description:** Returns the current [listening coefficient](/home/layers/consensus/reputers#solution-adjusted-stake) for a given reputer in a specific topic. The coefficient measures how much a reputer is "listening" or interacting with the network. If no coefficient exists, it defaults to `1`.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer whose listening coefficient is being queried.
+## Bulk Analysis Commands
-### Use Case:
-**Why use it?**
-- This command is useful to determine how actively a reputer is interacting with a topic. The listening coefficient reflects how engaged the reputer is in the network's consensus and decision-making process.
-
-**Example Scenario:**
-- As a delegator, you may want to check the listening coefficient of a reputer before deciding to delegate stake to them, ensuring they are actively participating in the topic.
-
----
-
-## Get Multiple Reputers' Stakes in a Topic
+### Get Multiple Reputers' Stakes in a Topic
+**Query Details**:
- **RPC Method:** `GetMultiReputerStakeInTopic`
- **Command:** `multi-reputer-stake [addresses] [topic_id]`
- **Description:** Retrieves the stakes for each reputer in a given list of addresses for a specific topic. The list can contain up to the `MaxPageLimit` number of addresses. If a reputer does not exist, their stake is defaulted to 0.
-- **Positional Arguments:**
- - `addresses`: A list of reputer addresses whose stakes you want to retrieve.
- - `topic_id`: The identifier of the topic.
-### Use Case:
+**Positional Arguments:**
+- **`addresses`**: A list of reputer addresses whose stakes you want to retrieve.
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- This command allows you to query the stakes of multiple reputers in a specific topic in a single request, making it useful for bulk operations or analysis.
**Example Scenario:**
- You want to check the stakes of a list of reputers for a specific topic to compare their authority and influence in the topic.
+
+**Practical Applications**:
+- Comparative analysis of multiple reputers
+- Batch processing for large-scale operations
+- Network-wide stake distribution analysis
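+
+A sketch of a bulk query; the comma-separated address list format is an assumption, and all values are placeholders:
+
+```bash
+# Retrieve stakes for several reputers in topic 1 in a single request.
+allorad q emissions multi-reputer-stake <ADDRESS_1>,<ADDRESS_2>,<ADDRESS_3> 1 --node <RPC_URL>
+```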
+
+## Common Use Cases
+
+### Delegation Strategy Development
+
+**Assessment Workflow**:
+1. **Check registration**: Use `is-reputer-registered` to verify eligibility
+2. **Analyze performance**: Use `reputer-score-ema` to assess effectiveness
+3. **Evaluate activity**: Use `listening-coefficient` to check engagement
+4. **Review authority**: Use `reputer-authority` to understand total stake
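+
+Combined into a single sketch, the workflow above might look like this (the address, topic ID, and RPC URL are placeholders):
+
+```bash
+# Pre-delegation due diligence for one reputer in one topic.
+TOPIC=1
+REPUTER=<REPUTER_ADDRESS>
+RPC=<RPC_URL>
+
+allorad q emissions is-reputer-registered "$TOPIC" "$REPUTER" --node "$RPC"   # eligibility
+allorad q emissions reputer-score-ema "$TOPIC" "$REPUTER" --node "$RPC"       # performance
+allorad q emissions listening-coefficient "$TOPIC" "$REPUTER" --node "$RPC"   # engagement
+allorad q emissions reputer-authority "$TOPIC" "$REPUTER" --node "$RPC"       # total stake
+```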
+
+### Portfolio Management
+
+**Monitoring Activities**:
+- Track individual delegation amounts with `delegate-stake-placement`
+- Monitor total delegated stakes with `delegate-stake-on-reputer`
+- Check pending operations with `delegate-stake-removal`
+- Assess overall reputer performance across topics
+
+### Network Analysis
+
+**Research Applications**:
+- Compare multiple reputers using `multi-reputer-stake`
+- Analyze stake distribution patterns across topics
+- Study reputer behavior and performance trends
+- Identify high-performing reputers for delegation
+
+## Next Steps
+
+- [Learn how to set and adjust stake](/devs/reputers/set-and-adjust-stake) for optimal participation
+- [Explore reputer deployment](/devs/reputers/reputers) to become a network participant
+- [Understand reputer economics](/home/layers/consensus/reputers) for strategic decisions
diff --git a/pages/devs/reputers/reputers.mdx b/pages/devs/reputers/reputers.mdx
index 4f526a2..809583f 100644
--- a/pages/devs/reputers/reputers.mdx
+++ b/pages/devs/reputers/reputers.mdx
@@ -2,7 +2,31 @@ import { Callout } from 'nextra/components'
# Deploy a Reputer Node using Docker
-Deploying a reputer in the Allora network involves configuring the `config.example.json` file to ensure your reputer can interact with the network and provide accurate truth data.
+## What You'll Learn
+- How to deploy and configure a reputer node for the Allora Network
+- Understanding reputer configuration including wallet and endpoint setup
+- Creating a truth server to provide accurate ground truth data
+- Complete deployment workflow from setup to network registration
+
+## Overview
+
+**Deploying a reputer in the Allora network involves configuring the `config.example.json` file to ensure your reputer can interact with the network and provide accurate truth data.**
+
+### What is a Reputer?
+
+Reputers serve critical functions in the Allora Network:
+- **Quality assurance**: Evaluate the accuracy of worker submissions
+- **Ground truth provision**: Supply reliable truth data for comparison
+- **Network integrity**: Maintain the quality and reliability of network inferences
+- **Reward determination**: Help calculate appropriate rewards for workers
+
+### Why Deploy a Reputer?
+
+Running a reputer provides:
+- **Network contribution**: Help maintain network quality and accuracy
+- **Reward opportunities**: Earn rewards for providing valuable evaluation services
+- **Data validation**: Contribute to the verification of network predictions
+- **Ecosystem participation**: Play a key role in network governance and quality
To build this setup, please follow these steps:
@@ -10,11 +34,13 @@ To build this setup, please follow these steps:
Ensure you have the following installed on your machine:
-- Git
-- Go (version 1.16 or later)
-- Docker
+- **Git**: Version control system for repository management
+- **Go (version 1.16 or later)**: Programming language runtime
+- **Docker**: Container platform for application deployment
+
+## Installation and Setup
-## Clone the `allora-offchain-node` Repository
+### Step 1: Clone the Repository
Download the `allora-offchain-node` git repo:
@@ -23,16 +49,25 @@ git clone https://github.com/allora-network/allora-offchain-node
cd allora-offchain-node
```
-## Configure Your Environment
+**Repository Contents**:
+- Pre-configured reputer node implementation
+- Example configuration files for easy setup
+- Sample truth server code and Docker configurations
+
+## Configuration
+
+### Step 2: Configure Your Environment
1. Copy `config.example.json` and name the copy `config.json`.
2. Open `config.json` and **update** the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:
-### `wallet` Sub-object
+#### Wallet Configuration
-1. `nodeRpc`: The [RPC URL](/devs/get-started/setup-wallet#rpc-url-and-chain-id) for the corresponding network the node will be deployed on
-2. `addressKeyName`: The name you gave your wallet key when [setting up your wallet](/devs/get-started/setup-wallet)
-3. `addressRestoreMnemonic`: The mnemonic that was outputted when setting up a new key
+**`wallet` Sub-object**:
+
+1. **`nodeRpc`**: The [RPC URL](/devs/get-started/quick-start#network-configuration) for the corresponding network the node will be deployed on
+2. **`addressKeyName`**: The name you gave your wallet key when [setting up your wallet](/devs/get-started/quick-start#create-your-wallet)
+3. **`addressRestoreMnemonic`**: The mnemonic that was outputted when setting up a new key
{/*
`addressKeyName` and `addressRestoreMnemonic` are optional parameters. If you did not previously generate keys, keys will be generated for you when [running the node](/devs/workers/deploy-worker/using-docker#generate-keys-and-export-variables).
@@ -40,18 +75,22 @@ cd allora-offchain-node
If you have existing keys that you wish to use, you will need to provide these variables.
*/}
-### `reputer` Config
+#### Reputer Configuration
+
+**`reputer` Config**:
-1. `topicId`: The specific topic ID you created the reputer for.
-2. `SourceOfTruthEndpoint`: The endpoint exposed by your source of truth server to provide the truth data to the network.
-3. `Token`: The token for the specific topic you are verifying truth data for. This token should be included in the source of truth endpoint for retrieval.
+1. **`topicId`**: The specific topic ID you created the reputer for.
+2. **`SourceOfTruthEndpoint`**: The endpoint exposed by your source of truth server to provide the truth data to the network.
+3. **`Token`**: The token for the specific topic you are verifying truth data for. This token should be included in the source of truth endpoint for retrieval.
- The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is **not** related to any blockchain parameter and is specific to your local setup.
-4. `minStake`: The minimum stake required to participate as a reputer. This stake will be deducted from the reputer's wallet balance.
+4. **`minStake`**: The minimum stake required to participate as a reputer. This stake will be deducted from the reputer's wallet balance.
When placing your minimum stake, the system will verify the amount of funds you have already staked in the topic. If your staked amount is insufficient, it will automatically pull the necessary funds from your wallet to meet the required minimum.
+#### Multi-Topic Support
+
The `reputer` config is an array of sub-objects, each representing a different topic ID. This structure allows you to manage multiple topic IDs, each within its own sub-object.
@@ -83,32 +122,33 @@ To deploy a reputer that provides inferences for multiple topics, you can duplic
```
-### Worker Config
+#### Worker Configuration (Optional)
-The `config.example.json` file that was copied and edited in the previous steps also contains a JSON object for configuring and deploying a [worker](/devs/workers/using-docker). To ignore the worker and only deploy a reputer, delete the reputer sub-object from the `config.json` file.
+**Worker Config**: The `config.example.json` file that was copied and edited in the previous steps also contains a JSON object for configuring and deploying a [worker](/devs/workers/using-docker). To ignore the worker and only deploy a reputer, delete the worker sub-object from the `config.json` file.
-## Create the Truth Server
+## Truth Server Implementation
-### Prepare the API Gateway
+### Step 3: Create the Truth Server
-Ensure you have an API gateway or server that can accept API requests to call your model.
+#### Prepare the API Gateway
-### Server Responsibilities
+Ensure you have an API gateway or server that can accept API requests to call your model.
+**Server Responsibilities**:
- Accept API requests from `main.go`.
- Respond with the corresponding inference obtained from the model.
-### Truth Relay
+#### Truth Relay Architecture
Below is a sample structure of what your `main.go`, `main.py` and Dockerfile will look like. You can also find a working example [here](https://github.com/allora-network/basic-coin-prediction-node).
-#### `main.go`
+##### `main.go` Component
`allora-offchain-node` comes preconfigured with a `main.go` file inside the [`adapter/api-worker-reputer` folder](https://github.com/allora-network/allora-offchain-node/blob/dev/adapter/api-worker-reputer/main.go).
-The `main.go` file fetches the responses outputted from the Source of Truth Endpoint based on the `SourceOfTruthEndpoint` and `Token` provided in the section above.
+**Function**: The `main.go` file fetches the responses returned by the Source of Truth Endpoint, based on the `SourceOfTruthEndpoint` and `Token` provided in the section above.
-#### `main.py`
+##### `main.py` Implementation
`allora-offchain-node` comes preconfigured with a Flask application that uses a `main.py` file to expose the Source of Truth Endpoint.
@@ -133,7 +173,7 @@ if __name__ == '__main__':
The source of truth in `allora-offchain-node` is barebones and outputs a random integer. Follow the source of truth built in [`coin-prediction-reputer`](/devs/reputers/coin-prediction-reputer) as an example for a reputer that uses CoinGecko to fetch price data.
-#### `Dockerfile`
+##### Docker Configuration
A sample Dockerfile has been created in `allora-offchain-node` that can be used to deploy your model on port 8000.
@@ -155,11 +195,13 @@ EXPOSE 8000
CMD ["python", "main.py"]
```
-## Running the Node
+## Deployment Process
+
+### Step 4: Running the Node
Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:
-### Export Variables
+#### Export Variables
Execute the following command from the root directory:
@@ -182,11 +224,16 @@ before proceeding.
-### Request from Faucet
+#### Request from Faucet
Copy your Allora address and request some tokens from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) to successfully register your reputer in the next step.
-### Deploy the Node
+**Funding Requirements**:
+- Sufficient tokens for transaction fees
+- Minimum stake amount as configured in your reputer settings
+- Additional buffer for ongoing operations
+
+#### Deploy the Node
```
docker compose up --build
@@ -194,14 +241,51 @@ docker compose up --build
Both the offchain node and the source-of-truth service will be started. They will communicate through endpoints attached to the internal DNS.
+**Deployment Components**:
+- Offchain reputer node for network communication
+- Truth server for providing ground truth data
+- Internal service discovery and communication
+
+## Verification
+
+### Successful Deployment
+
A **successful** response from your Reputer should display:
```bash
{"level":"debug","msg":"Send Reputer Data to chain","txHash":"","time":"","message":"Success"}
```
-Congratulations! You've successfully deployed and registered your node on Allora.
+**Success Indicators**:
+- Transaction hash appears in logs
+- No persistent error messages
+- Truth server responds to requests
+- Stake properly allocated to chosen topics
+
+**Congratulations! You've successfully deployed and registered your node on Allora.**
+
+## Advanced Configuration
+
+### Performance Optimization
+
+**Resource Management**:
+- Monitor container resource usage
+- Adjust stake amounts based on performance
+- Optimize truth data retrieval for faster responses
+
+### Multi-Topic Strategy
+
+**Scaling Considerations**:
+- Balance stake across multiple topics
+- Monitor relative performance and adjust allocations
+- Consider computational requirements for multiple truth sources
## Learn More
-Learn more by directly checking out the code and README for [the example ETH price reputer](https://github.com/allora-network/coin-prediction-reputer).
\ No newline at end of file
+Learn more by directly checking out the code and README for [the example ETH price reputer](https://github.com/allora-network/coin-prediction-reputer).
+
+## Next Steps
+
+- [Explore advanced reputer configurations](/devs/reputers/coin-prediction-reputer) with real market data
+- [Learn about stake management](/devs/reputers/set-and-adjust-stake) for optimal participation
+- [Understand reputer data querying](/devs/reputers/query-reputer-data) for performance monitoring
\ No newline at end of file
diff --git a/pages/devs/reputers/set-and-adjust-stake.mdx b/pages/devs/reputers/set-and-adjust-stake.mdx
index 4e19368..7f6ce73 100644
--- a/pages/devs/reputers/set-and-adjust-stake.mdx
+++ b/pages/devs/reputers/set-and-adjust-stake.mdx
@@ -1,153 +1,287 @@
# Set and Adjust Stake
-> We define stake, motivate its use, and demonstrate how it can be adjusted
+## What You'll Learn
+- Understanding how stake works for reputers and its role in network security
+- Complete guide to adding, removing, and canceling stake operations
+- Delegation management including staking to reputers and claiming rewards
+- Strategic considerations for optimal stake allocation and risk management
-## How Stake works for Reputers
+## Overview
-[Stake](/home/key-terms#stake) is used to signal confidence. A reputer earns more rewards based on their accuracy comparative to consensus (the other reputers providing data for a topic) and stake.
+**We define stake, motivate its use, and demonstrate how it can be adjusted.**
-Stake also protects Allora from malicious behavior, such as sybil attacks. We require all types of nodes to register on the chain before they can earn any rewards. Registering requires staking at least a minimum amount of ALLO. As a result, creating an army of malicious nodes would quickly become prohibitively expensive.
+### How Stake works for Reputers
+
+**[Stake](/home/key-terms#stake) is used to signal confidence.** A reputer earns more rewards based on their accuracy relative to consensus (the other reputers providing data for a topic) and stake.
+
+### Why Stake Matters
+
+**Stake also protects Allora from malicious behavior, such as sybil attacks.** We require all types of nodes to register on the chain before they can earn any rewards. Registering requires staking at least a minimum amount of ALLO. As a result, creating an army of malicious nodes would quickly become prohibitively expensive.
+
+**Stake Functions**:
+- **Confidence signaling**: Higher stake indicates stronger commitment to accuracy
+- **Reward calculation**: Stake influences reward distribution based on performance
+- **Security mechanism**: Prevents spam and malicious behavior through economic cost
+- **Network governance**: Stake provides voting power in network decisions
## Prerequisites
-- [`allorad` CLI](/devs/get-started/cli)
+- **[`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli)**: Command-line interface for network interactions
+- **Funded wallet**: Sufficient ALLO tokens for staking operations
+- **Understanding of topics**: Knowledge of which topics to participate in
+- **Risk assessment**: Evaluation of stake amounts and delegation strategies
+
+## Command Structure
-## Tx Functions
+### Tx Functions
These functions submit transactions that write state to the appchain. Insert the **Command** value into the transaction format below to execute the desired operation.
+**Base Transaction Format**:
```bash
allorad tx emissions [Command] --node <RPC_URL>
```
-## Add Stake to Self
+**Important Notes**:
+- All stake operations require gas fees
+- Stake changes may have unbonding periods
+- Always verify transaction success before proceeding
+## Self-Stake Management
+
+### Add Stake to Self
+
+**Transaction Details**:
- **RPC Method:** `AddStake`
- **Command:** `add-stake [sender] [topic_id] [amount]`
- **Description:** Adds stake to the sender for a specific topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender adding stake.
- - `topic_id`: The identifier of the topic.
- - `amount`: The amount of stake to be added.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender adding stake.
+- **`topic_id`**: The identifier of the topic.
+- **`amount`**: The amount of stake to be added.
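+
+For illustration, a minimal sketch of the call with placeholder values (address, topic ID, and amount; the amount is assumed to be in the chain's base denomination, uallo, and additional standard flags such as `--from` may be required to sign the transaction):
+
+```bash
+# Stake 1000000 uallo on topic 1 from your reputer address
+allorad tx emissions add-stake allo1yourreputeraddress 1 1000000 --node <RPC_URL>
+```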
+
+#### Use Case
+
**Why use it?**
- This command is used when a reputer or worker wants to increase their stake in a specific topic, increasing their influence or authority.
**Example Scenario:**
- As a reputer, you want to increase your stake in a specific topic to gain more influence and improve your reputation scores.
----
+**Strategic Considerations**:
+- Higher stake increases potential rewards but also increases risk
+- Consider topic competitiveness when determining stake amounts
+- Monitor network performance to time stake additions optimally
-## Remove Stake from Self
+### Remove Stake from Self
+**Transaction Details**:
- **RPC Method:** `RemoveStake`
- **Command:** `remove-stake [sender] [topic_id] [amount]`
- **Description:** Removes stake from the sender (a reputer) in a specific topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender removing stake (reputer).
- - `topic_id`: The identifier of the topic.
- - `amount`: The amount of stake to be removed.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender removing stake (reputer).
+- **`topic_id`**: The identifier of the topic.
+- **`amount`**: The amount of stake to be removed.
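+
+A placeholder sketch mirroring the add-stake example above (amount assumed in uallo):
+
+```bash
+# Begin removing 500000 uallo of stake from topic 1
+allorad tx emissions remove-stake allo1yourreputeraddress 1 500000 --node <RPC_URL>
+```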
+
+#### Use Case
+
**Why use it?**
- This command is used by reputers to reduce their stake in a topic, either for liquidity purposes or when their role in the topic has changed.
**Example Scenario:**
- A reputer wants to reduce their stake in a topic after completing their contributions and being satisfied with the rewards.
----
+**Important Considerations**:
+- Stake removal may have unbonding periods
+- Reduced stake means lower potential rewards
+- Consider market conditions before removing stake
-## Cancel Pending Stake Removal (Reputer)
+### Cancel Pending Stake Removal (Reputer)
+**Transaction Details**:
- **RPC Method:** `CancelRemoveStake`
- **Command:** `cancel-remove-stake [sender] [topic_id]`
- **Description:** Cancels the removal of stake that is pending for the sender (a reputer) in a topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender canceling the stake removal (reputer).
- - `topic_id`: The identifier of the topic.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender canceling the stake removal (reputer).
+- **`topic_id`**: The identifier of the topic.
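+
+A placeholder sketch of canceling a pending removal:
+
+```bash
+# Cancel the pending stake removal on topic 1
+allorad tx emissions cancel-remove-stake allo1yourreputeraddress 1 --node <RPC_URL>
+```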
+
+#### Use Case
+
**Why use it?**
- This command allows reputers to cancel a stake removal request if they change their mind and wish to keep their stake in the topic.
**Example Scenario:**
- A reputer wants to cancel their stake removal request because they decide to maintain their position in the topic for an additional epoch.
----
+**Practical Applications**:
+- Market condition changes that make continued participation attractive
+- Improved topic performance that warrants continued involvement
+- Strategic repositioning based on network developments
-## Delegate Stake to a Reputer
+## Delegation Operations
+### Delegate Stake to a Reputer
+
+**Transaction Details**:
- **RPC Method:** `DelegateStake`
- **Command:** `delegate-stake [sender] [topic_id] [reputer] [amount]`
- **Description:** Delegates stake from the sender to a specific reputer for a topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender (delegator).
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer receiving the delegated stake.
- - `amount`: The amount of stake to be delegated.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender (delegator).
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer receiving the delegated stake.
+- **`amount`**: The amount of stake to be delegated.
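+
+A placeholder sketch (delegator address, topic ID, reputer address, and amount are all illustrative):
+
+```bash
+# Delegate 250000 uallo to a reputer on topic 1
+allorad tx emissions delegate-stake allo1yourdelegatoraddress 1 allo1targetreputeraddress 250000 --node <RPC_URL>
+```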
+
+#### Use Case
+
**Why use it?**
- This command is used by delegators to delegate their stake to a reputer, giving the reputer more authority and influence within a specific topic.
**Example Scenario:**
- As a delegator, you want to support a reputer you trust by delegating your tokens to them for a particular topic.
----
+**Delegation Strategy**:
+- **Due diligence**: Research reputer performance and reliability
+- **Diversification**: Consider spreading delegations across multiple reputers
+- **Performance monitoring**: Track delegated reputer effectiveness regularly
+- **Risk management**: Balance potential rewards with delegation risks
-## Remove Delegated Stake from a Reputer
+### Remove Delegated Stake from a Reputer
+**Transaction Details**:
- **RPC Method:** `RemoveDelegateStake`
- **Command:** `remove-delegate-stake [sender] [topic_id] [reputer] [amount]`
- **Description:** Removes delegated stake from a reputer for a topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender (delegator).
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer whose delegated stake is being removed.
- - `amount`: The amount of stake to be removed.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender (delegator).
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer whose delegated stake is being removed.
+- **`amount`**: The amount of stake to be removed.
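+
+A placeholder sketch of starting a delegated-stake withdrawal:
+
+```bash
+# Begin withdrawing 250000 uallo delegated to a reputer on topic 1
+allorad tx emissions remove-delegate-stake allo1yourdelegatoraddress 1 allo1targetreputeraddress 250000 --node <RPC_URL>
+```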
+
+#### Use Case
+
**Why use it?**
- This command is used when a delegator wants to withdraw or reduce the stake they have delegated to a reputer in a topic.
**Example Scenario:**
- A delegator wants to reduce their stake delegated to a reputer after reassessing the reputer's performance in a topic.
----
+**Considerations for Removal**:
+- Monitor reputer performance before removing stake
+- Consider alternative reputers for reallocation
+- Account for unbonding periods in liquidity planning
-## Cancel Pending Delegated Stake Removal
+### Cancel Pending Delegated Stake Removal
+**Transaction Details**:
- **RPC Method:** `CancelRemoveDelegateStake`
- **Command:** `cancel-remove-delegate-stake [sender] [topic_id] [reputer]`
- **Description:** Cancels the removal of delegated stake for a delegator staking on a reputer in a topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender (delegator).
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer whose delegated stake removal is being canceled.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender (delegator).
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer whose delegated stake removal is being canceled.
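+
+A placeholder sketch of canceling the pending withdrawal:
+
+```bash
+# Cancel the pending delegated-stake removal on topic 1
+allorad tx emissions cancel-remove-delegate-stake allo1yourdelegatoraddress 1 allo1targetreputeraddress --node <RPC_URL>
+```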
+
+#### Use Case
+
**Why use it?**
- This command allows delegators to cancel a delegated stake removal request if they change their mind and want to keep their stake with the reputer.
**Example Scenario:**
- A delegator decides to cancel their pending stake removal and continue supporting the reputer in the topic.
----
+**When to Cancel**:
+- Improved reputer performance during unbonding period
+- Changed market conditions that favor continued delegation
+- Strategic reallocation decisions that no longer apply
-## Claim Rewards for Delegated Stake
+## Reward Management
+### Claim Rewards for Delegated Stake
+
+**Transaction Details**:
- **RPC Method:** `RewardDelegateStake`
- **Command:** `reward-delegate-stake [sender] [topic_id] [reputer]`
- **Description:** Claims the rewards for a delegator who has delegated stake to a reputer in a specific topic.
-- **Positional Arguments:**
- - `sender`: The address of the sender (delegator).
- - `topic_id`: The identifier of the topic.
- - `reputer`: The address of the reputer to whom the stake was delegated.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender (delegator).
+- **`topic_id`**: The identifier of the topic.
+- **`reputer`**: The address of the reputer to whom the stake was delegated.
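+
+A placeholder sketch of claiming delegation rewards:
+
+```bash
+# Claim accumulated rewards from a reputer you delegated to on topic 1
+allorad tx emissions reward-delegate-stake allo1yourdelegatoraddress 1 allo1targetreputeraddress --node <RPC_URL>
+```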
+
+#### Use Case
+
**Why use it?**
- This command is used by delegators to claim their rewards based on the performance of the reputer they delegated stake to.
**Example Scenario:**
- A delegator wants to claim their rewards for a topic after their reputer has successfully contributed to the topic's outcomes.
+
+**Reward Optimization**:
+- **Timing**: Claim rewards regularly to compound returns
+- **Reinvestment**: Consider restaking rewards for increased future returns
+- **Performance tracking**: Monitor reward rates to assess delegation effectiveness
+- **Tax considerations**: Keep records of reward claims for reporting purposes
+
+## Best Practices
+
+### Stake Management Strategy
+
+**Risk Assessment**:
+- **Start small**: Begin with modest stake amounts to understand dynamics
+- **Diversify**: Spread stakes across multiple topics and reputers
+- **Monitor performance**: Track reputer effectiveness and adjust accordingly
+- **Stay informed**: Keep up with network updates and parameter changes
+
+### Delegation Guidelines
+
+**Reputer Selection Criteria**:
+- **Performance history**: Review past accuracy and consistency
+- **Activity level**: Ensure regular participation and responsiveness
+- **Stake commitment**: Look for reputers with significant self-stake
+- **Communication**: Prefer reputers who communicate with delegators
+
+### Operational Considerations
+
+**Transaction Management**:
+- **Gas costs**: Factor transaction fees into stake calculations
+- **Timing**: Consider network congestion when submitting transactions
+- **Confirmation**: Always verify transaction success before proceeding
+- **Record keeping**: Maintain detailed records of all stake operations
+
+## Common Workflows
+
+### New Reputer Setup
+
+1. **Initial stake**: Use `add-stake` to establish minimum required stake
+2. **Performance building**: Focus on accuracy to attract delegators
+3. **Stake optimization**: Gradually increase stake based on performance
+4. **Community building**: Engage with potential delegators
+
+### Delegator Portfolio Management
+
+1. **Research**: Analyze reputer performance across topics
+2. **Initial delegation**: Use `delegate-stake` for promising reputers
+3. **Monitoring**: Track performance and adjust delegations
+4. **Reward claiming**: Regularly use `reward-delegate-stake` for returns
+
+### Risk Management
+
+1. **Performance monitoring**: Regularly assess reputer effectiveness
+2. **Stake adjustment**: Use removal commands when necessary
+3. **Diversification**: Maintain balanced portfolio across reputers
+4. **Emergency procedures**: Know how to quickly cancel operations if needed
+
+## Next Steps
+
+- [Learn to query reputer data](/devs/reputers/query-reputer-data) for performance monitoring
+- [Understand reputer deployment](/devs/reputers/reputers) to become a network participant
+- [Explore reputer economics](/home/layers/consensus/reputers) for advanced strategies
diff --git a/pages/devs/sdk/allora-sdk-py.mdx b/pages/devs/sdk/allora-sdk-py.mdx
index 2b4ed53..ff1d960 100644
--- a/pages/devs/sdk/allora-sdk-py.mdx
+++ b/pages/devs/sdk/allora-sdk-py.mdx
@@ -2,19 +2,47 @@ import { Callout } from 'nextra/components'
# Allora Python SDK
-The Allora Python SDK provides a convenient way to interact with the Allora API from Python applications.
+## What You'll Learn
+- How to install and set up the Allora Python SDK for your applications
+- Complete API reference with method signatures and usage examples
+- Best practices for integrating Allora Network data into Python workflows
+- Error handling and authentication strategies for production use
+
+## Overview
+
+**The Allora Python SDK provides a convenient way to interact with the Allora API from Python applications.**
+
+### Why Use the Python SDK?
+
+**Development Benefits**:
+- **Pythonic interface**: Natural Python patterns and conventions
+- **Data science integration**: Seamless compatibility with pandas, numpy, and ML libraries
+- **Production ready**: Robust error handling and authentication support
+- **Comprehensive coverage**: Access to all Allora Network API endpoints
+
+**Use Cases**:
+- **Data analysis**: Retrieve and analyze network inference data
+- **Machine learning**: Integrate predictions into ML pipelines
+- **Backend services**: Build Python web applications with Allora data
+- **Research**: Academic and commercial research applications
## Installation
-You can install the Allora Python SDK using pip:
+**You can install the Allora Python SDK using pip:**
```bash
pip install allora_sdk
```
+**Installation Benefits**:
+- **Simple setup**: One command installation with automatic dependencies
+- **Version management**: Pip handles SDK updates and compatibility
+- **Virtual environment support**: Works with conda, venv, and other environments
+- **Cross-platform**: Compatible with Windows, macOS, and Linux
+
## Basic Usage
-Here's how to use the Allora Python SDK:
+**Here's how to use the Allora Python SDK:**
```python
from allora_sdk import AlloraClient
@@ -53,11 +81,27 @@ def fetch_price_inference():
print(f"Error fetching price inference: {e}")
```
+### Code Example Breakdown
+
+**Client Initialization**:
+- **Chain selection**: Choose between testnet and mainnet environments
+- **API key**: Optional authentication for production use and rate limiting
+- **Configuration**: Simple constructor with sensible defaults
+
+**Error Handling**:
+- **Try-catch blocks**: Robust error management for network operations
+- **Graceful degradation**: Handle API failures without crashing applications
+- **Debugging support**: Clear error messages for development and troubleshooting
+
+<Callout>
+The API key is optional but recommended for production use. If not provided, a default API key will be used, which may be subject to rate limiting.
+</Callout>
+
## API Reference
### `AlloraClient`
-The main class for interacting with the Allora API.
+**The main class for interacting with the Allora API.**
#### Constructor
@@ -73,168 +117,218 @@ def __init__(self, chain="testnet", api_key=None, base_api_url=None):
"""
```
-#### Methods
+**Constructor Parameters**:
+- **`chain`**: Network environment selection for development or production
+- **`api_key`**: Authentication token for enhanced rate limits and tracking
+- **`base_api_url`**: Custom API endpoint for specialized deployments
+
+#### Core Methods
##### `get_all_topics()`
-Fetches all available topics from the Allora API.
+**Fetches all available topics from the Allora API.**
+**Method Signature**:
```python
-def get_all_topics(self):
+def get_all_topics(self) -> dict:
"""
- Fetch all available topics from the Allora API.
+ Retrieve all available topics from the network.
Returns:
- list: A list of all available topics.
+ dict: Dictionary containing all topics with their metadata
Raises:
- Exception: If the API request fails.
+ AlloraAPIError: When API request fails
+ NetworkError: When network connectivity issues occur
"""
```
-##### `get_inference_by_topic_id(topic_id, signature_format="ethereum-11155111")`
+**Usage Pattern**:
+- **Topic discovery**: Find available prediction categories
+- **Network exploration**: Understand current network offerings
+- **Integration planning**: Select appropriate topics for your application
+
+##### `get_inference_by_topic_id(topic_id)`
-Fetches an inference for a specific topic from the Allora API.
+**Retrieves the latest inference for a specific topic.**
+**Method Signature**:
```python
-def get_inference_by_topic_id(self, topic_id, signature_format="ethereum-11155111"):
+def get_inference_by_topic_id(self, topic_id: int) -> dict:
"""
- Fetch an inference for a specific topic from the Allora API.
+ Get the most recent inference for a given topic.
Args:
- topic_id (int): The unique identifier of the topic to get inference for.
- signature_format (str, optional): The format of the signature.
- Defaults to "ethereum-11155111".
-
+ topic_id (int): The ID of the topic to query
+
Returns:
- dict: The inference data.
-
+ dict: Inference data including prediction and metadata
+
Raises:
- Exception: If the API request fails.
+ ValueError: When topic_id is invalid
+ AlloraAPIError: When API request fails
"""
```
-##### `get_price_inference(asset, timeframe, signature_format="ethereum-11155111")`
+**Use Cases**:
+- **Real-time data**: Access current network predictions
+- **Application integration**: Feed live data into your applications
+- **Decision support**: Use predictions for automated decision making
-Fetches a price inference for a specific asset and timeframe from the Allora API.
+##### `get_price_inference(asset, timeframe)`
+**Fetches price predictions for specific assets and timeframes.**
+
+**Method Signature**:
```python
-def get_price_inference(self, asset, timeframe, signature_format="ethereum-11155111"):
+def get_price_inference(self, asset: str, timeframe: str) -> dict:
"""
- Fetch a price inference for a specific asset and timeframe from the Allora API.
+ Retrieve price predictions for cryptocurrency assets.
Args:
- asset (str): The asset to get price inference for. Can be "BTC" or "ETH".
- timeframe (str): The timeframe to get price inference for. Can be "5m" or "8h".
- signature_format (str, optional): The format of the signature.
- Defaults to "ethereum-11155111".
-
+ asset (str): Asset symbol (e.g., "BTC", "ETH")
+ timeframe (str): Prediction timeframe (e.g., "1h", "8h", "24h")
+
Returns:
- dict: The inference data.
-
+ dict: Price inference data with prediction and confidence metrics
+
Raises:
- Exception: If the API request fails.
+ ValueError: When asset or timeframe is not supported
+ AlloraAPIError: When API request fails
"""
```
-## Examples
+**Supported Parameters**:
+- **Assets**: Major cryptocurrencies such as BTC and ETH
+- **Timeframes**: Prediction horizons from five-minute to eight-hour windows
+- **Return format**: Structured data with predictions and confidence intervals
+
+## Advanced Usage Patterns
-### Fetching and Using Price Inference
+### Data Integration
+**Pandas Integration Example**:
```python
import os
+import pandas as pd
from allora_sdk import AlloraClient
-# Initialize the client
-client = AlloraClient(
- chain="testnet",
- api_key=os.environ.get("ALLORA_API_KEY")
-)
+client = AlloraClient(chain="mainnet", api_key=os.environ.get("ALLORA_API_KEY"))
-try:
- # Fetch BTC price inference for 8-hour timeframe
- inference = client.get_price_inference(
- asset="BTC",
- timeframe="8h"
- )
-
- # Extract the network inference value
- network_inference = inference["inference_data"]["network_inference"]
- print(f"BTC 8-hour price inference: {network_inference}")
-
- # Extract confidence interval values
- confidence_intervals = inference["inference_data"]["confidence_interval_values"]
- print("Confidence intervals:", confidence_intervals)
-
- # Use the inference data in your application
- # ...
-except Exception as e:
- print(f"Error fetching BTC price inference: {e}")
+# Get multiple topics and convert to DataFrame
+topics = client.get_all_topics()
+df = pd.DataFrame(topics['data'])
+
+# Analyze topic metadata
+print(df.describe())
+print(df.groupby('is_active').size())  # group by a field present in the topic schema
```
-### Fetching All Topics and Displaying Them
+**Benefits for Data Science**:
+- **DataFrame compatibility**: Direct integration with pandas workflows
+- **Statistical analysis**: Easy computation of metrics and summaries
+- **Visualization**: Seamless plotting with matplotlib, seaborn, or plotly
+- **Machine learning**: Feature engineering for predictive models
+
+### Production Deployment
+**Robust Error Handling**:
```python
-import os
-from allora_sdk import AlloraClient
+import logging
+import time
+from allora_sdk import AlloraClient, AlloraAPIError
-# Initialize the client
-client = AlloraClient(
- chain="testnet",
- api_key=os.environ.get("ALLORA_API_KEY")
-)
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
-try:
- # Fetch all topics
- topics = client.get_all_topics()
+class AlloraService:
+    def __init__(self, api_key):
+        self.client = AlloraClient(
+            chain="mainnet",
+            api_key=api_key
+        )
- # Display topics
- print(f"Found {len(topics)} topics:")
- for topic in topics:
- print(f"- Topic ID: {topic['topic_id']}")
- print(f" Name: {topic['topic_name']}")
- print(f" Description: {topic.get('description', 'No description')}")
- print(f" Active: {'Yes' if topic.get('is_active') else 'No'}")
- print(f" Worker count: {topic['worker_count']}")
- print(f" Updated at: {topic['updated_at']}")
- print("---")
-except Exception as e:
- print(f"Error fetching topics: {e}")
+    def get_inference_safely(self, topic_id, retries=3):
+        for attempt in range(retries):
+            try:
+                return self.client.get_inference_by_topic_id(topic_id)
+            except AlloraAPIError as e:
+                logger.warning(f"API error on attempt {attempt + 1}: {e}")
+                if attempt == retries - 1:
+                    raise
+                time.sleep(2 ** attempt)  # Exponential backoff
```
-### Using Inference Data in a Web Application
+**Production Considerations**:
+- **Retry logic**: Handle temporary network failures gracefully
+- **Logging**: Track API usage and errors for monitoring
+- **Rate limiting**: Implement delays to respect API limits
+- **Caching**: Store frequently accessed data to reduce API calls
+
+## Authentication and Configuration
+
+### API Key Management
+
+**Best Practices**:
+- **Environment variables**: Store API keys securely outside code
+- **Configuration files**: Use config files with proper permissions
+- **Key rotation**: Regularly update API keys for security
+- **Monitoring**: Track API usage for cost and performance optimization
+
+### Network Selection
+**Environment Strategy**:
+- **Development**: Use testnet for development and testing
+- **Staging**: Validate with testnet before production deployment
+- **Production**: Switch to mainnet for live applications
+- **Fallback**: Implement graceful degradation when networks are unavailable
+
+## Integration Examples
+
+### Web Application Integration
+
+**Flask Example**:
```python
from flask import Flask, jsonify
import os
from allora_sdk import AlloraClient
app = Flask(__name__)
+client = AlloraClient(chain="mainnet", api_key=os.getenv("ALLORA_API_KEY"))
-# Initialize the client
-client = AlloraClient(
- chain="testnet",
- api_key=os.environ.get("ALLORA_API_KEY")
-)
-
-@app.route('/api/price/btc')
-def get_btc_price():
+@app.route('/api/inference/<int:topic_id>')
+def get_inference(topic_id):
    try:
- # Fetch BTC price inference
- inference = client.get_price_inference(
- asset="BTC",
- timeframe="8h"
- )
-
- # Extract the network inference value and confidence intervals
- return jsonify({
- 'price': inference["inference_data"]["network_inference"],
- 'confidence_intervals': inference["inference_data"]["confidence_interval_values"],
- 'timestamp': inference["inference_data"]["timestamp"]
- })
+        inference = client.get_inference_by_topic_id(topic_id)
+        return jsonify(inference)
    except Exception as e:
- return jsonify({'error': str(e)}), 500
+        return jsonify({"error": str(e)}), 500
+```
+
+### Automated Trading Systems
-if __name__ == '__main__':
- app.run(debug=True)
+**Trading Bot Integration**:
+```python
+class TradingBot:
+    def __init__(self, api_key):
+        self.allora = AlloraClient(chain="mainnet", api_key=api_key)
+
+    def make_trading_decision(self, asset):
+        price_inference = self.allora.get_price_inference(asset, "8h")
+        # 'confidence' is a hypothetical response field; adapt to the actual schema
+        confidence = price_inference.get('confidence', 0)
+
+        if confidence > 0.8:
+            # execute_trade is assumed to be implemented elsewhere in the bot
+            return self.execute_trade(asset, price_inference)
+        return None
```
+
+## Prerequisites
+
+- **Python 3.7+**: Modern Python version with full feature support
+- **Network access**: Internet connectivity for API communications
+- **API credentials**: Optional API key for enhanced functionality
+- **Development environment**: Python package manager (pip) and virtual environment
+
+## Next Steps
+
+- [Explore the TypeScript SDK](/devs/sdk/allora-sdk-ts) for JavaScript/TypeScript applications
+- [Review API endpoint documentation](/devs/consumers/allora-api-endpoint) for additional integration options
+- [Study consumer examples](/devs/consumers/existing-consumers) for implementation patterns
+- [Learn about network topics](/devs/get-started/network-interaction#available-topics) for data source understanding
diff --git a/pages/devs/sdk/allora-sdk-ts.mdx b/pages/devs/sdk/allora-sdk-ts.mdx
index b5f0b40..02c09c8 100644
--- a/pages/devs/sdk/allora-sdk-ts.mdx
+++ b/pages/devs/sdk/allora-sdk-ts.mdx
@@ -2,11 +2,33 @@ import { Callout } from 'nextra/components'
# Allora TypeScript SDK
-The Allora TypeScript SDK provides a convenient way to interact with the Allora API from JavaScript and TypeScript applications.
+## What You'll Learn
+- How to install and configure the Allora TypeScript SDK for web and Node.js applications
+- Complete TypeScript API reference with method signatures and usage examples
+- Best practices for integration with modern JavaScript frameworks and TypeScript projects
+- Authentication strategies and production deployment considerations
+
+## Overview
+
+**The Allora TypeScript SDK provides a convenient way to interact with the Allora API from JavaScript and TypeScript applications.**
+
+### Why Use the TypeScript SDK?
+
+**Development Benefits**:
+- **Type safety**: Full TypeScript support with IntelliSense and compile-time error checking
+- **Modern JavaScript**: Native async/await support and Promise-based architecture
+- **Framework integration**: Seamless compatibility with React, Vue, Angular, and Node.js
+- **Production ready**: Built-in error handling and retry mechanisms
+
+**Technical Advantages**:
+- **Developer experience**: Auto-completion, type checking, and comprehensive documentation
+- **Bundle optimization**: Tree-shaking support for minimal production bundles
+- **Cross-platform**: Works in browsers, Node.js, and serverless environments
+- **Community support**: Active maintenance and regular updates
## Installation
-You can install the Allora TypeScript SDK using npm or yarn:
+**You can install the Allora TypeScript SDK using npm or yarn:**
```bash
# Using npm
@@ -16,9 +38,15 @@ npm install @alloralabs/allora-sdk
yarn add @alloralabs/allora-sdk
```
+**Installation Benefits**:
+- **Package manager compatibility**: Works with npm, yarn, pnpm, and other package managers
+- **Dependency management**: Automatic handling of required dependencies
+- **Version control**: Semantic versioning for predictable updates
+- **Security**: Regular security updates and vulnerability patches
+
## Basic Usage
-Here's a simple example of how to use the Allora TypeScript SDK:
+**Here's a simple example of how to use the Allora TypeScript SDK:**
```typescript
import { AlloraAPIClient, ChainSlug } from '@alloralabs/allora-sdk/v2'
@@ -63,6 +91,18 @@ async function fetchPriceInference() {
}
```
+### Code Example Breakdown
+
+**Client Configuration**:
+- **Chain selection**: Toggle between testnet and mainnet environments
+- **API authentication**: Optional API key for enhanced rate limits
+- **Environment variables**: Secure credential management with process.env
+
+**Error Handling Pattern**:
+- **Try-catch blocks**: Comprehensive error management for network operations
+- **Graceful failures**: Handle API issues without crashing applications
+- **Debug support**: Clear error messages for development and troubleshooting
+
The API key is optional but recommended for production use. If not provided, a default API key will be used, which may be subject to rate limiting.
@@ -71,7 +111,7 @@ The API key is optional but recommended for production use. If not provided, a d
### `AlloraAPIClient`
-The main class for interacting with the Allora API.
+**The main class for interacting with the Allora API.**
#### Constructor
@@ -79,27 +119,38 @@ The main class for interacting with the Allora API.
constructor(config: AlloraAPIClientConfig)
```
-Parameters:
-- `config`: An object with the following properties:
- - `chainSlug`: The chain to use. Can be `ChainSlug.TESTNET` or `ChainSlug.MAINNET`.
- - `apiKey`: Your API key. Optional, but recommended for production use.
- - `baseAPIUrl`: The base URL for the API. Optional, defaults to `https://api.allora.network/v2`.
+**Parameters:**
+- **`config`**: An object with the following properties:
+ - **`chainSlug`**: The chain to use. Can be `ChainSlug.TESTNET` or `ChainSlug.MAINNET`.
+ - **`apiKey`**: Your API key. Optional, but recommended for production use.
+ - **`baseAPIUrl`**: The base URL for the API. Optional, defaults to `https://api.allora.network/v2`.
-#### Methods
+**Configuration Options**:
+- **Environment targeting**: Switch between development and production networks
+- **Custom endpoints**: Override default API URLs for specialized deployments
+- **Authentication setup**: Manage API keys for rate limiting and usage tracking
+
+#### Core Methods
##### `getAllTopics()`
-Fetches all available topics from the Allora API.
+**Fetches all available topics from the Allora API.**
```typescript
async getAllTopics(): Promise<AlloraTopic[]>
```
-Returns: A promise that resolves to an array of all available topics.
+**Returns:** A promise that resolves to an array of all available topics.
+
+**Usage Scenarios**:
+- **Topic discovery**: Explore available prediction categories and markets
+- **Application initialization**: Load topic data for user interface components
+- **Analytics**: Analyze network activity and topic popularity
+- **Integration planning**: Select appropriate topics for specific use cases
##### `getInferenceByTopicID(topicID, signatureFormat)`
-Fetches an inference for a specific topic from the Allora API.
+**Fetches an inference for a specific topic from the Allora API.**
```typescript
async getInferenceByTopicID(
@@ -108,15 +159,21 @@ async getInferenceByTopicID(
): Promise<AlloraInference>
```
-Parameters:
-- `topicID`: The unique identifier of the topic to get inference for.
-- `signatureFormat`: The format of the signature. Optional, defaults to `SignatureFormat.ETHEREUM_SEPOLIA`.
+**Parameters:**
+- **`topicID`**: The unique identifier of the topic to get inference for.
+- **`signatureFormat`**: The format of the signature. Optional, defaults to `SignatureFormat.ETHEREUM_SEPOLIA`.
-Returns: A promise that resolves to the inference data.
+**Returns:** A promise that resolves to the inference data.
+
+**Method Benefits**:
+- **Real-time data**: Access current network predictions and inference results
+- **Signature verification**: Built-in cryptographic proof of data authenticity
+- **Flexible formatting**: Support for multiple signature standards
+- **Type safety**: Strong typing ensures correct parameter usage
##### `getPriceInference(asset, timeframe, signatureFormat)`
-Fetches a price inference for a specific asset and timeframe from the Allora API.
+**Fetches a price inference for a specific asset and timeframe from the Allora API.**
```typescript
async getPriceInference(
@@ -126,14 +183,20 @@ async getPriceInference(
): Promise<AlloraInference>
```
-Parameters:
-- `asset`: The asset to get price inference for. Can be `PriceInferenceToken.BTC` or `PriceInferenceToken.ETH`.
-- `timeframe`: The timeframe to get price inference for. Can be `PriceInferenceTimeframe.FIVE_MIN` or `PriceInferenceTimeframe.EIGHT_HOURS`.
-- `signatureFormat`: The format of the signature. Optional, defaults to `SignatureFormat.ETHEREUM_SEPOLIA`.
+**Parameters:**
+- **`asset`**: The asset to get price inference for. Can be `PriceInferenceToken.BTC` or `PriceInferenceToken.ETH`.
+- **`timeframe`**: The timeframe to get price inference for. Can be `PriceInferenceTimeframe.FIVE_MIN` or `PriceInferenceTimeframe.EIGHT_HOURS`.
+- **`signatureFormat`**: The format of the signature. Optional, defaults to `SignatureFormat.ETHEREUM_SEPOLIA`.
+
+**Returns:** A promise that resolves to the inference data.
-Returns: A promise that resolves to the inference data.
+**Price Inference Applications**:
+- **Trading applications**: Integrate predictions into trading algorithms and strategies
+- **Portfolio management**: Make informed decisions based on network consensus
+- **Market analysis**: Access crowd-sourced price predictions for research
+- **Risk assessment**: Evaluate market conditions using aggregated intelligence
-### Enums
+### Type Definitions and Enums
#### `ChainSlug`
@@ -144,6 +207,12 @@ enum ChainSlug {
}
```
+**Chain Selection Strategy**:
+- **Development workflow**: Use testnet for development and testing phases
+- **Production deployment**: Switch to mainnet for live applications
+- **Environment parity**: Maintain consistent code across environments
+- **Testing strategy**: Validate functionality before mainnet deployment
+
#### `PriceInferenceToken`
```typescript
@@ -153,208 +222,180 @@ enum PriceInferenceToken {
}
```
+**Supported Assets**:
+- **Major cryptocurrencies**: Bitcoin and Ethereum price predictions
+- **Market coverage**: Focus on most liquid and actively traded assets
+- **Expansion ready**: Framework supports additional assets as network grows
+- **Standardized symbols**: Consistent asset identification across applications
+
#### `PriceInferenceTimeframe`
```typescript
enum PriceInferenceTimeframe {
  FIVE_MIN = "5m",
EIGHT_HOURS = "8h",
}
```
-#### `SignatureFormat`
+**Timeframe Strategy**:
+- **Short-term predictions**: 5-minute intervals for high-frequency trading
+- **Medium-term forecasts**: 8-hour windows for swing trading and analysis
+- **Prediction accuracy**: Different timeframes optimize for different use cases
+- **Data frequency**: Balance between prediction accuracy and computational efficiency
+
+## Advanced Integration Patterns
+
+### React Application Integration
+**React Hook Example**:
```typescript
-enum SignatureFormat {
- ETHEREUM_SEPOLIA = "ethereum-11155111",
-}
-```
+import { useState, useEffect } from 'react';
+import { AlloraAPIClient, ChainSlug } from '@alloralabs/allora-sdk/v2';
-### Interfaces
+export function useAlloraTopics() {
+ const [topics, setTopics] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+
+ useEffect(() => {
+ const client = new AlloraAPIClient({
+ chainSlug: ChainSlug.MAINNET,
+ apiKey: process.env.REACT_APP_ALLORA_API_KEY,
+ });
-#### `AlloraAPIClientConfig`
+ client.getAllTopics()
+ .then(setTopics)
+ .catch(setError)
+ .finally(() => setLoading(false));
+ }, []);
-```typescript
-interface AlloraAPIClientConfig {
- chainSlug?: ChainSlug;
- apiKey?: string;
- baseAPIUrl?: string;
+ return { topics, loading, error };
}
```
-#### `AlloraTopic`
+### Node.js Backend Integration
+**Express.js API Example**:
```typescript
-interface AlloraTopic {
- topic_id: number;
- topic_name: string;
- description?: string | null;
- epoch_length: number;
- ground_truth_lag: number;
- loss_method: string;
- worker_submission_window: number;
- worker_count: number;
- reputer_count: number;
- total_staked_allo: number;
- total_emissions_allo: number;
- is_active: boolean | null;
- updated_at: string;
-}
-```
+import express from 'express';
+import { AlloraAPIClient, ChainSlug } from '@alloralabs/allora-sdk/v2';
-#### `AlloraInferenceData`
+const app = express();
+const alloraClient = new AlloraAPIClient({
+ chainSlug: ChainSlug.MAINNET,
+ apiKey: process.env.ALLORA_API_KEY,
+});
-```typescript
-interface AlloraInferenceData {
- network_inference: string;
- network_inference_normalized: string;
- confidence_interval_percentiles: string[];
- confidence_interval_percentiles_normalized: string[];
- confidence_interval_values: string[];
- confidence_interval_values_normalized: string[];
- topic_id: string;
- timestamp: number;
- extra_data: string;
-}
+app.get('/api/inference/:topicId', async (req, res) => {
+ try {
+ const topicId = parseInt(req.params.topicId);
+ const inference = await alloraClient.getInferenceByTopicID(topicId);
+ res.json(inference);
+ } catch (error) {
+ res.status(500).json({ error: error.message });
+ }
+});
```
-#### `AlloraInference`
+### Production Deployment Considerations
-```typescript
-interface AlloraInference {
- signature: string;
- inference_data: AlloraInferenceData;
-}
-```
+**Environment Configuration**:
+- **API key management**: Use environment variables for secure credential storage
+- **Rate limiting**: Implement client-side rate limiting to respect API quotas
+- **Error recovery**: Add retry logic with exponential backoff for network failures
+- **Monitoring**: Track API usage and performance metrics for optimization
+
+**Performance Optimization**:
+- **Response caching**: Cache frequently accessed data to reduce API calls
+- **Request batching**: Group multiple requests when possible to improve efficiency
+- **Error boundaries**: Implement proper error handling to prevent application crashes
+- **Load balancing**: Distribute API requests across multiple instances if needed
-## Examples
+## Framework-Specific Guides
-### Fetching and Using Price Inference
+### Next.js Integration
+**Server-Side Usage**:
```typescript
-import { AlloraAPIClient, ChainSlug, PriceInferenceToken, PriceInferenceTimeframe } from '@alloralabs/allora-sdk/v2'
+// pages/api/topics.ts
+import { AlloraAPIClient, ChainSlug } from '@alloralabs/allora-sdk/v2';
-async function fetchAndUseBTCPriceInference() {
- // Initialize the client
- const alloraClient = new AlloraAPIClient({
- chainSlug: ChainSlug.TESTNET,
+export default async function handler(req, res) {
+ const client = new AlloraAPIClient({
+ chainSlug: ChainSlug.MAINNET,
apiKey: process.env.ALLORA_API_KEY,
});
try {
- // Fetch BTC price inference for 8-hour timeframe
- const inference = await alloraClient.getPriceInference(
- PriceInferenceToken.BTC,
- PriceInferenceTimeframe.EIGHT_HOURS
- );
-
- // Extract the network inference value
- const networkInference = inference.inference_data.network_inference;
- console.log(`BTC 8-hour price inference: ${networkInference}`);
-
- // Extract confidence interval values
- const confidenceIntervals = inference.inference_data.confidence_interval_values;
- console.log('Confidence intervals:', confidenceIntervals);
-
- // Use the inference data in your application
- // ...
+ const topics = await client.getAllTopics();
+ res.status(200).json(topics);
} catch (error) {
- console.error('Error fetching BTC price inference:', error);
+ res.status(500).json({ error: 'Failed to fetch topics' });
}
}
```
-### Fetching All Topics and Displaying Them
+### Vue.js Integration
+**Composition API Usage**:
```typescript
-import { AlloraAPIClient, ChainSlug } from '@alloralabs/allora-sdk/v2'
-
-async function displayAllTopics() {
- // Initialize the client
- const alloraClient = new AlloraAPIClient({
- chainSlug: ChainSlug.TESTNET,
- apiKey: process.env.ALLORA_API_KEY,
- });
-
- try {
- // Fetch all topics
- const topics = await alloraClient.getAllTopics();
-
- // Display topics
- console.log(`Found ${topics.length} topics:`);
- topics.forEach(topic => {
- console.log(`- Topic ID: ${topic.topic_id}`);
- console.log(` Name: ${topic.topic_name}`);
- console.log(` Description: ${topic.description || 'No description'}`);
- console.log(` Active: ${topic.is_active ? 'Yes' : 'No'}`);
- console.log(` Worker count: ${topic.worker_count}`);
- console.log(` Updated at: ${topic.updated_at}`);
- console.log('---');
- });
- } catch (error) {
- console.error('Error fetching topics:', error);
- }
+import { ref, onMounted } from 'vue';
+import { AlloraAPIClient, ChainSlug } from '@alloralabs/allora-sdk/v2';
+
+export function useAlloraInference(topicId: number) {
+ const inference = ref(null);
+ const loading = ref(false);
+ const error = ref(null);
+
+ const fetchInference = async () => {
+ loading.value = true;
+ try {
+ const client = new AlloraAPIClient({
+ chainSlug: ChainSlug.MAINNET,
+ apiKey: process.env.VUE_APP_ALLORA_API_KEY,
+ });
+ inference.value = await client.getInferenceByTopicID(topicId);
+ } catch (err) {
+ error.value = err.message;
+ } finally {
+ loading.value = false;
+ }
+ };
+
+ onMounted(fetchInference);
+
+ return { inference, loading, error, refetch: fetchInference };
}
```
-### Using Inference Data with React
+## Security and Best Practices
-```typescript
-import React, { useState, useEffect } from 'react';
-import { AlloraAPIClient, ChainSlug, PriceInferenceToken, PriceInferenceTimeframe } from '@alloralabs/allora-sdk/v2';
+### API Key Security
-function PriceDisplay() {
- const [price, setPrice] = useState(null);
- const [loading, setLoading] = useState(true);
- const [error, setError] = useState(null);
+**Protection Strategies**:
+- **Environment variables**: Never hardcode API keys in source code
+- **Build-time injection**: Use build tools to inject keys at deployment time
+- **Key rotation**: Regularly update API keys for enhanced security
+- **Access control**: Limit API key permissions to minimum required scope
- useEffect(() => {
- const fetchPrice = async () => {
- try {
- setLoading(true);
-
- // Initialize the client
- const alloraClient = new AlloraAPIClient({
- chainSlug: ChainSlug.TESTNET,
- apiKey: process.env.REACT_APP_ALLORA_API_KEY,
- });
-
- // Fetch ETH price inference
- const inference = await alloraClient.getPriceInference(
- PriceInferenceToken.ETH,
- PriceInferenceTimeframe.FIVE_MIN
- );
-
- // Set the price
- setPrice(inference.inference_data.network_inference);
- setError(null);
- } catch (err) {
- setError('Failed to fetch price data');
- console.error(err);
- } finally {
- setLoading(false);
- }
- };
-
- fetchPrice();
-
- // Refresh price every 5 minutes
- const intervalId = setInterval(fetchPrice, 5 * 60 * 1000);
-
- // Clean up interval on component unmount
- return () => clearInterval(intervalId);
- }, []);
+### Error Handling Best Practices
-  if (loading) return <div>Loading price data...</div>;
-  if (error) return <div>Error: {error}</div>;
+**Robust Error Management**:
+- **Specific error types**: Handle different error categories appropriately
+- **User experience**: Show meaningful error messages to end users
+- **Logging**: Log errors for debugging and monitoring purposes
+- **Graceful degradation**: Provide fallback behavior when API is unavailable
-  return (
-    <div>
-      <h2>Current ETH Price</h2>
-      <p>{price}</p>
-    </div>
-  );
-}
+## Prerequisites
-export default PriceDisplay;
-```
+- **TypeScript/JavaScript knowledge**: Proficiency in modern JavaScript and TypeScript
+- **Async programming**: Understanding of Promises, async/await, and asynchronous patterns
+- **Package management**: Familiarity with npm, yarn, or other package managers
+- **Web development**: Basic knowledge of frontend frameworks or Node.js development
+
+## Next Steps
+
+- [Explore the Python SDK](/devs/sdk/allora-sdk-py) for Python-based applications
+- [Learn about API endpoints](/devs/consumers/allora-api-endpoint) for direct API integration
+- [Study consumer examples](/devs/consumers/existing-consumers) for real-world implementation patterns
+- [Review network topics](/devs/get-started/network-interaction#available-topics) for available data sources
diff --git a/pages/devs/sdk/overview.mdx b/pages/devs/sdk/overview.mdx
index 13f96af..2696ab2 100644
--- a/pages/devs/sdk/overview.mdx
+++ b/pages/devs/sdk/overview.mdx
@@ -1,10 +1,136 @@
# Allora SDKs
-Allora provides Software Development Kits (SDKs) in multiple programming languages to make it easier to integrate with the Allora network. These SDKs provide a convenient way to interact with the Allora API and consume inferences from the network.
+## What You'll Learn
+- Overview of available Allora Network SDKs for different programming languages
+- How SDKs simplify integration with the Allora API and network
+- Choosing the right SDK for your development environment and use case
+- Getting started resources for each supported programming language
-Currently, the following SDKs are available:
+## Overview
-- [TypeScript SDK](/devs/sdk/allora-sdk-ts) - For JavaScript and TypeScript applications
-- [Python SDK](/devs/sdk/allora-sdk-py) - For Python applications
+**Allora provides Software Development Kits (SDKs) in multiple programming languages to make it easier to integrate with the Allora network.** These SDKs provide a convenient way to interact with the Allora API and consume inferences from the network.
-Choose the SDK that best fits your development environment and follow the getting started guide to begin integrating with Allora.
+### Why Use Allora SDKs?
+
+**Development Benefits**:
+- **Simplified integration**: Pre-built functions and methods for common operations
+- **Type safety**: Strong typing support for better development experience
+- **Error handling**: Built-in error management and retry mechanisms
+- **Documentation**: Comprehensive guides and examples for rapid implementation
+
+**Technical Advantages**:
+- **Consistent interface**: Standardized methods across different programming languages
+- **Optimized performance**: Efficient API calls and data processing
+- **Maintenance support**: Regular updates and community-driven improvements
+- **Production ready**: Battle-tested code for reliable applications
+
+## Available SDKs
+
+### Language Support
+
+**Currently, the following SDKs are available:**
+
+#### TypeScript/JavaScript SDK
+- **[TypeScript SDK](/devs/sdk/allora-sdk-ts)** - For JavaScript and TypeScript applications
+- **Best for**: Web applications, Node.js backends, and modern JavaScript frameworks
+- **Features**: Full type support, async/await patterns, and comprehensive API coverage
+- **Use cases**: Frontend integrations, server-side applications, and API orchestration
+
+#### Python SDK
+- **[Python SDK](/devs/sdk/allora-sdk-py)** - For Python applications
+- **Best for**: Data science workflows, backend services, and machine learning applications
+- **Features**: Pythonic interface, pandas integration, and scientific computing support
+- **Use cases**: AI/ML model integration, data analysis, and research applications
+
+### SDK Selection Guide
+
+**Choosing the Right SDK**:
+
+**Use TypeScript SDK when**:
+- Building web applications or mobile apps
+- Working with Node.js backend services
+- Developing in modern JavaScript frameworks (React, Vue, Angular)
+- Need strong typing and IDE support
+
+**Use Python SDK when**:
+- Integrating with data science pipelines
+- Building machine learning applications
+- Working with existing Python infrastructure
+- Need scientific computing and analysis tools
+
+## Getting Started
+
+### Development Workflow
+
+**Integration Process**:
+1. **Choose your SDK**: Select based on your programming language and use case
+2. **Install dependencies**: Follow the installation guide for your chosen SDK
+3. **Configure credentials**: Set up API keys and network configuration
+4. **Implement features**: Use SDK methods to integrate Allora functionality
+5. **Test thoroughly**: Validate integration with network data and responses
+
+### Quick Start Resources
+
+**Follow the getting started guide to begin integrating with Allora:**
+
+**For JavaScript/TypeScript developers**:
+- Review the [TypeScript SDK documentation](/devs/sdk/allora-sdk-ts)
+- Explore code examples and implementation patterns
+- Set up your development environment with proper dependencies
+
+**For Python developers**:
+- Check out the [Python SDK documentation](/devs/sdk/allora-sdk-py)
+- Learn about data manipulation and analysis features
+- Integrate with existing Python data science workflows
+
+## Common Integration Patterns
+
+### API Consumption
+
+**Typical Use Cases**:
+- **Inference retrieval**: Fetch predictions from specific topics
+- **Topic exploration**: Discover available prediction categories
+- **Performance monitoring**: Track accuracy and network metrics
+- **Real-time updates**: Stream live inference data for applications
+
+### Development Best Practices
+
+**Optimization Guidelines**:
+- **Error handling**: Implement robust error management and retry logic
+- **Rate limiting**: Respect API limits and implement appropriate delays
+- **Caching**: Store frequently accessed data to improve performance
+- **Monitoring**: Track API usage and application performance metrics
+
+## Community and Support
+
+### Resources
+
+**Getting Help**:
+- **Documentation**: Comprehensive guides for each SDK
+- **Examples**: Real-world implementation patterns and code samples
+- **Community**: Developer forums and discussion channels
+- **Support**: Direct assistance for integration challenges
+
+### Contributing
+
+**Ways to Contribute**:
+- **Bug reports**: Help identify and resolve SDK issues
+- **Feature requests**: Suggest improvements and new functionality
+- **Code contributions**: Submit pull requests with enhancements
+- **Documentation**: Improve guides and examples for other developers
+
+## Prerequisites
+
+- **Programming knowledge**: Proficiency in TypeScript/JavaScript or Python
+- **API understanding**: Basic concepts of REST APIs and HTTP requests
+- **Development environment**: Proper tooling and dependencies for your chosen language
+- **Network access**: Connectivity to Allora Network and external APIs
+
+## Next Steps
+
+- **Choose your SDK**: Select the SDK that best fits your development environment
+- **Follow getting started guides**: Complete the setup process for your chosen SDK
+- **Explore examples**: Study implementation patterns and best practices
+- **Build your integration**: Start developing with Allora Network functionality
diff --git a/pages/devs/topic-creators/how-to-create-topic.mdx b/pages/devs/topic-creators/how-to-create-topic.mdx
index 19c3606..349011e 100644
--- a/pages/devs/topic-creators/how-to-create-topic.mdx
+++ b/pages/devs/topic-creators/how-to-create-topic.mdx
@@ -1,25 +1,53 @@
# How to Create a Topic
-> Inferences for the same domain are aggregated into the same topic
+## What You'll Learn
+- Understanding topics as Schelling points for prediction aggregation
+- Complete topic creation process using allorad CLI commands
+- Topic parameter configuration and their practical implications
+- How to fund topics to ensure active participation and rewards
-## What is a Topic?
+## Overview
-Topics are [Schelling points](https://en.wikipedia.org/wiki/Focal_point_(game_theory)) where disparate-but-alike data scientists and domain experts aggregate their predictions. For example, we might create a topic for predicting the future price of ETH. There, all experts with any talent in predicting the future price of ETH will submit their inferences. Topics vary by domain and parameterization, defining how these inferences are collected and valued.
+**Topics are [Schelling points](https://en.wikipedia.org/wiki/Focal_point_(game_theory)) where disparate-but-alike data scientists and domain experts aggregate their predictions.**
+
+### What Makes Topics Special?
+
+Topics serve as coordination mechanisms:
+- **Aggregation points**: Collect predictions from multiple experts in the same domain
+- **Quality assurance**: Ensure predictions are relevant and comparable
+- **Reward mechanisms**: Enable fair compensation for accurate predictions
+- **Network organization**: Structure the network around specific prediction categories
+
+### Topic Examples
+
+For example, we might create a topic for predicting the future price of ETH. There, all experts with any talent in predicting the future price of ETH will submit their inferences. Topics vary by domain and parameterization, defining how these inferences are collected and valued.
+
Developers can make topics for arbitrary categories of inferences so long as they complete the steps detailed under [Prerequisites](#prerequisites) below.
+
+**Why Create Topics?**
+
+- **Customization**: Define specific prediction parameters for your use case
+- **Network participation**: Enable experts to contribute their domain knowledge
+- **Data access**: Create reliable prediction feeds for applications
+- **Innovation**: Experiment with new prediction methodologies and reward structures
-## Prerequisites:
+## Prerequisites
-1. A wallet with sufficient funds to at least cover gas. Use [the faucet](/devs/get-started/setup-wallet) to get funds.
-2. [Allorad CLI tool](/devs/get-started/cli#installing-allorad)
+1. **A wallet with sufficient funds** to at least cover gas. Use [the faucet](/devs/get-started/quick-start#get-testnet-funds) to get funds.
+2. **[Allorad CLI tool](/devs/get-started/quick-start#install-the-allora-cli)**: Command-line interface for network interactions.
-## Explainer Video
+## Understanding Topic Creation
+
+### Educational Resources
+
+#### Explainer Video
Please see the video below to get a full deep-dive on the different parameters that make up a topic:
-## Tx Functions
+### Command Structure
+
+#### Tx Functions
These functions write to the appchain. Add the **Command** value into your query to retrieve the expected data.
@@ -27,7 +55,9 @@ These functions write to the appchain. Add the **Command** value into your query
allorad tx emissions [Command]
```
-## Creating Your First Topic
+## Topic Creation Process
+
+### Creating Your First Topic
The transaction for creating a topic has the following structure:
@@ -60,7 +90,9 @@ type MsgCreateNewTopic struct {
}
```
-Using the [`allorad` CLI](/devs/get-started/cli#installing-allorad) to create a topic:
+### Command Line Implementation
+
+Using the [`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli) to create a topic:
```shell bash
allorad tx emissions create-topic \
@@ -82,33 +114,121 @@ allorad tx emissions create-topic \
--chain-id <CHAIN_ID>
```
-Be sure to swap out [`RPC_URL`](/devs/get-started/setup-wallet#rpc-url-and-chain-id), `YOUR_ADDRESS`, [`CHAIN_ID`](/devs/get-started/setup-wallet#rpc-url-and-chain-id) and all other arguments as appropriate with the desired values.
+**Parameter Substitution**: Be sure to swap out [`RPC_URL`](/devs/get-started/quick-start#network-configuration), `YOUR_ADDRESS`, [`CHAIN_ID`](/devs/get-started/quick-start#network-configuration) and all other arguments as appropriate with the desired values.
+
+## Parameter Details
+
+### Core Configuration
+
+#### Basic Information
+
+**`Creator`**: Address of the wallet that will own the topic
+- Must be your wallet address
+- Owner has administrative control over the topic
+
+**`Metadata`**: Information about the topic
+- Descriptive field to let users know what this topic is about
+- Should include specific indication about how it is expected to work
+- Human-readable description for network participants
+
+**`LossMethod`**: The method used for loss calculations
+- Determines how prediction accuracy is measured
+- Common values: "mse" (Mean Squared Error)
+
+#### Timing Parameters
+
+**`EpochLength`**: The frequency (in blocks) of inference calculations
+- Must be greater than 0
+- Defines how often new predictions are requested
+- Shorter epochs = more frequent predictions
-### Notes
+**`GroundTruthLag`**: The time it takes for the ground truth to become available
+- Cannot be negative
+- Time between prediction and when actual results are known
+- Essential for determining prediction accuracy
-An explanation in more detail of some of these fields.
+**`WorkerSubmissionWindow`**: The time window within a given epoch during which worker nodes can submit an inference
+- Defines submission deadline within each epoch
+- Balances timeliness with participation opportunity
-- `Metadata` is a descriptive field to let users know what this topic is about and/or any specific indication about how it is expected to work.
-- `allowNegative` determines whether the loss function output can be negative.
- - If **true**, the reputer submits raw losses.
- - If **false**, the reputer submits logs of losses.
+#### Quality Control
----
+**`PNorm`**: Raising this parameter increases the degree to which high-quality inferences are favored over lower-quality ones
+- Must be between 2.5 and 4.5
+- Higher values favor accuracy over participation
-## Fund a Topic
+**`AlphaRegret`**: Raising this parameter reduces the influence of workers' historical performance on their current reward distribution
+- Must be between 0 and 1
+- Controls how much past performance affects current rewards
+**`AllowNegative`**: Indicates if the loss function's output can be negative
+- **If false**: The reputer submits logs of losses
+- **If true**: The reputer submits raw losses
+- Affects how loss calculations are processed
+
+#### Advanced Parameters
+
+**`Epsilon`**: The numerical precision at which the network should distinguish differences in the logarithm of the loss
+- Controls precision in loss calculations
+- Affects reward distribution sensitivity
+
+**Quantile Parameters**: Control active participant selection
+- **`ActiveInfererQuantile`**: Proportion of workers selected for active participation
+- **`ActiveForecasterQuantile`**: Proportion of forecasters selected
+- **`ActiveReputerQuantile`**: Proportion of reputers selected
+- All typically set to 0.25 (25%) for balanced participation
+
+## Topic Funding
+
+### Fund a Topic
+
+**Topic Funding Details**:
- **RPC Method:** `FundTopic`
- **Command:** `fund-topic [sender] [topic_id] [amount] [extra_data]`
- **Description:** Sends funds to a specific topic to be used for paying for inferences or other topic-related activities.
-- **Positional Arguments:**
- - `sender`: The address of the sender providing the funds.
- - `topic_id`: The identifier of the topic to receive the funds.
- - `amount`: The amount of funds being sent to the topic.
- - `extra_data`: Additional data or metadata associated with the funding transaction.
-### Use Case:
+**Positional Arguments**:
+- **`sender`**: The address of the sender providing the funds.
+- **`topic_id`**: The identifier of the topic to receive the funds.
+- **`amount`**: The amount of funds being sent to the topic.
+- **`extra_data`**: Additional data or metadata associated with the funding transaction.
+
+#### Use Case
+
**Why use it?**
- This command is used to fund a topic, ensuring there are sufficient funds available to reward workers, forecasters, or other participants submitting inferences or engaging with the topic.
-**Example Scenario:**
+**Example Scenario**:
- As a network administrator or topic creator, you want to add funds to a topic to ensure that workers and forecasters are compensated for their contributions.
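+
+For illustration, a funding transaction might look like the sketch below; the sender address, topic ID, amount, token denomination (`uallo`), and connection values are all placeholder assumptions:
+
+```shell bash
+# Fund topic 1 with 1000000 uallo; extra_data left empty (all values are placeholders)
+allorad tx emissions fund-topic <YOUR_ADDRESS> 1 1000000uallo "" \
+  --node <RPC_URL> \
+  --chain-id <CHAIN_ID>
+```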
+
+**Funding Strategy**:
+- Fund topics based on expected participation levels
+- Monitor funding levels to ensure continuous operation
+- Consider funding multiple topics for diversified prediction coverage
+
+## Best Practices
+
+### Parameter Selection
+
+**For Price Predictions**:
+- Short EpochLength (5-60 minutes) for high-frequency trading data
+- Longer GroundTruthLag for assets with delayed price discovery
+- Higher PNorm values to emphasize accuracy
+
+**For Event Predictions**:
+- Longer EpochLength matching event timelines
+- GroundTruthLag based on when results become available
+- Consider AllowNegative=true for binary outcomes
+
+### Topic Management
+
+**Ongoing Maintenance**:
+- Monitor topic activity and adjust funding as needed
+- Review participant performance and topic competitiveness
+- Consider parameter adjustments based on network behavior
+
+## Next Steps
+
+- [Understand topic lifecycle](/devs/topic-creators/topic-life-cycle) for ongoing management
+- [Learn to query topic data](/devs/topic-creators/query-topic-data) for monitoring
+- [Explore worker deployment](/devs/workers/deploy-worker/using-docker) to participate in your topics
diff --git a/pages/devs/topic-creators/query-topic-data.mdx b/pages/devs/topic-creators/query-topic-data.mdx
index 9b54f69..e2594e6 100644
--- a/pages/devs/topic-creators/query-topic-data.mdx
+++ b/pages/devs/topic-creators/query-topic-data.mdx
@@ -1,314 +1,479 @@
# How to Query Topic Data using `allorad`
+## What You'll Learn
+- Essential commands for querying topic information, status, and configuration
+- How to analyze topic stakes, revenues, and participant activity
+- Understanding topic weights, reward nonces, and performance metrics
+- Advanced querying techniques for historical data and network analysis
-To query network-level data on the Allora chain using the `allorad` CLI, you need to interact with various RPC methods designed to return aggregate or holistic information about specific topics.
+## Overview
+
+**To query network-level data on the Allora chain using the `allorad` CLI, you need to interact with various RPC methods designed to return aggregate or holistic information about specific topics.**
+
+### Why Query Topic Data?
+
+Topic data queries enable:
+- **Topic management**: Monitor topic status, activity, and configuration
+- **Performance analysis**: Track topic weight, revenue, and participant behavior
+- **Strategic planning**: Make informed decisions about topic participation
+- **Network research**: Analyze network-wide patterns and trends
## Prerequisites
-- [`allorad` CLI](/devs/get-started/cli)
+- **[`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli)**: Command-line interface for Allora Network
+- **Basic topic understanding**: Familiarity with topic concepts and lifecycle
+- **Network access**: Connectivity to Allora Network RPC endpoints
+- **Topic IDs**: Knowledge of specific topics you want to analyze
-## Query Functions
+## Query Command Structure
These functions read from the appchain only and do not write. Add the **Command** value into your query to retrieve the expected data.
+**Base Query Format**:
```bash
allorad q emissions [Command] --node <RPC_URL>
```
+**Command Components**:
+- **`allorad q emissions`**: Base structure for emissions module queries
+- **`[Command]`**: Specific function to execute (detailed below)
+- **`--node <RPC_URL>`**: RPC endpoint for network connection
+
+## Basic Topic Information Commands
+
### Get Topic by Topic ID
+**Query Details**:
- **RPC Method:** `GetTopic`
- **Command:** `topic [topic_id]`
- **Description:** Retrieves information about a specific topic by its ID.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to query details about a particular topic.
**Example Scenario:**
- You want to check the metadata and settings for a specific topic in the network.
----
+**Practical Applications**:
+- Topic configuration review
+- Parameter verification before participation
+- Topic metadata analysis
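+
+For example, a call of this shape, where topic ID `1` and `<RPC_URL>` are placeholders (here and in the examples that follow):
+
+```bash
+allorad q emissions topic 1 --node <RPC_URL>
+```
+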
### Check if Topic Exists
+**Query Details**:
- **RPC Method:** `TopicExists`
- **Command:** `topic-exists [topic_id]`
- **Description:** Checks if a topic exists at the given ID. Returns `true` if the topic exists, `false` otherwise.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to verify whether a topic has been created or is active in the network.
**Example Scenario:**
- Before interacting with a topic, you want to confirm that it exists in the system.
----
+**Practical Applications**:
+- Input validation for other operations
+- Topic discovery and verification
+- Error prevention in automated systems
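+
+An illustrative check, with placeholder values:
+
+```bash
+allorad q emissions topic-exists 1 --node <RPC_URL>
+```
+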
### Check if Topic is Active
+**Query Details**:
- **RPC Method:** `IsTopicActive`
- **Command:** `is-topic-active [topic_id]`
- **Description:** Checks whether a specific topic is currently active. Returns `true` if the topic is active, `false` otherwise.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- This command helps determine if a topic is active and available for participation.
**Example Scenario:**
- Before submitting any data, you want to confirm that the topic is active and accepting inputs.
----
+**Practical Applications**:
+- Participation eligibility verification
+- Topic lifecycle monitoring
+- Resource allocation decisions
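+
+For example:
+
+```bash
+allorad q emissions is-topic-active 1 --node <RPC_URL>
+```
+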
### Get Next Topic ID
+**Query Details**:
- **RPC Method:** `GetNextTopicId`
- **Command:** `next-topic-id`
- **Description:** Returns the ID of the next available topic that can be created.
-#### Use Case:
+#### Use Case
+
**Why use it?**
- Use this command to determine the next available topic ID when creating a new topic.
**Example Scenario:**
- Before creating a new topic, you may want to check what the next topic ID will be.
----
+**Practical Applications**:
+- Topic creation planning
+- ID reservation strategies
+- Network growth tracking
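+
+Since this command takes no positional arguments, an invocation is simply:
+
+```bash
+allorad q emissions next-topic-id --node <RPC_URL>
+```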
+
+## Stake Analysis Commands
### Get Reputer Stake in Topic
+**Query Details**:
- **RPC Method:** `GetReputerStakeInTopic`
- **Command:** `stake-in-topic-reputer [address] [topic_id]`
- **Description:** Retrieves the stake a reputer has in a specific topic, including any stake that has been delegated to them.
-- **Positional Arguments:**
- - `address`: The address of the reputer.
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`address`**: The address of the reputer.
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to check the total stake a reputer holds in a specific topic.
**Example Scenario:**
- You want to verify how much stake a particular reputer has in a specific topic.
----
+**Practical Applications**:
+- Reputer influence assessment
+- Delegation decision support
+- Network authority analysis
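+
+A sketch with a placeholder reputer address:
+
+```bash
+allorad q emissions stake-in-topic-reputer <REPUTER_ADDRESS> 1 --node <RPC_URL>
+```
+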
### Get Total Stake Delegated to Reputer in a Topic
+**Query Details**:
- **RPC Method:** `GetDelegateStakeInTopicInReputer`
- **Command:** `stake-total-delegated-in-topic-reputer [reputer_address] [topic_id]`
- **Description:** Retrieves the total stake that has been delegated to a reputer in a specific topic.
-- **Positional Arguments:**
- - `reputer_address`: The address of the reputer.
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`reputer_address`**: The address of the reputer.
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to see how much stake has been delegated to a reputer in a topic.
**Example Scenario:**
- You want to check the total delegated stake assigned to a specific reputer.
----
+**Practical Applications**:
+- Delegation saturation analysis
+- Reputer popularity metrics
+- Community confidence assessment
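+
+For example, with placeholder values:
+
+```bash
+allorad q emissions stake-total-delegated-in-topic-reputer <REPUTER_ADDRESS> 1 --node <RPC_URL>
+```
+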
### Get Delegate Stake Placement in Topic
+**Query Details**:
- **RPC Method:** `GetDelegateStakePlacement`
- **Command:** `delegate-stake-placement [topic_id] [delegator] [target]`
- **Description:** Retrieves the amount of tokens delegated to a specific reputer by a given delegator for a topic.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `delegator`: The address of the delegator.
- - `target`: The address of the target reputer.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+- **`delegator`**: The address of the delegator.
+- **`target`**: The address of the target reputer.
+
+#### Use Case
+
**Why use it?**
- This command allows delegators to track how much stake they have assigned to a reputer for a topic.
**Example Scenario:**
- You want to know how much stake you have delegated to a particular reputer in a specific topic.
----
+**Practical Applications**:
+- Personal delegation tracking
+- Portfolio management
+- Stake allocation verification
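+
+A sketch with placeholder delegator and reputer addresses:
+
+```bash
+allorad q emissions delegate-stake-placement 1 <DELEGATOR_ADDRESS> <REPUTER_ADDRESS> --node <RPC_URL>
+```
+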
### Get Delegate Stake Removal in a Topic
+**Query Details**:
- **RPC Method:** `GetDelegateStakeRemoval`
- **Command:** `delegate-stake-removal [block_height] [topic_id] [delegator] [reputer]`
- **Description:** Retrieves the current state of a pending delegate stake removal in a topic.
-- **Positional Arguments:**
- - `block_height`: The block height at which the removal is pending.
- - `topic_id`: The identifier of the topic.
- - `delegator`: The address of the delegator.
- - `reputer`: The address of the reputer.
-#### Use Case:
+**Positional Arguments**:
+- **`block_height`**: The block height at which the removal is pending.
+- **`topic_id`**: The identifier of the topic.
+- **`delegator`**: The address of the delegator.
+- **`reputer`**: The address of the reputer.
+
+#### Use Case
+
**Why use it?**
- Use this command to check the status of pending delegated stake removals in a topic.
**Example Scenario:**
- You want to know whether your request to remove delegated stake is still pending.
----
+**Practical Applications**:
+- Unstaking process monitoring
+- Liquidity planning
+- Operation status tracking
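+
+For example, with a placeholder block height and addresses:
+
+```bash
+allorad q emissions delegate-stake-removal <BLOCK_HEIGHT> 1 <DELEGATOR_ADDRESS> <REPUTER_ADDRESS> --node <RPC_URL>
+```
+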
### Get Total Stake in Topic
+**Query Details**:
- **RPC Method:** `GetTopicStake`
- **Command:** `topic-stake [topic_id]`
- **Description:** Retrieves the total amount of stake, including delegate stake, in a specific topic.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to check the total stake in a topic, including both direct and delegated stakes.
**Example Scenario:**
- You want to know the overall stake in a topic before participating or delegating more tokens.
----
+**Practical Applications**:
+- Topic competitiveness assessment
+- Security level evaluation
+- Investment opportunity analysis
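+
+An illustrative invocation:
+
+```bash
+allorad q emissions topic-stake 1 --node <RPC_URL>
+```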
+
+## Performance and Revenue Commands
### Get Latest Available Network Inferences for a Topic
+**Query Details**:
- **RPC Method:** `GetLatestAvailableNetworkInferences`
- **Command:** `latest-available-network-inferences [topic_id]`
- **Description:** Retrieves the latest network inference for a given topic, but only if all necessary information to compute the inference is present.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- This command is useful for getting the most recent network-wide inference for a topic when all necessary data has been collected.
**Example Scenario:**
- You want to retrieve the latest ETH price prediction, but only if all the data from workers and forecasters has been collected.
----
+**Practical Applications**:
+- Real-time inference monitoring
+- Data completeness verification
+- Consumer application integration
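+
+For example:
+
+```bash
+allorad q emissions latest-available-network-inferences 1 --node <RPC_URL>
+```
+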
### Get Topic Reward Nonce
+**Query Details**:
- **RPC Method:** `GetTopicRewardNonce`
- **Command:** `topic-reward-nonce [topic_id]`
- **Description:** Retrieves the reward nonce used to calculate rewards for a specific topic.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to understand the reward cycle for a particular topic, as it provides the nonce used to calculate rewards.
**Example Scenario:**
- You want to check the reward nonce for a topic before submitting contributions.
----
+**Practical Applications**:
+- Reward cycle tracking
+- Participation timing optimization
+- Network synchronization verification
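+
+An illustrative invocation:
+
+```bash
+allorad q emissions topic-reward-nonce 1 --node <RPC_URL>
+```
+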
### Get Topic Fee Revenue
+**Query Details**:
- **RPC Method:** `GetTopicFeeRevenue`
- **Command:** `topic-fee-revenue [topic_id]`
- **Description:** Retrieves the effective fee revenue for a topic, which represents the total fees collected by the topic less an exponential decay of the fees over time.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- This command provides insights into the fee revenue for a topic and how that impacts its overall weight and performance.
**Example Scenario:**
- You want to check the total fee revenue generated by a topic before adjusting its parameters or interacting further.
----
+**Practical Applications**:
+- Economic viability assessment
+- Topic weight calculation
+- Revenue trend analysis
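+
+For example:
+
+```bash
+allorad q emissions topic-fee-revenue 1 --node <RPC_URL>
+```
+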
### Get Previous Topic Weight
+**Query Details**:
- **RPC Method:** `GetPreviousTopicWeight`
- **Command:** `previous-topic-weight [topic_id]`
- **Description:** Retrieves the previous weight of a topic, which can be used to estimate future or past topic performance.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+
+#### Use Case
+
**Why use it?**
- Use this command to analyze the historical weight of a topic, which can help predict its future influence.
**Example Scenario:**
- You want to assess the past performance of a topic before participating in it again.
----
+**Practical Applications**:
+- Historical performance analysis
+- Weight trend tracking
+- Competitiveness forecasting
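+
+An illustrative invocation:
+
+```bash
+allorad q emissions previous-topic-weight 1 --node <RPC_URL>
+```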
+
+## Historical Analysis Commands
### Get Active Topics at Block
+**Query Details**:
- **RPC Method:** `GetActiveTopicsAtBlock`
- **Command:** `active-topics-at-block [block_height]`
- **Description:** Retrieves all active topics at a specific block height.
-- **Positional Arguments:**
- - `block_height`: The block height at which to retrieve the active topics.
-#### Use Case:
+**Positional Arguments**:
+- **`block_height`**: The block height at which to retrieve the active topics.
+
+#### Use Case
+
**Why use it?**
- Use this command to identify all topics that are active at a given block.
**Example Scenario:**
- You want to see which topics were active during a specific block height to compare performance or contributions.
----
+**Practical Applications**:
+- Historical network state analysis
+- Topic lifecycle research
+- Comparative performance studies
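+
+For example, with a placeholder block height:
+
+```bash
+allorad q emissions active-topics-at-block <BLOCK_HEIGHT> --node <RPC_URL>
+```
+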
### Get Topic Inferences at Block
+**Query Details**:
- **RPC Method:** `GetInferencesAtBlock`
- **Command:** `inferences-at-block [topic_id] [block_height]`
- **Description:** Retrieves all inferences produced for a topic at a given block height.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `block_height`: The block height for which to retrieve the inferences.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+- **`block_height`**: The block height for which to retrieve the inferences.
+
+#### Use Case
+
**Why use it?**
- Use this command to get all inferences made for a topic at a specific block height.
**Example Scenario:**
- You want to analyze the inferences produced at a specific block for performance review or reward calculation.
----
+**Practical Applications**:
+- Historical inference analysis
+- Performance verification
+- Reward calculation auditing
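+
+An illustrative invocation with placeholder values:
+
+```bash
+allorad q emissions inferences-at-block 1 <BLOCK_HEIGHT> --node <RPC_URL>
+```
+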
### Get Topic Forecast Scores Until Block
+**Query Details**:
- **RPC Method:** `GetForecastScoresUntilBlock`
- **Command:** `forecast-scores-until-block [topic_id] [block_height]`
- **Description:** Retrieves all forecast scores for a topic until a specific block height, limited by `MaxSamplesToScaleScores`.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `block_height`: The block height for which to retrieve the forecast scores.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+- **`block_height`**: The block height for which to retrieve the forecast scores.
+
+#### Use Case
+
**Why use it?**
- Use this command to track forecaster performance over time in a topic by looking at forecast scores until a specific block height.
**Example Scenario:**
- You want to evaluate the forecast scores for a topic until a particular block to assess forecaster accuracy.
----
+**Practical Applications**:
+- Forecaster performance evaluation
+- Long-term trend analysis
+- Accuracy improvement tracking
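+
+For example:
+
+```bash
+allorad q emissions forecast-scores-until-block 1 <BLOCK_HEIGHT> --node <RPC_URL>
+```
+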
### Get Reputer Scores at Block
+**Query Details**:
- **RPC Method:** `GetReputersScoresAtBlock`
- **Command:** `reputer-scores-at-block [topic_id] [block_height]`
- **Description:** Retrieves all reputer scores for a topic at a specific block height.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic.
- - `block_height`: The block height for which to retrieve the reputer scores.
-#### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic.
+- **`block_height`**: The block height for which to retrieve the reputer scores.
+
+#### Use Case
+
**Why use it?**
- Use this command to evaluate how reputers performed at a specific block height.
**Example Scenario:**
- You want to analyze reputer performance at a particular block to understand how their contributions impacted the topic.
----
+**Practical Applications**:
+- Reputer performance evaluation
+- Historical accuracy assessment
+- Network quality analysis
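+
+An illustrative invocation:
+
+```bash
+allorad q emissions reputer-scores-at-block 1 <BLOCK_HEIGHT> --node <RPC_URL>
+```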
+
+## Common Analysis Workflows
+
+### Topic Due Diligence
+
+**Assessment Process**:
+1. **Verify existence**: Use `topic-exists` to confirm topic is available
+2. **Check activity**: Use `is-topic-active` to ensure participation eligibility
+3. **Analyze stakes**: Use stake commands to understand participant commitment
+4. **Review performance**: Use inference and score commands for quality assessment
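+
+A minimal shell sketch of this workflow, assuming `RPC_URL` is set to your endpoint and topic `1` is the topic under review (both placeholders):
+
+```bash
+# Placeholder values; substitute your own topic ID and RPC endpoint
+TOPIC_ID=1
+
+allorad q emissions topic-exists "$TOPIC_ID" --node "$RPC_URL"     # 1. verify existence
+allorad q emissions is-topic-active "$TOPIC_ID" --node "$RPC_URL"  # 2. check activity
+allorad q emissions topic-stake "$TOPIC_ID" --node "$RPC_URL"      # 3. participant commitment
+allorad q emissions latest-available-network-inferences "$TOPIC_ID" --node "$RPC_URL"  # 4. recent output
+```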
+
+### Investment Analysis
+
+**Research Workflow**:
+1. **Economic evaluation**: Use `topic-fee-revenue` to assess revenue potential
+2. **Weight analysis**: Use `previous-topic-weight` to understand competitiveness
+3. **Participant analysis**: Use stake queries to evaluate community engagement
+4. **Historical performance**: Use block-specific queries for trend analysis
+
+### Network Research
+
+**Study Applications**:
+- Compare topic performance across different time periods
+- Analyze stake distribution patterns and concentration
+- Study forecaster and reputer accuracy trends
+- Identify high-performing topics for model development
+
+## Next Steps
+
+- [Learn how to create topics](/devs/topic-creators/how-to-create-topic) with optimal parameters
+- [Understand topic lifecycle](/devs/topic-creators/topic-life-cycle) for better management
+- [Explore worker deployment](/devs/workers/deploy-worker/using-docker) to participate in topics
diff --git a/pages/devs/topic-creators/topic-life-cycle.mdx b/pages/devs/topic-creators/topic-life-cycle.mdx
index d5ede3c..1507878 100644
--- a/pages/devs/topic-creators/topic-life-cycle.mdx
+++ b/pages/devs/topic-creators/topic-life-cycle.mdx
@@ -1,100 +1,185 @@
# Topic Life Cycle
-The Topic Life Cycle in the Allora Network is a dynamic process that determines the stages a topic goes through from creation to conclusion. These stages are influenced by various factors such as funding, popularity, and performance metrics. Understanding the life cycle of a topic is crucial for engaging with the network.
+## What You'll Learn
+- Understanding the complete lifecycle of topics from creation to conclusion
+- Key terms and concepts that govern topic behavior and transitions
+- Topic states and the conditions that determine state changes
+- Economic mechanisms including weight, effective revenue, and competitiveness
-## Key Terms and Concepts
+## Overview
-### Epoch Length
+**The Topic Life Cycle in the Allora Network is a dynamic process that determines the stages a topic goes through from creation to conclusion.** These stages are influenced by various factors such as funding, popularity, and performance metrics. Understanding the life cycle of a topic is crucial for engaging with the network.
-How often inferences are sampled and scored in the topic. Defined when [creating a topic](/devs/topic-creators/how-to-create-topic) as `EpochLength`.
+### Why Topic Lifecycle Matters
-### Epoch Last Ended
+Understanding the lifecycle helps you:
+- **Predict topic behavior**: Know when topics become active and how they evolve
+- **Optimize participation**: Time your involvement for maximum effectiveness
+- **Manage resources**: Allocate funding and stake strategically
+- **Plan strategies**: Develop long-term approaches to topic engagement
-The timestamp indicating when the last epoch ended, important for tracking topic activity.
+## Core Concepts and Terminology
-### Ground Truth Lag
+### Timing and Scheduling
-The amount of time into the future a specific inference is calculating for. Defined when [creating a topic](/devs/topic-creators/how-to-create-topic) as `GroundTruthLag`.
+#### Epoch Length
-E.g. "Every 15 minutes, provide BTC prediction for 1 day in the future":
-- 10 min - EpochLength
-- 1 day - GroundTruthLag
+**Definition**: How often inferences are sampled and scored in the topic. Defined when [creating a topic](/devs/topic-creators/how-to-create-topic) as `EpochLength`.
-### Nonce
+**Practical Impact**:
+- Determines prediction frequency
+- Affects participant workload and resource requirements
+- Influences topic competitiveness through activity levels
-The block height at which a given outbound request from network validators is made. Nonces ensure that responses are correctly paired with their requests to facilitate accurate reward distribution and loss calculation.
+#### Epoch Last Ended
-Every topic will inevitably generate multiple worker and reputer requests, each needing to be matched with rewards for participants. The blockchain must differentiate between responses still pending rewards and those already rewarded, and reputers must identify which worker payloads to use for loss calculations. This requires uniquely identifying each outbound request.
+**Definition**: The timestamp indicating when the last epoch ended, important for tracking topic activity.
-The same nonce value will be used to fulfill a complete work and reputation cycle:
+**Use Cases**:
+- Monitoring topic activity patterns
+- Calculating time since last network interaction
+- Planning participation timing
-1. A request for inferences and forecasts using a particular nonce is issued first.
-2. Once the workers have submitted their work, the worker nonce is fulfilled and a reputer nonce is created using the same value.
-3. This reputer nonce will be processed when appropriate, triggering a reputation request.
-4. When the reputers respond by submitting their work, the reputer nonce is also fulfilled, ending its cycle.
+#### Ground Truth Lag
+
+**Definition**: The amount of time into the future a specific inference is calculating for. Defined when [creating a topic](/devs/topic-creators/how-to-create-topic) as `GroundTruthLag`.
+
+**Example**: "Every 15 minutes, provide BTC prediction for 1 day in the future":
+- **15 min**: EpochLength
+- **1 day**: GroundTruthLag
+
+**Strategic Importance**:
+- Determines when predictions can be evaluated
+- Affects topic activation timing
+- Influences participant reward cycles
+
+### Network Coordination
+
+#### Nonce
+
+**Definition**: The block height at which a given outbound request from network validators is made. Nonces ensure that responses are correctly paired with their requests to facilitate accurate reward distribution and loss calculation.
+
+**Why Nonces Matter**: Every topic will inevitably generate multiple worker and reputer requests, each needing to be matched with rewards for participants. The blockchain must differentiate between responses still pending rewards and those already rewarded, and reputers must identify which worker payloads to use for loss calculations. This requires uniquely identifying each outbound request.
+
+**Nonce Lifecycle**: The same nonce value will be used to fulfill a complete work and reputation cycle:
+
+1. **Request Phase**: A request for inferences and forecasts using a particular nonce is issued first.
+2. **Worker Response**: Once the workers have submitted their work, the worker nonce is fulfilled and a reputer nonce is created using the same value.
+3. **Evaluation Phase**: This reputer nonce will be processed when appropriate, triggering a reputation request.
+4. **Completion**: When the reputers respond by submitting their work, the reputer nonce is also fulfilled, ending its cycle.
+
+## Economic Mechanisms
### Topic Competitiveness
-Competitiveness in the Allora Network refers to a topic's ability to attract and retain funding, stakes, and participation relative to other topics. A competitive topic has the following characteristics:
+**Definition**: Competitiveness in the Allora Network refers to a topic's ability to attract and retain funding, stakes, and participation relative to other topics.
+**Characteristics of Competitive Topics**:
- **High Effective Revenue**: A greater accumulation of revenue indicates strong interest.
- **Significant Stake**: Large amounts of reputer and delegated stakes signify confidence in the topic's value.
-Both of these metrics are a function of [weight](/devs/topic-creators/topic-life-cycle#weight), which proxies overall participation and ultimately topic competitiveness.
+**Key Insight**: Both of these metrics are a function of [weight](/devs/topic-creators/topic-life-cycle#weight), which proxies overall participation and ultimately topic competitiveness.
### Effective Revenue
-Effective Revenue is the measure of the impact that the total accrued revenue has on a topic's weight. It determines how much influence the revenue has on making a topic active and competitive.
-
-- Initially, Effective Revenue equals the total amount of money a topic accrues before the first epoch.
-- Once a topic becomes active, funds from Effective Revenue are used, impacting the ecosystem bucket.
+**Definition**: Effective Revenue is the measure of the impact that the total accrued revenue has on a topic's weight. It determines how much influence the revenue has on making a topic active and competitive.
-The Effective Revenue drips over time, reflecting the topic's diminishing competitiveness relative to other topics.
+**Revenue Mechanics**:
+- **Initial State**: Initially, Effective Revenue equals the total amount of money a topic accrues before the first epoch.
+- **Active Operation**: Once a topic becomes active, funds from Effective Revenue are used, impacting the ecosystem bucket.
+- **Time Decay**: The Effective Revenue drips over time, reflecting the topic's diminishing competitiveness relative to other topics.
### Ecosystem Bucket
-The Ecosystem Bucket is a mechanism that distributes a portion of the total funds at a rate (approximately 10%) that decreases exponentially over time. This bucket serves as a comparative baseline for topic competitiveness. The effective revenue of a topic needs to be balanced with the ecosystem bucket to ensure the topic's competitiveness.
+**Definition**: The Ecosystem Bucket is a mechanism that distributes a portion of the total funds at a rate (approximately 10%) that decreases exponentially over time. This bucket serves as a comparative baseline for topic competitiveness. The effective revenue of a topic needs to be balanced with the ecosystem bucket to ensure the topic's competitiveness.
-- The bucket holds the money and drips at a certain rate.
-- This rate is uncoupled from the effective revenue drip to avoid complex calculations to determine how much effective revenue the topic actually has remaining.
-- It provides an estimation but doesn’t have a bearing on the total amount of money dripped from the ecosystem, ensuring financial safety.
+**Bucket Mechanics**:
+- **Fund Management**: The bucket holds the money and drips at a certain rate.
+- **Independent Operation**: This rate is uncoupled from the effective revenue drip to avoid complex calculations to determine how much effective revenue the topic actually has remaining.
+- **Estimation Tool**: It provides an estimation but doesn't have a bearing on the total amount of money dripped from the ecosystem, ensuring financial safety.
### Weight
-Weight is a measure of a topic's competitiveness within the blockchain network. It is a function of the combined stake of reputers (including delegated stakes) and the topic's Effective Revenue. The weight of a topic determines its likelihood of becoming active and indirectly influences the distribution of rewards and resources within the network.
+**Definition**: Weight is a measure of a topic's competitiveness within the blockchain network. It is a function of the combined stake of reputers (including delegated stakes) and the topic's Effective Revenue. The weight of a topic determines its likelihood of becoming active and indirectly influences the distribution of rewards and resources within the network.
-- Higher weight signifies greater competitiveness.
-- Driven by the total stake and the impact of effective revenue.
+**Weight Characteristics**:
+- **Competitiveness Indicator**: Higher weight signifies greater competitiveness.
+- **Multi-Factor Calculation**: Driven by the total stake and the impact of effective revenue.
+- **Dynamic Nature**: Changes based on participant behavior and funding levels
## Topic States
-### Inactive
+### Inactive State
+
+**Definition**: A topic is inactive after it is created but before it becomes sufficiently funded.
-A topic is inactive after it is created but before it becomes sufficiently funded.
+**Characteristics**:
+- Newly created topics start in this state
+- No network activity or inference requests
+- Waiting for sufficient funding and participation
-### Active
+**Transition Requirements**: Topics move out of inactive state when they achieve sufficient weight through funding and staking.
-A topic becomes active once it is sufficiently funded. A topic is sufficiently funded once it has more than a threshold amount of weight, which is a function of the amount of:
+### Active State
+
+**Definition**: A topic becomes active once it is sufficiently funded. A topic is sufficiently funded once it has more than a threshold amount of weight, which is a function of the amount of:
- [Reputer stake](/devs/reputers/set-and-adjust-stake) placed in the topic
- Delegated stake
- Effective revenue garnered by the topic
-Different actors can permissionlessly [fund a topic](/devs/reference/allorad#send-funds-to-a-topic-to-pay-for-inferences) using the `allorad` CLI tool.
+**Funding Mechanisms**: Different actors can permissionlessly [fund a topic](/devs/reference/allorad#send-funds-to-a-topic-to-pay-for-inferences) using the `allorad` CLI tool.
+
+**Active State Benefits**:
+- Topic becomes eligible for network processing
+- Can receive inference and reputation requests
+- Participants can earn rewards from contributions
-### Churnable
+### Churnable State
-A topic becomes churnable once it is:
+**Definition**: A topic becomes churnable once it is:
- Active
- One of the top topics by weight (descending)
- The topic's `EpochLength` has passed since its inception or last epoch
-Once a topic is churnable, the chain can emit worker (and eventually reputer) requests to topic workers and reputers, respectively.
+**Churnable Operations**: Once a topic is churnable, the chain can emit worker (and eventually reputer) requests to topic workers and reputers, respectively.
-Reputer requests start after a topic's `GroundTruthLag` amount of blocks have passed. Once worker and reputer responses are fulfilled, the topic becomes _churned_.
+**Processing Timeline**: Reputer requests start after a topic's `GroundTruthLag` amount of blocks have passed. Once worker and reputer responses are fulfilled, the topic becomes _churned_.
-### Rewardable
+### Rewardable State
-A topic is rewardable once:
+**Definition**: A topic is rewardable once:
- It has been churned
- It has fulfilled worker and reputer requests
- It is ready to have its rewards calculated
+
+**Final Stage**: This represents the completion of a full topic cycle, where all participants can receive their earned rewards.
+
+## State Transition Strategy
+
+### For Topic Creators
+
+**Activation Strategy**:
+1. **Initial Funding**: Provide sufficient effective revenue to reach active state
+2. **Stake Coordination**: Encourage reputers to stake on your topic
+3. **Competitive Positioning**: Monitor weight relative to other topics
+
+### For Participants
+
+**Timing Considerations**:
+- **Workers**: Join when topics are churnable for regular inference requests
+- **Reputers**: Stake early to influence topic activation and competitiveness
+- **Delegators**: Support topics with strong fundamentals and good management
+
+### Monitoring and Management
+
+**Key Metrics to Track**:
+- Topic weight and ranking among active topics
+- Effective revenue levels and decay rates
+- Participation levels and engagement quality
+- State transition timing and patterns
+
+## Next Steps
+
+- [Learn to query topic data](/devs/topic-creators/query-topic-data) for lifecycle monitoring
+- [Understand how to create topics](/devs/topic-creators/how-to-create-topic) with optimal parameters
+- [Explore reputer staking](/devs/reputers/set-and-adjust-stake) to influence topic weight
diff --git a/pages/devs/validators.mdx b/pages/devs/validators.mdx
index 1bc4364..2cbb7b6 100644
--- a/pages/devs/validators.mdx
+++ b/pages/devs/validators.mdx
@@ -1,41 +1,168 @@
# Validators
-Validators maintain the security and integrity of the Allora appchain.
+## What You'll Learn
+- Understanding the critical role of validators in Allora appchain security
+- How validators secure the chain through delegated proof of stake and CometBFT
+- Distinction between topic security and chain security mechanisms
+- Validator responsibilities including transaction validation and consensus participation
-## What do Validators do?
+## Overview
-### Secure Chain with Stake
+**Validators maintain the security and integrity of the Allora appchain.**
-Validators secure the Allora appchain by staking tokens in a delegated proof of stake system through CometBFT. The more a validator stakes, the greater their influence on the overall security and consensus of the blockchain.
+### Why Validators Are Essential
-Similarly, stakeholders can delegate their tokens to validators, further enhancing the security and reliability of the chain.
+**Core Security Functions**:
+- **Blockchain security**: Maintain the fundamental integrity of the network
+- **Consensus participation**: Ensure agreement on network state and transactions
+- **Decentralization**: Distribute network control across multiple participants
+- **Economic incentives**: Provide rewards for honest behavior and network maintenance
-#### Topic Security vs Chain Security
+## Core Validator Functions
-- *Topic security* is a subset of *chain security*.
+### 1. Secure Chain with Stake
+
+**Validators secure the Allora appchain by staking tokens in a delegated proof of stake system through CometBFT.** The more a validator stakes, the greater their influence on the overall security and consensus of the blockchain.
+
+**Staking Mechanism Benefits**:
+- **Economic commitment**: Financial investment ensures aligned incentives
+- **Proportional influence**: Higher stakes provide greater consensus weight
+- **Security scaling**: More stake increases overall network security
+- **Attack prevention**: Economic cost makes malicious behavior expensive
+
+**Similarly, stakeholders can delegate their tokens to validators, further enhancing the security and reliability of the chain.**
+
+**Delegation Advantages**:
+- **Increased participation**: Allow token holders to contribute without running validators
+- **Enhanced security**: Aggregate more stake for stronger network protection
+- **Risk distribution**: Spread security responsibility across the community
+- **Economic opportunities**: Provide staking rewards for delegators
+
+#### Security Architecture
+
+**Topic Security vs Chain Security**:
+
+- **Topic security** is a subset of **chain security**.
- If the underlying state is corrupted, topic security is compromised.
- One can have chain security without topic security if:
- Validators are generally honest (weighted by stake).
- Reputers of a specific topic are generally dishonest (weighted by stake).
+**Security Layer Analysis**:
+- **Foundational layer**: Chain security provides the base layer of trust
+- **Application layer**: Topic security operates on top of chain security
+- **Independence principle**: Chain security can exist independently of topic-level issues
+- **Cascade effect**: Chain security failures affect all topic-level operations
+
+### 2. Validate Transactions
+
+**Validators validate transactions and blocks, ensuring that all transactions are legitimate and conform to the rules of the blockchain.**
+
+**Validation Process**:
+- **Transaction verification**: Check digital signatures and account balances
+- **Rule enforcement**: Ensure transactions follow network protocols
+- **Block construction**: Assemble valid transactions into ordered blocks
+- **State transitions**: Maintain accurate network state across all operations
+
+**Quality Assurance**:
+- **Fraud prevention**: Reject invalid or malicious transactions
+- **Network integrity**: Maintain consistent state across all nodes
+- **Protocol compliance**: Enforce network rules and consensus mechanisms
+- **Performance optimization**: Process transactions efficiently and reliably
+
+### 3. Participate in Consensus
+
+**Validators participate in the consensus mechanism of the appchain, running CometBFT.** By participating in the consensus, validators collectively agree on the state of the blockchain.
+
+**Consensus Benefits**:
+- **Agreement protocol**: Ensure all validators reach the same conclusions
+- **Byzantine fault tolerance**: Handle malicious or faulty validator behavior
+- **Finality guarantee**: Provide certainty that transactions are permanent
+- **Network coordination**: Synchronize state across distributed network
-### Validate Transactions
+**CometBFT Features**:
+- **Proven technology**: Battle-tested consensus algorithm from Cosmos ecosystem
+- **Performance optimization**: Fast finality and high transaction throughput
+- **Security guarantees**: Mathematical proofs of safety and liveness
+- **Flexibility**: Adaptable to various blockchain application needs
-Validators validate transactions and blocks, ensuring that all transactions are legitimate and conform to the rules of the blockchain
+### 4. Receive Rewards
-### Participate in Consensus
+**Validators receive rewards based on the amount of stake they hold or have delegated to them.**
-Validators participate in the consensus mechanism of the appchain, running CometBFT. By participating in the consensus, validators collectively agree on the state of the blockchain.
+**Reward Structure**:
+- **Performance-based**: Rewards correlate with accurate validation and uptime
+- **Stake-weighted**: Higher stakes receive proportionally larger rewards
+- **Shared benefits**: Delegators receive portions of validator rewards
+- **Economic sustainability**: Balanced incentives ensure long-term participation
-### Receive Rewards
+## Validator Economics
-Validators [receive rewards]() based on the amount of stake they hold or have delegated to them.
+### Staking Economics
+
+**Financial Considerations**:
+- **Initial investment**: Significant stake required for validator operations
+- **Ongoing costs**: Infrastructure, maintenance, and operational expenses
+- **Revenue potential**: Block rewards and transaction fees
+- **Risk factors**: Slashing penalties for misbehavior or downtime
+
+### Delegation Model
+
+**Community Participation**:
+- **Lower barriers**: Delegators can participate without running validators
+- **Risk sharing**: Distribute both rewards and potential penalties
+- **Network effects**: More participants increase overall security
+- **Democratic governance**: Broader community involvement in network decisions
+
+## Getting Started as a Validator
+
+### Prerequisites Assessment
+
+**Technical Requirements**:
+- **Infrastructure**: Reliable hardware and network connectivity
+- **Technical knowledge**: Understanding of blockchain operations and CometBFT
+- **Financial resources**: Sufficient stake and operational funding
+- **Time commitment**: Ongoing monitoring and maintenance responsibilities
+
+### Operational Considerations
+
+**Best Practices**:
+- **High availability**: Maintain consistent uptime for network participation
+- **Security measures**: Protect validator keys and infrastructure
+- **Performance monitoring**: Track validator metrics and network health
+- **Community engagement**: Build relationships with delegators and other validators
## Learn More
-Test run a validator of the Allora appchain by following the instructions [here](/devs/validators/run-full-node).
+**Test run a validator of the Allora appchain by following the instructions [here](/devs/validators/run-full-node).**
+
+**Educational Resources**:
+- **Hands-on experience**: Practice validator operations in test environment
+- **Technical documentation**: Comprehensive setup and configuration guides
+- **Community support**: Access to validator forums and assistance
+- **Best practices**: Learn from experienced validator operators
+
+**CometBFT can be explored in the following two articles, among many other places:**
+
+- **[Staking and Delegation in Cosmos](https://medium.com/@notional-ventures/staking-and-delegation-in-cosmos-db660154bcf9)**
+- **[CometBFT: Security and Consensus in Cosmos](https://medium.com/@notional-ventures/cometbft-security-and-consensus-in-cosmos-part-1-a7be84f0bf25)**
+
+**Deep Dive Topics**:
+- **Consensus mechanisms**: Understand the mathematical foundations of blockchain security
+- **Economic models**: Learn about staking economics and validator incentives
+- **Technical architecture**: Explore the engineering behind distributed consensus
+- **Network governance**: Understand validator roles in network decision-making
+
+## Prerequisites
+
+- **Technical expertise**: Strong understanding of blockchain technology and operations
+- **Infrastructure resources**: Reliable servers, networking, and monitoring systems
+- **Financial commitment**: Significant stake for validator operations and security
+- **Time availability**: Ongoing maintenance and network participation requirements
-CometBFT can be explored in the following two articles, among many other places:
+## Next Steps
-- [Staking and Delegation in Cosmos](https://medium.com/@notional-ventures/staking-and-delegation-in-cosmos-db660154bcf9)
-- [CometBFT: Security and Consensus in Cosmos](https://medium.com/@notional-ventures/cometbft-security-and-consensus-in-cosmos-part-1-a7be84f0bf25)
\ No newline at end of file
+- [Learn validator system requirements](/devs/validators/nop-requirements) for infrastructure planning
+- [Explore full node operations](/devs/validators/run-full-node) for hands-on experience
+- [Understand validator staking](/devs/validators/stake-a-validator) for economic participation
+- [Study validator operations](/devs/validators/validator-operations) for ongoing management
\ No newline at end of file
diff --git a/pages/devs/validators/deploy-chain.mdx b/pages/devs/validators/deploy-chain.mdx
index f90c0c5..4b1e92e 100644
--- a/pages/devs/validators/deploy-chain.mdx
+++ b/pages/devs/validators/deploy-chain.mdx
@@ -1,60 +1,161 @@
# Deploy Allora Appchain
-> We discuss the settlement layer for the Allora Network and how to deploy it
+## What You'll Learn
+- Understanding the Allora Appchain as the settlement layer for the network
+- How different network actors interact with the appchain and why
+- Complete deployment options using Docker Compose or Kubernetes with Helm
+- Managing chain parameters for testnets and development environments
-## What is the Appchain?
+## Overview
-The Allora Appchain is a Cosmos SDK appchain that serves as the settlement layer for the Allora Network. It serves to coordinate all incentives for all actors:
+> **We discuss the settlement layer for the Allora Network and how to deploy it**
-- The weights between reputers and workers, as well as a reference to the logic used to update those weights, are stored on-chain.
-- Rewards payable from inflation are calculated based on those weights at a global cadence on-chain.
-- Consumers pay for inferences to be collected and for all the above calculations to run. These funds get allocated to workers and reputers, respectively.
+**The Allora Appchain serves as the fundamental infrastructure layer that coordinates all network activities and incentives.** Understanding and deploying the appchain is essential for network operators, validators, and infrastructure providers.
-The appchain also coordinates actions between protocol actors.
+### Why Deploy the Appchain?
-- The appchain triggers requests to workers and reputers to collect inferences and run loss-calculation logic, respectively, as per each topic's respective inference and loss-calculation cadence.
-- The appchain collects a recent history of inferences in batches to later be scored by loss-calculation.
+**Infrastructure Control**:
+- **Network customization**: Deploy custom network configurations for specific use cases
+- **Development environments**: Create isolated environments for development and testing
+- **Infrastructure ownership**: Maintain control over blockchain infrastructure and operations
+- **Service provision**: Offer blockchain infrastructure services to network participants
-## Why and How might one interact with the Allora Appchain?
+## Architecture Overview
-Different actors interact with the Allora Appchain for different reasons. They do so via a standard client connection (such as [CosmJS](https://tutorials.cosmos.network/tutorials/7-cosmjs/1-cosmjs-intro.html)) or the [Appchain CLI](/devs/get-started/cli#installing-allorad).
+### What is the Appchain?
-- Data scientists interact with the Appchain to [register their worker nodes](/devs/reference/allorad#register-network-actor) and to [withdraw rewards](/devs/reference/allorad#remove-stake-from-a-topic) accrued for their inferences. These rewards are paid by both consumers and inflation based on their relative weight.
-- Developers interact with the Appchain to [create topics](/devs/reference/allorad#create-new-topic), fund topics, and perhaps also to [read recent inferences](/devs/reference/allorad#get-the-latest-network-inferences-and-weights-for-a-topic).
-- Validators run the Appchain and receive standard inflationary rewards for running Cosmos SDK appchains and a cut of the funds from consumers. They will also [register themselves](/devs/validators/stake-a-validator) on the Appchain so that they can be eligible for rewards.
+**The Allora Appchain is a Cosmos SDK appchain that serves as the settlement layer for the Allora Network.** It serves to coordinate all incentives for all actors:
-## Dependencies
+#### Core Functions
-- Create a set of keys and initialize genesis. See example in `scripts/init.sh`.
-- The script `scripts/l1_node.sh` is provided too, to facilitate configuration and maintenance of the node when connecting it to a network, downloading genesis,
+**Incentive Coordination**:
+- **The weights between reputers and workers, as well as a reference to the logic used to update those weights, are stored on-chain.**
+- **Rewards payable from inflation are calculated based on those weights at a global cadence on-chain.**
+- **Consumers pay for inferences to be collected and for all the above calculations to run. These funds get allocated to workers and reputers, respectively.**
-## Deploy with docker-compose
+**The appchain also coordinates actions between protocol actors.**
-There is a `docker-compose.yml` provided that sets up a validator node.
+**Protocol Coordination**:
+- **The appchain triggers requests to workers and reputers to collect inferences and run loss-calculation logic, respectively, as per each topic's respective inference and loss-calculation cadence.**
+- **The appchain collects a recent history of inferences in batches to later be scored by loss-calculation.**
-### Run
+#### Technical Architecture Benefits
-Once this is set up, run `docker compose up`.
+**Cosmos SDK Foundation**:
+- **Proven technology**: Built on battle-tested Cosmos SDK framework
+- **Interoperability**: Compatible with Cosmos ecosystem and IBC protocol
+- **Governance**: Built-in governance mechanisms for parameter management
+- **Security**: Inherits security model from Cosmos SDK and Tendermint consensus
-## Deploy in k8s with helm chart
+**Settlement Layer Design**:
+- **Finality**: Provides final settlement for all network transactions
+- **Transparency**: All weights, rewards, and incentives are publicly verifiable
+- **Efficiency**: Optimized for batch processing of inference data and rewards
+- **Scalability**: Designed to handle high volumes of AI/ML network activity
-Upshot team uses a [universal-helm](https://upshot-tech.github.io/helm-charts/) chart to deploy applications into kubernetes clusters.
-There is a `index-provider/values.yaml` provided that sets up one head node and one worker node.
+## Network Interaction Patterns
+
+### Why and How might one interact with the Allora Appchain?
+
+**Different actors interact with the Allora Appchain for different reasons.** They do so via a standard client connection (such as [CosmJS](https://tutorials.cosmos.network/tutorials/7-cosmjs/1-cosmjs-intro.html)) or the [Appchain CLI](/devs/get-started/quick-start#install-the-allora-cli).
+
+#### Data Scientists (Workers)
+
+**Worker Interactions**:
+- **Data scientists interact with the Appchain to [register their worker nodes](/devs/reference/allorad#register-network-actor) and to [withdraw rewards](/devs/reference/allorad#remove-stake-from-a-topic) accrued for their inferences.** These rewards are paid by both consumers and inflation based on their relative weight.
+
+**Worker Benefits**:
+- **Registration process**: Secure on-chain registration for network participation
+- **Reward claiming**: Direct access to earned rewards from inference provision
+- **Performance tracking**: On-chain record of worker performance and weights
+- **Economic participation**: Transparent reward system based on inference quality
+
+#### Developers (Consumers)
+
+**Developer Interactions**:
+- **Developers interact with the Appchain to [create topics](/devs/reference/allorad#create-new-topic), fund topics, and perhaps also to [read recent inferences](/devs/reference/allorad#get-the-latest-network-inferences-and-weights-for-a-topic).**
+
+**Developer Benefits**:
+- **Topic creation**: Establish new prediction markets and inference categories
+- **Funding management**: Control topic funding and inference request economics
+- **Data access**: Real-time access to network inferences and performance data
+- **Integration support**: Direct blockchain integration for application development
+
+#### Validators
+
+**Validator Interactions**:
+- **Validators run the Appchain and receive standard inflationary rewards for running Cosmos SDK appchains and a cut of the funds from consumers.** They will also [register themselves](/devs/validators/stake-a-validator) on the Appchain so that they can be eligible for rewards.
+
+**Validator Benefits**:
+- **Economic rewards**: Earn both inflation rewards and transaction fees
+- **Network security**: Contribute to network security and consensus
+- **Infrastructure provision**: Provide essential blockchain infrastructure services
+- **Governance participation**: Participate in network governance and parameter updates
+
+## Deployment Prerequisites
### Dependencies
-- You need to have configured `kubeconfig` file on the computer to connect to the cluster and deploy the node.
+**Setup Requirements**:
+- **Create a set of keys and initialize genesis.** See example in `scripts/init.sh`.
+- **The script `scripts/l1_node.sh` is also provided to facilitate configuration and maintenance of the node when connecting it to a network and downloading genesis.**
+
+**Preparation Benefits**:
+- **Key management**: Secure generation and management of validator keys
+- **Genesis setup**: Proper initialization of blockchain state
+- **Network configuration**: Automated setup for network connectivity
+- **Maintenance automation**: Scripts for ongoing node management
+
+## Deployment Options
+
+### Option 1: Deploy with Docker Compose
+
+**There is a `docker-compose.yml` provided that sets up a validator node.**
+
+#### Docker Deployment Process
+
+##### Run
+**Once this is set up, run `docker compose up`.**
+
+**Docker Benefits**:
+- **Simplified deployment**: Single command deployment with all dependencies
+- **Container isolation**: Isolated environment prevents conflicts with host system
+- **Configuration management**: Centralized configuration through Docker Compose
+- **Service orchestration**: Automatic coordination of all required services
+
+**Docker Use Cases**:
+- **Development environments**: Quick setup for development and testing
+- **Single-node deployment**: Simple deployment for individual validator nodes
+- **Container infrastructure**: Integration with existing Docker-based infrastructure
+- **Rapid prototyping**: Fast setup for experimentation and proof-of-concept
+
+### Option 2: Deploy in Kubernetes with Helm Chart
+
+**The Upshot team uses a [universal-helm](https://upshot-tech.github.io/helm-charts/) chart to deploy applications into Kubernetes clusters.**
+**There is an `index-provider/values.yaml` provided that sets up one head node and one worker node.**
+
+#### Kubernetes Prerequisites
+
+##### Dependencies
+**You need to have a configured `kubeconfig` file on the computer to connect to the cluster and deploy the node.**
-### Deploy with the Helm Chart
+**Kubernetes Requirements**:
+- **Cluster access**: Properly configured kubeconfig for cluster authentication
+- **Helm client**: Helm package manager installed and configured
+- **Cluster permissions**: Adequate permissions for application deployment
+- **Network connectivity**: Stable connection to Kubernetes cluster
-1. Add upshot Helm chart repo:
+#### Kubernetes Deployment Process
+
+##### Deploy with the Helm Chart
+
+1. **Add upshot Helm chart repo:**
```bash
helm repo add upshot https://upshot-tech.github.io/helm-charts
-
```
-2. Install helm chart with the given values file:
+2. **Install helm chart with the given values file:**
```bash
helm install \
@@ -63,10 +164,88 @@ helm install \
-f appchain/values.yaml
```
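+
+**Verify the deployment** (a quick sanity check; release and pod names depend on your values file and namespace):
+
+```bash
+# Confirm the Helm release installed
+helm list
+
+# Watch the appchain pods come up
+kubectl get pods -w
+```
+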
+**Kubernetes Benefits**:
+- **Production scalability**: Designed for production-grade deployments
+- **High availability**: Built-in redundancy and failover capabilities
+- **Resource management**: Automated resource allocation and scaling
+- **Monitoring integration**: Native integration with Kubernetes monitoring
+
+**Kubernetes Use Cases**:
+- **Production networks**: Enterprise-grade deployment for live networks
+- **Multi-node clusters**: Coordinated deployment of multiple validator nodes
+- **Cloud infrastructure**: Integration with cloud-native deployment pipelines
+- **Enterprise environments**: Compliance with enterprise infrastructure requirements
+
+## Network Management
+
### Edit Chain Parameters
-The public mainnet uses standard cosmos governance modules to vote on global network parameters (such as reward epoch time in blocks, for example). For testnets and devnets, however, you can use the following allorad CLI command to set the global parameters of the blockchain if you are whitelisted to do so. The parameters below are just example values:
+**The public mainnet uses standard cosmos governance modules to vote on global network parameters (such as the reward epoch time in blocks).** For testnets and devnets, however, you can use the following allorad CLI command to set the global parameters of the blockchain if you are whitelisted to do so. The parameters below are just example values:
-```Text bash
+```bash
allorad tx emissions update-params "$VALIDATOR_KEY_FOR_TX_SEND" '{"version":["v0.0.4"], "min_topic_weight":["5"], "max_topics_per_block":[50]}'
```
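+
+To confirm a parameter change took effect, you can read the module parameters back afterwards (a sketch, assuming the standard emissions params query is exposed by your build):
+
+```bash
+allorad query emissions params
+```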
+
+#### Parameter Management Strategy
+
+**Governance Models**:
+- **Mainnet governance**: Democratic governance through token holder voting
+- **Testnet flexibility**: Direct parameter updates for development and testing
+- **Access control**: Whitelist system for testnet parameter management
+- **Example parameters**: Version, minimum topic weight, and maximum topics per block
+
+**Parameter Categories**:
+- **Network performance**: Parameters affecting throughput and efficiency
+- **Economic policy**: Settings controlling rewards and incentives
+- **Quality control**: Parameters ensuring network quality standards
+- **System limits**: Constraints preventing network abuse and overload
+
+## Deployment Best Practices
+
+### Infrastructure Planning
+
+**Deployment Strategy**:
+- **Environment selection**: Choose appropriate deployment method based on use case
+- **Resource allocation**: Plan adequate computing resources for validator operations
+- **Network connectivity**: Ensure stable internet connection and proper networking
+- **Security considerations**: Implement proper security measures for key management
+
+### Monitoring and Maintenance
+
+**Operational Excellence**:
+- **Health monitoring**: Set up monitoring for node health and performance
+- **Log management**: Implement comprehensive logging for troubleshooting
+- **Backup procedures**: Maintain backups of critical configuration and state data
+- **Update procedures**: Plan for software updates and network upgrades
+
+## Troubleshooting
+
+### Common Issues
+
+**Deployment Problems**:
+- **Key generation**: Ensure proper key generation and management
+- **Network connectivity**: Verify network access and peer connectivity
+- **Configuration errors**: Validate configuration files and parameters
+- **Resource constraints**: Ensure adequate system resources for operation
+
+### Support Resources
+
+**Getting Help**:
+- **Documentation**: Comprehensive guides and references
+- **Community support**: Developer forums and community assistance
+- **Script examples**: Provided scripts for common operations
+- **Technical support**: Professional support for enterprise deployments
+
+## Prerequisites
+
+- **Blockchain operations**: Understanding of blockchain node operations and maintenance
+- **Container technology**: Familiarity with Docker and Kubernetes for deployment
+- **Network administration**: Basic network configuration and troubleshooting skills
+- **System administration**: Linux system administration and security practices
+
+## Next Steps
+
+- [Learn validator operations](/devs/validators/validator-operations) for ongoing node management
+- [Study system requirements](/devs/validators/nop-requirements) for infrastructure planning
+- [Explore the CLI reference](/devs/reference/allorad) for command-line operations
+- [Review staking procedures](/devs/validators/stake-a-validator) for validator registration
diff --git a/pages/devs/validators/nop-requirements.mdx b/pages/devs/validators/nop-requirements.mdx
index 1359415..c78eed9 100644
--- a/pages/devs/validators/nop-requirements.mdx
+++ b/pages/devs/validators/nop-requirements.mdx
@@ -1,34 +1,153 @@
# System Requirements
-You can use any modern Linux distribution to run an Allora validator.
+## What You'll Learn
+- Complete hardware and software requirements for running Allora validators
+- Understanding validator responsibilities and infrastructure requirements
+- How validators execute loss-calculation logic using WASM and IPFS
+- Current access requirements and application process for validator participation
+
+## Overview
+
+**You can use any modern Linux distribution to run an Allora validator.**
Internally we use **Debian 12** x86_64.
-## MAINNET and TESTNET validators' requirements:
+### Why These Requirements Matter
+
+**Infrastructure Importance**:
+- **Network reliability**: Robust hardware ensures consistent validator performance
+- **Consensus participation**: Adequate resources enable proper blockchain consensus
+- **Scalability**: Sufficient capacity handles growing network demands
+- **Security**: Professional-grade infrastructure protects network integrity
+
+## Hardware Specifications
+
+### Production Requirements
+
+**MAINNET and TESTNET validators' requirements:**
+
+- **CPU**: ≥6 cores, ≥12 threads
+- **Memory**: ≥64 GB
+- **Disk**: SSD or NVMe, ≥1.92 TB total
+- **Bandwidth**: ≥1 Gbit/s guaranteed
+
+#### Specification Rationale
+
+**CPU Requirements**:
+- **Multi-threading**: Parallel processing for consensus and WASM execution
+- **Performance**: Adequate processing power for real-time network operations
+- **Reliability**: Professional-grade processors for continuous operation
+
+**Memory Requirements**:
+- **Large datasets**: Handle blockchain state and historical data
+- **Caching**: Improve performance through in-memory data storage
+- **Future-proofing**: Accommodate network growth and increased activity
+
+**Storage Requirements**:
+- **High-speed access**: SSD/NVMe for fast data retrieval and blockchain operations
+- **Capacity planning**: Sufficient space for blockchain data and growth
+- **Reliability**: Enterprise-grade storage for data integrity
+
+**Network Requirements**:
+- **Guaranteed bandwidth**: Consistent network performance for consensus participation
+- **Latency optimization**: Fast communication with other validators
+- **Redundancy**: Multiple network connections for reliability
+
+## Access and Participation
+
+### Current Status
+
+**Note: Participating as a validator is temporarily allowed only for whitelisted accounts.** The Upshot Team currently has access to whitelisted addresses. We plan to make this action permissionless soon. In the meantime, those interested in becoming validators should reach out [here](https://docs.google.com/forms/d/e/1FAIpQLScj2rGAjFAAPZANrr2vZr_WAmLhniHn2x_l8K7EQcJ1i8XqHw/viewform).
+
+#### Application Process
+
+**How to Apply**:
+- **Submit application**: Complete the Google Form with validator information
+- **Wait for review**: Upshot Team evaluates applications for network suitability
+- **Receive whitelist**: Approved validators gain access to network participation
+- **Follow setup**: Complete validator deployment following technical documentation
+
+**Future Access**:
+- **Permissionless transition**: Network will become open to all qualified participants
+- **Democratic participation**: Removal of centralized approval requirements
+- **Community governance**: Validator participation based on technical and economic criteria
+
+## Validator Responsibilities
+
+### Core Functions
+
+**Validators are responsible for operating most of the infrastructure associated with instantiating the Allora Network. They do this in three ways:**
+
+1. **Staking in worker nodes (data scientists)** based on their confidence in said workers' abilities to produce accurate inferences.
+2. **Operating the appchain as Cosmos validators.**
+3. **Executing loss-calculation logic off-chain**, as described under Technical Architecture below.
+
+#### Operational Duties
+
+**Infrastructure Management**:
+- **Network security**: Maintain blockchain consensus and transaction validation
+- **Worker evaluation**: Assess and stake on promising data science participants
+- **System monitoring**: Ensure continuous uptime and performance optimization
+- **Community participation**: Engage in governance and network decision-making
+
+## Technical Architecture
+
+### WASM-Based Loss Calculation
+
+**Executing Loss-Calculation Logic Off-Chain:**
+
+**The topic-specific logic run by validators is compiled to WASM and stored on IPFS.** Our appchain calls upon validators to execute this logic in every `topic.loss_cadence`-length epoch. Running this WASM involves querying for the following values:
+
+#### Data Requirements
+
+**WASM Execution Process**:
+- **Current set of losses** between reputers and workers
+- **Inferences from the past epoch**
+- **The revealed ground truth** consisting of up to `topic.inference_cadence/topic.loss_cadence`-many values. In other words, it entails one ground truth value for each inference cadence within the preceding loss-calculation epoch.
+
+#### Transaction Processing
+
+**The WASM also involves committing the new losses to the appchain in a transaction. This is ultimately done by one b7s node.**
+
+**Execution Benefits**:
+- **Cost efficiency**: Off-chain computation reduces validator operating expenses
+- **Flexibility**: Topic creators can write loss-calculation logic in any WASM-compatible language
+- **Maintainability**: Reduces need for frequent node software upgrades
+- **Scalability**: Module source code remains unchanged as new topics are added
-- CPU: ≥6cores, ≥12 threads
-- Memory: ≥64GB
-- Disk: SSD or NVMe ≥1.92 TB total
-- Bandwidth: ≥1Gbit/s guaranteed
+### Technical Advantages
-## Note
+**Off-Chain Processing Benefits**:
+- **Computing loss-calculation logic off-chain saves the network validators operating expenses** (because less is run on-chain)
+- **Allows topic creators to write loss-calculation logic in any language** (that compiles to WASM)
+- **Lessens the need for frequent node software upgrades** (because the module source code remains unchanged even as new topics are added, each with their specific loss-calculation schemes)
-Participating as a validator is temporarily allowed only for whitelisted accounts. The Upshot Team currently has access to whitelisted addresses. We plan to make this action permissionless soon. In the meantime, those interested in becoming validators should reach out [here](https://docs.google.com/forms/d/e/1FAIpQLScj2rGAjFAAPZANrr2vZr_WAmLhniHn2x_l8K7EQcJ1i8XqHw/viewform).
+## Planning and Preparation
-## Responsibilities
+### Infrastructure Assessment
-Validators are responsible for operating most of the infrastructure associated with instantiating the Allora Network. They do this in three ways:
+**Pre-Deployment Checklist**:
+- **Hardware procurement**: Ensure all system requirements are met or exceeded
+- **Network setup**: Configure high-speed, reliable internet connectivity
+- **Operating system**: Install and configure supported Linux distribution
+- **Security measures**: Implement firewalls, monitoring, and backup systems
-1. Staking in worker nodes (data scientists) based on their confidence in said workers' abilities to produce accurate inferences.
-2. Operating the appchain as Cosmos validators.
+### Cost Considerations
-## Executing Loss-Calculation Logic Off-Chain
+**Financial Planning**:
+- **Hardware investment**: Professional-grade servers and networking equipment
+- **Operational costs**: Electricity, internet, and facility expenses
+- **Maintenance budget**: Ongoing upgrades and support requirements
+- **Stake requirements**: Initial and ongoing token commitments
-The topic-specific logic ran by validators is compiled to WASM and stored on IPFS. Our appchain calls upon validators to execute this logic in every `topic.loss_cadence`-length epoch. Running this WASM involves querying for the following values:
+## Prerequisites
-- Current set of losses between reputers and workers
-- Inferences from the past epoch
-- The revealed ground truth consisting of up to `topic.inference_cadence/topic.loss_cadence`-many values. In other words, it entails one ground truth value for each inference cadence within the preceding loss-calculation epoch.
+- **Technical expertise**: Strong understanding of blockchain operations and Linux systems
+- **Infrastructure resources**: Professional-grade hardware and networking
+- **Financial commitment**: Significant investment in equipment and operations
+- **Time availability**: Ongoing monitoring and maintenance responsibilities
-The WASM also involves committing the new losses to the appchain in a transaction. This is ultimately done by one b7s node.
+## Next Steps
-Computing loss-calculation logic off-chain saves the network validators operating expenses (because less is run on-chain), allows topic creators to write loss-calculation logic in any language (that compiles to WASM), and lessens the need for frequent node software upgrades (because the module source code remains unchanged even as new topics are added, each with their specific loss-calculation schemes).
+- [Apply for validator whitelist](https://docs.google.com/forms/d/e/1FAIpQLScj2rGAjFAAPZANrr2vZr_WAmLhniHn2x_l8K7EQcJ1i8XqHw/viewform) to gain network access
+- [Learn to run a full node](/devs/validators/run-full-node) for hands-on experience
+- [Understand validator operations](/devs/validators/validator-operations) for ongoing management
+- [Study validator economics](/devs/validators/stake-a-validator) for financial planning
diff --git a/pages/devs/validators/run-full-node.mdx b/pages/devs/validators/run-full-node.mdx
index 26cb25e..a502190 100644
--- a/pages/devs/validators/run-full-node.mdx
+++ b/pages/devs/validators/run-full-node.mdx
@@ -2,47 +2,96 @@ import { Callout } from 'nextra/components'
# Running a full node
-> How to become a Validator on Allora
+## What You'll Learn
+- Complete setup process for running a full Allora Network validator node
+- Two deployment methods: systemd with cosmosvisor (recommended) and Docker Compose
+- Network configuration including genesis, peers, and security settings
+- Production-grade node management and monitoring best practices
-This guide provides instructions on how to run a full node for the Allora network. There are two primary methods for running an Allora node: using systemd with cosmosvisor for easier upgrade management (recommended) or using docker compose. It's important to choose the method that best suits your environment and needs.
+## Overview
+
+> **How to become a Validator on Allora**
+
+**This guide provides instructions on how to run a full node for the Allora network.** There are two primary methods for running an Allora node: using systemd with cosmosvisor for easier upgrade management (recommended) or using docker compose. It's important to choose the method that best suits your environment and needs.
+
+### Why Run a Full Node?
+
+**Network Participation**:
+- **Validator eligibility**: Full nodes can become validators with proper staking
+- **Network security**: Contribute to network decentralization and security
+- **Data access**: Direct access to all blockchain data without third-party dependencies
+- **Economic opportunity**: Earn rewards through validation and staking
+
+**Technical Benefits**:
+- **Complete control**: Full control over node configuration and operations
+- **Reliability**: Eliminate dependence on external RPC providers
+- **Performance**: Direct blockchain access with minimal latency
+- **Development support**: Essential for blockchain development and testing
***
## Prerequisites
-- Git
-- Go (version 1.21 or later)
-- Basic command-line knowledge
-- Linux/Unix environment with systemd
-- curl and jq utilities
+**System Requirements**:
+- **Git**: Version control system for repository management
+- **Go (version 1.21 or later)**: Programming language required for building binaries
+- **Basic command-line knowledge**: Familiarity with terminal operations
+- **Linux/Unix environment with systemd**: Production-grade operating system
+- **curl and jq utilities**: Command-line tools for data retrieval and processing
+
+**Infrastructure Considerations**:
+- **Adequate hardware**: Sufficient CPU, RAM, and storage for blockchain operations
+- **Reliable internet**: Stable, high-bandwidth connection for peer synchronization
+- **Security measures**: Firewall configuration and key management procedures
+- **Monitoring tools**: System monitoring for production node operations
***
## Method 1: Using systemd with cosmosvisor (Recommended)
-Running the Allora node with systemd and cosmosvisor provides production-grade reliability and easier binary upgrade management. This is the recommended approach for validators and production environments.
+**Running the Allora node with systemd and cosmosvisor provides production-grade reliability and easier binary upgrade management.** This is the recommended approach for validators and production environments.
+
+### Why Cosmosvisor is Recommended
+
+**Upgrade Management**:
+- **Automated upgrades**: Seamless binary upgrades without manual intervention
+- **Downtime minimization**: Reduces node downtime during network upgrades
+- **Rollback capability**: Ability to rollback to previous versions if needed
+- **Production reliability**: Battle-tested upgrade management for Cosmos networks
+
+**Operational Benefits**:
+- **Service management**: Integration with systemd for proper process management
+- **Monitoring support**: Better integration with system monitoring tools
+- **Log management**: Centralized logging through systemd journal
+- **Process supervision**: Automatic restart on failures and proper signal handling
### Step 1: Install cosmosvisor
-First, install cosmosvisor, which will manage binary upgrades:
+**First, install cosmosvisor, which will manage binary upgrades:**
```shell
go install cosmossdk.io/tools/cosmovisor/cmd/cosmovisor@latest
```
-Verify the installation:
+**Verify the installation:**
```shell
cosmovisor version
```
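+
+**If `cosmovisor` is not found after installation**, the Go bin directory is likely missing from your `PATH` (adjust for your shell profile):
+
+```shell
+# go install places binaries under $(go env GOPATH)/bin
+export PATH="$PATH:$(go env GOPATH)/bin"
+```
+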
+**Installation Benefits**:
+- **Upgrade automation**: Automatic handling of chain upgrades and binary updates
+- **Version management**: Proper versioning and rollback capabilities
+- **Service integration**: Seamless integration with systemd service management
+- **Community standard**: Industry-standard tool for Cosmos SDK networks
+
### Step 2: Install allorad Binary
-Download the latest `allorad` binary from the releases page:
+**Download the latest `allorad` binary from the releases page:**
-1. Navigate to the [Allora Chain Releases page](https://github.com/allora-network/allora-chain/releases/latest).
-2. Download the `allorad` binary appropriate for your operating system (e.g., `allorad-linux-amd64`, `allorad-darwin-amd64`).
-3. Rename and move the binary to a standard location:
+1. **Navigate to the [Allora Chain Releases page](https://github.com/allora-network/allora-chain/releases/latest).**
+2. **Download the `allorad` binary appropriate for your operating system (e.g., `allorad-linux-amd64`, `allorad-darwin-amd64`).**
+3. **Rename and move the binary to a standard location:**
```shell
# Rename the downloaded binary
@@ -55,17 +104,29 @@ sudo mv ./allorad /usr/local/bin/allorad
sudo chmod +x /usr/local/bin/allorad
```
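+
+**Confirm the binary is installed and executable** (version output varies by release):
+
+```shell
+allorad version
+```
+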
+**Binary Management**:
+- **Version verification**: Ensure you're running the correct binary version
+- **Path configuration**: Proper binary placement for system-wide access
+- **Security**: Appropriate permissions for binary execution
+- **Update process**: Clear process for binary updates and upgrades
+
### Step 3: Initialize the Node
-Initialize your node (replace `<moniker>` with your desired node name):
+**Initialize your node (replace `<moniker>` with your desired node name):**
```shell
allorad init <moniker> --chain-id allora-testnet-1
```
+**Node Initialization Benefits**:
+- **Identity establishment**: Create unique node identity on the network
+- **Configuration setup**: Generate initial configuration files and directories
+- **Key generation**: Create validator keys for node operation
+- **Network preparation**: Prepare node for network connection and synchronization
+
### Step 4: Download Network Configuration
-Download the testnet configuration files:
+**Download the testnet configuration files:**
```shell
# Download genesis.json
@@ -78,9 +139,14 @@ curl -s https://raw.githubusercontent.com/allora-network/networks/main/allora-te
curl -s https://raw.githubusercontent.com/allora-network/networks/main/allora-testnet-1/app.toml > $HOME/.allorad/config/app.toml
```
+**Configuration File Purposes**:
+- **genesis.json**: Initial blockchain state and network parameters
+- **config.toml**: Node operational configuration and network settings
+- **app.toml**: Application-specific settings and feature configurations
+
### Step 5: Configure Seeds and Peers
-Configure seeds and persistent peers for network connectivity:
+**Configure seeds and persistent peers for network connectivity:**
```shell
# Fetch and set seeds
@@ -92,121 +158,69 @@ PEERS=$(curl -s https://raw.githubusercontent.com/allora-network/networks/main/a
sed -i.bak -e "s/^persistent_peers *=.*/persistent_peers = \"$PEERS\"/" $HOME/.allorad/config/config.toml
```
+**Network Connectivity Strategy**:
+- **Seed nodes**: Initial connection points for network discovery
+- **Persistent peers**: Reliable connections maintained throughout operation
+- **Network discovery**: Automatic peer discovery through existing connections
+- **Redundancy**: Multiple connection points for network reliability
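+
+**Spot-check the resulting configuration** (a simple grep, assuming the default home directory):
+
+```shell
+grep -E "^(seeds|persistent_peers)" $HOME/.allorad/config/config.toml
+```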
+
### Step 6: Configure cosmosvisor
-Set up the cosmosvisor directory structure and environment:
+**Set up the cosmosvisor directory structure and environment:**
```shell
# Set environment variables
export DAEMON_NAME=allorad
export DAEMON_HOME=$HOME/.allorad
-export DAEMON_RESTART_AFTER_UPGRADE=true
# Create cosmosvisor directories
-mkdir -p $DAEMON_HOME/cosmovisor/genesis/bin
-mkdir -p $DAEMON_HOME/cosmovisor/upgrades
-
-# Copy the current binary to genesis
-cp /usr/local/bin/allorad $DAEMON_HOME/cosmovisor/genesis/bin/
+cosmovisor init /usr/local/bin/allorad
```
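+
+**Check the resulting layout** (recent cosmovisor versions create it automatically; exact contents may vary):
+
+```shell
+# Expect the current binary under genesis/bin; upgrade binaries go under upgrades/<name>/bin
+ls -R $DAEMON_HOME/cosmovisor
+```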
-### Step 7: Configure State Sync (Optional but Recommended)
-
-State sync allows your node to quickly catch up with the network. Create and run this state sync script:
-
-```shell
-cat > state_sync.sh << 'EOF'
-#!/bin/bash
-
-set -e
-
-# Choose your preferred RPC endpoint
-SNAP_RPC="https://allora-rpc.testnet.allora.network"
-CONFIG_TOML_PATH="$HOME/.allorad/config/config.toml"
-
-echo "Using RPC Endpoint: $SNAP_RPC"
-echo "Fetching latest block height..."
+**Directory Structure Benefits**:
+- **Version management**: Organized binary versions for upgrade management
+- **Configuration isolation**: Separate configurations for different versions
+- **Backup capability**: Automatic backup of previous versions
+- **Service integration**: Proper directory structure for systemd service
-LATEST_HEIGHT=$(curl -s $SNAP_RPC/block | jq -r .result.block.header.height)
-if [ -z "$LATEST_HEIGHT" ] || [ "$LATEST_HEIGHT" == "null" ]; then
- echo "Error: Could not fetch latest height"
- exit 1
-fi
+### Step 7: Create systemd Service
-BLOCK_HEIGHT_OFFSET=2000
-BLOCK_HEIGHT=$((LATEST_HEIGHT - BLOCK_HEIGHT_OFFSET))
-
-echo "Fetching trust hash for block $BLOCK_HEIGHT..."
-TRUST_HASH=$(curl -s "$SNAP_RPC/block?height=$BLOCK_HEIGHT" | jq -r .result.block_id.hash)
-if [ -z "$TRUST_HASH" ] || [ "$TRUST_HASH" == "null" ]; then
- echo "Error: Could not fetch trust hash"
- exit 1
-fi
-
-echo "Updating config for state sync..."
-RPC_SERVERS="$SNAP_RPC,$SNAP_RPC"
-
-sed -i.bak -E \
- -e "s|^(enable[[:space:]]*=[[:space:]]*).*$|\\1true|" \
- -e "s|^(rpc_servers[[:space:]]*=[[:space:]]*).*$|\\1\"$RPC_SERVERS\"|" \
- -e "s|^(trust_height[[:space:]]*=[[:space:]]*).*$|\\1$BLOCK_HEIGHT|" \
- -e "s|^(trust_hash[[:space:]]*=[[:space:]]*).*$|\\1\"$TRUST_HASH\"|" \
- "$CONFIG_TOML_PATH"
-
-echo "State sync configuration updated successfully"
-EOF
-
-chmod +x state_sync.sh
-./state_sync.sh
-```
-
-### Step 8: Reset Node Data
-
-Reset existing data while keeping the address book:
-
-```shell
-allorad tendermint unsafe-reset-all --home $HOME/.allorad --keep-addr-book
-```
-
-
-**Warning**: This command deletes blockchain data. Only run this on a fresh node or when you intend to resync from scratch.
-
-
-### Step 9: Create systemd Service
-
-Create a systemd service file for cosmosvisor:
+**Create a systemd service file for the node.** The unit below is a representative sketch built around the cosmovisor environment from Step 6; adjust `User`, paths, and limits to your deployment:
```shell
sudo tee /etc/systemd/system/allorad.service > /dev/null <<EOF
[Unit]
Description=Allora full node (cosmovisor)
After=network-online.target

[Service]
User=$USER
ExecStart=$(which cosmovisor) run start
Environment="DAEMON_NAME=allorad"
Environment="DAEMON_HOME=$HOME/.allorad"
Environment="DAEMON_RESTART_AFTER_UPGRADE=true"
# Keep false so upgrade binaries are placed manually (safer for validators)
Environment="DAEMON_ALLOW_DOWNLOAD_BINARIES=false"
Restart=always
RestartSec=3
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF
```
-**Security Note**: `DAEMON_ALLOW_DOWNLOAD_BINARIES` is set to `false` for security. Validators should manually place upgrade binaries in the appropriate directories.
-
+**Service Configuration Benefits**:
+- **Automatic startup**: Node starts automatically on system boot
+- **Process management**: Proper process supervision and restart handling
+- **Resource limits**: Appropriate resource limits for stable operation
+- **Environment isolation**: Clean environment variables for node operation
-### Step 10: Start the Service
+### Step 8: Enable and Start Service
-Enable and start the systemd service:
+**Enable and start the systemd service:**
```shell
sudo systemctl daemon-reload
@@ -214,159 +228,154 @@ sudo systemctl enable allorad
sudo systemctl start allorad
```
-### Monitoring and Management
-
-Monitor your node logs:
-
-```shell
-sudo journalctl -u allorad -f
-```
-
-Check service status:
-
-```shell
-sudo systemctl status allorad
+**Service Management**:
+- **Status monitoring**: Check service status with `systemctl status allorad`
+- **Log viewing**: View logs with `journalctl -fu allorad`
+- **Service control**: Start, stop, restart with systemctl commands
+- **Automatic recovery**: Service automatically restarts on failures
+
+## Method 2: Using Docker Compose
+
+**Docker Compose provides an alternative deployment method with containerization benefits.** This approach is suitable for development environments and users familiar with Docker.
+
+### Docker Benefits
+
+**Containerization Advantages**:
+- **Environment isolation**: Consistent environment across different systems
+- **Easy deployment**: Single command deployment with all dependencies
+- **Resource management**: Better resource allocation and monitoring
+- **Development flexibility**: Easy to modify and experiment with configurations
+
+### Docker Setup Process
+
+**Create docker-compose.yml:**
+
+```yaml
+version: '3.8'
+services:
+ allora-node:
+ image: alloranetwork/allora-chain:latest
+ container_name: allora-validator
+ ports:
+ - "26656:26656" # P2P port
+ - "26657:26657" # RPC port
+ - "1317:1317" # REST API port
+ volumes:
+ - ./data:/root/.allorad
+ - ./config:/root/.allorad/config
+ environment:
+ - MONIKER=your-moniker-name
+ command: allorad start
+ restart: unless-stopped
```
-Check sync status:
+**Container Configuration**:
+- **Port mapping**: Expose necessary ports for network communication
+- **Volume mounting**: Persistent data storage outside containers
+- **Environment variables**: Configuration through environment settings
+- **Restart policy**: Automatic container restart on failures
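+
+**With the file in place, the standard Compose workflow applies:**
+
+```shell
+docker compose up -d    # start the node in the background
+docker compose logs -f  # follow node logs
+docker compose down     # stop and remove the container
+```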
-```shell
-curl -s http://localhost:26657/status | jq .result.sync_info.catching_up
-```
+## Node Operation and Maintenance
-Once this returns `false`, your node is fully synced.
+### Monitoring Node Health
-### Managing Upgrades with cosmosvisor
-
-When a governance upgrade is approved, prepare for it by placing the new binary:
+**Key Metrics to Monitor**:
+- **Sync status**: Monitor blockchain synchronization progress
+- **Peer connections**: Track number and quality of peer connections
+- **Block height**: Ensure node is keeping up with network
+- **Resource usage**: Monitor CPU, memory, and disk usage
+**Monitoring Commands**:
```shell
-# For an upgrade named "v1.0.0", create the upgrade directory
-mkdir -p $DAEMON_HOME/cosmovisor/upgrades/v1.0.0/bin
-
-# Download and place the new binary (replace with actual URL)
-# wget NEW_BINARY_URL -O $DAEMON_HOME/cosmovisor/upgrades/v1.0.0/bin/allorad
-# chmod +x $DAEMON_HOME/cosmovisor/upgrades/v1.0.0/bin/allorad
-```
+# Check sync status
+allorad status | jq .sync_info
-
-**Info**: cosmosvisor will automatically switch to the new binary at the upgrade height specified in the governance proposal. Monitor governance proposals and prepare upgrade binaries in advance.
-
-
-***
+# Check peer connections
+curl -s localhost:26657/net_info | jq .result.n_peers
-## Method 2: Using `docker compose`
-
-Running the Allora node with `docker compose` simplifies the setup and ensures consistency across different environments, but requires manual upgrade management.
-
-### Step 1: Clone the Allora Chain Repository
-
-If you haven't already, clone the latest release of the [allora-chain repository](https://github.com/allora-network/allora-chain):
-
-```shell
-git clone https://github.com/allora-network/allora-chain.git
+# View recent logs
+journalctl -fu allorad --lines=100
```
-### Step 2: Run the Node with Docker Compose
+### Security Considerations
-Navigate to the root directory of the cloned repository and start the node using `docker compose`:
+**Node Security**:
+- **Firewall configuration**: Only expose necessary ports to the internet
+- **Key management**: Secure storage and backup of validator keys
+- **Access control**: Limit SSH and system access to authorized users
+- **Regular updates**: Keep system and software updated with security patches
-```shell
-cd allora-chain
-docker compose pull
-docker compose up
-```
+**Network Security**:
+- **DDoS protection**: Implement protection against network attacks
+- **Monitoring**: Set up alerts for unusual network activity
+- **Backup procedures**: Regular backups of node data and configuration
+- **Disaster recovery**: Plans for rapid node recovery in case of failures
-> run `docker compose up -d` to run the container in detached mode, allowing it to run in the background.
+### Upgrade Procedures
-
-**Info**: Don't forget to pull the images first, to ensure that you're using the latest images.
-
+**Cosmosvisor Upgrades** (Recommended Method):
+1. **Automatic detection**: Cosmosvisor automatically detects upgrade proposals
+2. **Binary preparation**: New binaries are prepared in advance (see the sketch after the manual process below)
+3. **Seamless transition**: Upgrade happens at designated block height
+4. **Rollback capability**: Can rollback if upgrade fails
-
-Make sure that any previous containers you launched are killed, before launching a new container that uses the same port.
+**Manual Upgrade Process**:
+1. **Stop the node**: Gracefully stop the running node
+2. **Backup data**: Create backup of current node state
+3. **Update binary**: Replace binary with new version
+4. **Restart node**: Start node with new binary
+5. **Monitor sync**: Ensure node syncs properly with network
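+
+**Preparing an upgrade binary for cosmovisor** typically looks like the sketch below (the upgrade name must match the governance proposal; the binary URL is a placeholder):
+
+```shell
+UPGRADE_NAME=v1.0.0  # must match the name in the upgrade proposal
+mkdir -p $DAEMON_HOME/cosmovisor/upgrades/$UPGRADE_NAME/bin
+
+# Download, verify, and install the new binary by hand:
+# wget <NEW_BINARY_URL> -O $DAEMON_HOME/cosmovisor/upgrades/$UPGRADE_NAME/bin/allorad
+# chmod +x $DAEMON_HOME/cosmovisor/upgrades/$UPGRADE_NAME/bin/allorad
+```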
-You can run the following command to kill any containers running on the same port:
-```bash
-docker container ls
-docker rm -f <container_id>
-```
-
+### Troubleshooting Common Issues
-#### Run Only a Node with Docker Compose
-In this case, you will use Allora's heads.
-##### Run
+**Sync Problems**:
+- **State sync**: Use state sync for faster initial synchronization
+- **Peer issues**: Check peer connectivity and configuration
+- **Disk space**: Ensure adequate disk space for blockchain data
+- **Network connectivity**: Verify internet connection and firewall settings
-```
-docker compose pull
-docker compose up node
-```
-To run only a head: `docker compose up head`
+**Performance Issues**:
+- **Resource allocation**: Increase system resources if needed
+- **Configuration tuning**: Optimize node configuration for performance
+- **Database optimization**: Consider database pruning and optimization
+- **Network optimization**: Optimize network settings for better performance
-
-**NOTE:** You also can comment the head service in the Dockerfile.
-
+## Best Practices
-### Monitoring Logs
+### Operational Excellence
-To view the node's logs, use the following command:
+**Node Management**:
+- **Regular monitoring**: Continuous monitoring of node health and performance
+- **Automated alerting**: Set up alerts for critical issues and downtime
+- **Documentation**: Maintain documentation of configurations and procedures
+- **Change management**: Implement proper change management processes
-```shell
-docker compose logs -f
-```
-
-### Executing RPC Calls
-
-You can interact with the running node through RPC calls. For example, to check the node's status:
-
-```shell
-curl -s http://localhost:26657/status | jq .
-```
-
-This command uses `curl` to send a request to the node's RPC interface and `jq` to format the JSON response.
-
-Once your node has finished syncing and is caught up with the network, this command will return `false`:
-
-```shell
-curl -so- http\://localhost:26657/status | jq .result.sync_info.catching_up
-```
-
-
-**Info**: The time required to sync depends on the chain's size and height.
-
- - For newly launched chains, syncing will take **minutes**.
- - Established chains like Ethereum can take around **a day** to sync using Nethermind or similar clients.
- - Some chains may take **several days** to sync.
- - Syncing an archival node will take significantly more time.
-
-
-
-**Warning**: Network participants will not be able to connect to your node until it is finished syncing and the command above returns `false`.
-
+### Security Best Practices
-### Syncing from Snapshot
+**Infrastructure Security**:
+- **Key rotation**: Regular rotation of security keys and credentials
+- **Access logging**: Log and monitor all access to node systems
+- **Network segmentation**: Isolate node infrastructure from other systems
+- **Regular audits**: Periodic security audits and vulnerability assessments
-Users can also opt to sync their nodes from our [latest snapshot script](https://github.com/allora-network/allora-chain/blob/main/scripts/restore_snapshot.sh) following the instructions below:
+### Performance Optimization
-1. Install [`rclone`](https://rclone.org/), a command-line program to manage files on cloud storage
+**System Tuning**:
+- **Resource monitoring**: Continuous monitoring of system resources
+- **Performance tuning**: Regular optimization of system and application settings
+- **Capacity planning**: Plan for future growth and resource needs
+- **Load balancing**: Distribute load across multiple nodes if needed
-```bash
-brew install rclone
-```
-
-2. Follow the instructions to configure `rclone` after running `rclone config` in the command line
-
-3. Uncomment the [following lines](https://github.com/allora-network/allora-chain/blob/ccad6d27e55b27a7ec3b2aebd7e55f1bc26798ed/scripts/l1_node.sh#L15) from your Allora Chain repository:
+## Prerequisites
-```go
-# uncomment this block if you want to restore from a snapshot
-# SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-# "${SCRIPT_DIR}/restore_snapshot.sh"
-```
+- **Technical expertise**: Strong understanding of blockchain technology and Linux system administration
+- **Infrastructure access**: Adequate server infrastructure with proper specifications
+- **Network connectivity**: Reliable, high-bandwidth internet connection
+- **Security knowledge**: Understanding of security best practices for blockchain nodes
-4. Run the node using Docker:
+## Next Steps
-```bash
-docker compose pull
-docker compose up -d
-```
\ No newline at end of file
+- [Learn about validator staking](/devs/validators/stake-a-validator) to begin earning rewards
+- [Study validator operations](/devs/validators/validator-operations) for ongoing management
+- [Review system requirements](/devs/validators/nop-requirements) for production deployment
+- [Explore network parameters](/devs/reference/params/consensus) to understand validator requirements
\ No newline at end of file
diff --git a/pages/devs/validators/software-upgrades.mdx b/pages/devs/validators/software-upgrades.mdx
index a3981ff..200c088 100644
--- a/pages/devs/validators/software-upgrades.mdx
+++ b/pages/devs/validators/software-upgrades.mdx
@@ -1,67 +1,251 @@
# Software Upgrades
-> How to upgrade the Allora software version during hard forks.
+## What You'll Learn
+- Understanding Allora Network's multi-component software architecture and upgrade requirements
+- Complete process for handling breaking version upgrades and hard forks
+- Step-by-step procedures for both developers and validator operators during upgrades
+- How to use Cosmovisor for automated upgrade management and execution
-The Allora network relies on multiple different pieces of software to do different tasks.
-For example the `allora-chain` repository handles the blockchain software that runs the chain, while
-the `offchain-node` repository performs off-chain tasks. Each piece of software may need to
-be upgraded separately.
+## Overview
+
+> **How to upgrade the Allora software version during hard forks.**
+
+**The Allora network relies on multiple different pieces of software to do different tasks.** For example, the `allora-chain` repository handles the blockchain software that runs the chain, while the `offchain-node` repository performs off-chain tasks. Each piece of software may need to be upgraded separately.
+
+### Why Software Upgrades Matter
+
+**Network Evolution**:
+- **Feature enhancements**: Add new capabilities and improve existing functionality
+- **Security patches**: Address vulnerabilities and strengthen network protection
+- **Performance optimization**: Improve efficiency and scalability
+- **Bug fixes**: Resolve issues and enhance network stability
+
+**Coordination Benefits**:
+- **Network consensus**: Ensure all validators upgrade simultaneously for network integrity
+- **Backward compatibility**: Maintain interoperability during transition periods
+- **Risk mitigation**: Minimize disruption through coordinated upgrade procedures
+- **Community coordination**: Enable democratic decision-making for network changes
+
+## Allora Network Architecture
+
+### Software Components
+
+**Multi-Repository Structure**:
+- **`allora-chain`**: Cosmos-SDK based blockchain handling core network operations
+- **`offchain-node`**: Performs specialized off-chain tasks and computations
+- **Additional components**: Various supporting services and tools
+
+**Independent Upgrade Paths**:
+- **Component isolation**: Each software component can be upgraded independently
+- **Selective updates**: Target specific components without affecting others
+- **Version synchronization**: Coordinate upgrades across related components when needed
+- **Rollback capability**: Ability to revert individual components if issues arise
## Allora-Chain Upgrades
-The `allora-chain` software is a cosmos-sdk based blockchain that runs the Allora network. New
-software releases are published on the
-Allora Chain [Github](https://github.com/allora-network/allora-chain/releases) page and are tagged
-with a version number. Upgrading to non-breaking versions is as simple as downloading the
-pre-built binaries or compiling the software from source and running the new version.
+**The `allora-chain` software is a cosmos-sdk based blockchain that runs the Allora network.** New software releases are published on the Allora Chain [Github](https://github.com/allora-network/allora-chain/releases) page and are tagged with a version number. Upgrading to non-breaking versions is as simple as downloading the pre-built binaries or compiling the software from source and running the new version.
+
+### Standard Version Updates
+
+**Non-Breaking Upgrades**:
+- **Simple process**: Download new binaries or compile from source
+- **No coordination needed**: Validators can upgrade at their own pace
+- **Backward compatibility**: New versions work with existing network state
+- **Minimal downtime**: Quick replacement of running software
+
+**Update Procedure**:
+1. **Download release**: Get the latest version from GitHub releases
+2. **Verify integrity**: Check signatures and hashes for security
+3. **Stop services**: Safely shutdown current validator processes
+4. **Replace binaries**: Update to new software version
+5. **Restart services**: Resume validator operations with new version
+
+### Breaking Version Upgrades
+
+**For breaking versions such as hard forks, or software upgrades requiring changes to the underlying state machine of the allora-chain, the upgrade process is more involved.** These upgrades require using the `gov` and `upgrade` cosmos-sdk modules to first propose and vote on a software upgrade, and then to execute the upgrade at a specific block height.
-### Upgrading to a Breaking Version
+#### Governance-Based Upgrade Process
-For breaking versions such as hard forks, or software upgrades requiring changes to the underlying
-state machine of the allora-chain, the upgrade process is more involved. These upgrades require
-using the `gov` and `upgrade` cosmos-sdk modules to first propose and vote on a software upgrade,
-and then to execute the upgrade at a specific block height.
+**Why Governance Is Required**:
+- **Network consensus**: Ensure all validators upgrade at the same block height
+- **Democratic process**: Allow community input on significant network changes
+- **Risk management**: Provide time for review and preparation before execution
+- **Coordination mechanism**: Synchronize complex upgrades across the network
-#### For Allora Chain Developers
+## Developer Upgrade Procedures
-For writing an upgrade the steps are roughly the following:
+**For writing an upgrade the steps are roughly the following:**
-1. In the `app/` [folder](https://github.com/allora-network/allora-chain/tree/main/app/upgrades),
-create a new folder for your upgrade.
-2. In that folder create a file that contains an `UpgradeName`, and a function `CreateUpgradeHandler` which
-returns a `"cosmossdk.io/x/upgrade/types".UpgradeHandler`.Optionally include a `UpgradeInfo` that is a json string telling the client software
-where to download the upgrade binary version e.g.
+#### Step 1: Create Upgrade Handler
+
+1. **In the `app/` [folder](https://github.com/allora-network/allora-chain/tree/main/app/upgrades), create a new folder for your upgrade.**
+2. **In that folder create a file that contains an `UpgradeName`, and a function `CreateUpgradeHandler` which returns a `"cosmossdk.io/x/upgrade/types".UpgradeHandler`.** Optionally include a `UpgradeInfo` that is a JSON string telling the client software where to download the upgrade binary version, e.g.
```golang
const UpgradeInfo = `'{"binaries":{"linux/amd64":"https://github.com/allora-network/allora-chain/releases/download/v9.9.9/allorad_amd64.tar.gz"}}'`
```
-3. Wire up the new upgrade handler to the chain by editing the `setupUpgradeHandlers` function in [app/upgrades.go](https://github.com/allora-network/allora-chain/blob/main/app/upgrades.go). You can see a reference for how to do this in the upgrade integration test [here](https://github.com/allora-network/allora-chain/blob/main/test/integration/upgrade.patch)
-4. If you're upgrading standard cosmos-sdk module versions you may have to tweak the `module.VersionMap` that the `CreateUpgradeHandler` returns/processes.
-5. If you're upgrading one of the Allora forked/created modules, you'll need to bump the `ConsensusVersion` for the module.
-6. In the module, have the `module.Configurator` do a `cfg.RegisterMigration` with the module name, the previous consensus
-version that is being upgraded from, and the function to run to do the migration as a parameter.
-7. Write a function that process the kv store or does whatever other migrations are necessary. Examples [here](https://github.com/allora-network/allora-chain/blob/main/x/emissions/migrations/v2/migrations.go) and [here](https://github.com/evmos/evmos/blob/main/x/evm/migrations/v7/migrate.go).
-8. Merge the PR, tag it appropriately and post it to the releases page.
-9. Create a Software Upgrade Proposal for validators to vote on. You can see a reference where this is done in the [proposeUpgrade](https://github.com/allora-network/allora-chain/blob/main/test/integration/upgrade_test.go) function in the integration tests.
-10. Convince all the validators to vote yes on the Software Upgrade Proposal, and run cosmovisor so that the upgrade will actually go through at the proposed block.
+**Upgrade Handler Benefits**:
+- **Automated execution**: Handles complex state migrations automatically
+- **Version coordination**: Ensures consistent upgrade across all validators
+- **Binary distribution**: Provides secure download locations for new software
+- **Error handling**: Manages upgrade failures and rollback scenarios
+
+#### Step 2: Wire Up Handler
+
+3. **Wire up the new upgrade handler to the chain by editing the `setupUpgradeHandlers` function in [app/upgrades.go](https://github.com/allora-network/allora-chain/blob/main/app/upgrades.go).** You can see a reference for how to do this in the upgrade integration test [here](https://github.com/allora-network/allora-chain/blob/main/test/integration/upgrade.patch)
+
+#### Step 3: Handle Module Versions
+
+4. **If you're upgrading standard cosmos-sdk module versions you may have to tweak the `module.VersionMap` that the `CreateUpgradeHandler` returns/processes.**
+5. **If you're upgrading one of the Allora forked/created modules, you'll need to bump the `ConsensusVersion` for the module.**
+
+#### Step 4: Implement Migrations
+
+6. **In the module, have the `module.Configurator` do a `cfg.RegisterMigration` with the module name, the previous consensus version that is being upgraded from, and the function to run to do the migration as a parameter.**
+7. **Write a function that processes the KV store or performs whatever other migrations are necessary.** Examples [here](https://github.com/allora-network/allora-chain/blob/main/x/emissions/migrations/v2/migrations.go) and [here](https://github.com/evmos/evmos/blob/main/x/evm/migrations/v7/migrate.go); a short sketch follows.
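+
+Steps 6 and 7 usually look something like this sketch; the module name, version numbers, and `MigrateStore` are placeholders, so treat the linked examples above as the authoritative implementations:
+
+```golang
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/module"
+)
+
+func (am AppModule) RegisterServices(cfg module.Configurator) {
+	// Runs when the upgrade handler calls RunMigrations and sees that this
+	// module's ConsensusVersion has moved from 1 to 2.
+	if err := cfg.RegisterMigration("emissions", 1, func(ctx sdk.Context) error {
+		return MigrateStore(ctx, am.keeper) // rewrite KV entries to the new schema
+	}); err != nil {
+		panic(err)
+	}
+}
+```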
+
+**Migration Process Benefits**:
+- **Data integrity**: Ensure existing data remains valid after upgrades
+- **State transformation**: Convert old data formats to new requirements
+- **Backward compatibility**: Handle version transitions smoothly
+- **Testing framework**: Validate migrations before network deployment
+
+#### Step 5: Release and Propose
+
+8. **Merge the PR, tag it appropriately and post it to the releases page.**
+9. **Create a Software Upgrade Proposal for validators to vote on.** You can see a reference where this is done in the [proposeUpgrade](https://github.com/allora-network/allora-chain/blob/main/test/integration/upgrade_test.go) function in the integration tests.
+10. **Convince all the validators to vote yes on the Software Upgrade Proposal, and run cosmovisor so that the upgrade will actually go through at the proposed block.**
+
+**Release Management**:
+- **Version tagging**: Use semantic versioning for clear version identification
+- **Release documentation**: Provide comprehensive upgrade notes and instructions
+- **Testing validation**: Ensure thorough testing before release
+- **Community communication**: Announce upgrades with sufficient advance notice
+
+## Validator Operator Procedures
+
+### For Allora Chain Validator Operators
+
+**For those running the chain software, you will have to perform an upgrade as follows:**
+
+#### Step 1: Setup Cosmovisor
+
+1. **Make sure you're running the `allorad` software with [Cosmovisor](https://docs.cosmos.network/main/build/tooling/cosmovisor) managing the process, with `DAEMON_NAME=allorad` and `DAEMON_HOME=/path/to/allorad/data/folder`.** Hopefully you've already run `cosmovisor init /path/to/allorad-binary` and have the `/path/to/allorad/data/folder/cosmovisor` directory set up.
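+
+As a rough sketch (the paths are placeholders for your own layout), the setup amounts to:
+
+```bash
+# Placeholder paths; adjust to your node's actual layout.
+export DAEMON_NAME=allorad
+export DAEMON_HOME=/path/to/allorad/data/folder
+
+# One-time init: copies the current binary into $DAEMON_HOME/cosmovisor/genesis/bin
+cosmovisor init /path/to/allorad-binary
+
+# Start the node under cosmovisor instead of invoking allorad directly
+cosmovisor run start --home "$DAEMON_HOME"
+```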
+
+**Cosmovisor Benefits**:
+- **Automated upgrades**: Handles binary replacement during network upgrades
+- **Zero-downtime**: Minimizes service interruption during upgrades
+- **Safety features**: Validates upgrades before execution
+- **Rollback capability**: Provides mechanisms to revert failed upgrades
+
+#### Step 2: Prepare Upgrade Binary
+
+2. **At some point the blockchain developers will provide you with an upgrade binary to place in that `/path/to/allorad/data/folder/cosmovisor` folder.** This may be optional if the `UpgradeInfo` is set correctly by the developers, but if you're the paranoid type you can always download the binary yourself ahead of the upgrade and put it in the right folder by hand.
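+
+If you do stage the binary by hand, it goes in Cosmovisor's standard upgrade layout. A sketch, where `$UPGRADE_NAME` must match the name in the on-chain upgrade plan and the checksum comes from the release page:
+
+```bash
+mkdir -p "$DAEMON_HOME/cosmovisor/upgrades/$UPGRADE_NAME/bin"
+cp ./allorad-new "$DAEMON_HOME/cosmovisor/upgrades/$UPGRADE_NAME/bin/allorad"
+
+# Compare this against the checksum published alongside the release.
+sha256sum "$DAEMON_HOME/cosmovisor/upgrades/$UPGRADE_NAME/bin/allorad"
+```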
+
+**Binary Preparation Strategy**:
+- **Manual download**: Secure control over upgrade timing and verification
+- **Automatic download**: Rely on UpgradeInfo for convenience
+- **Security verification**: Always verify binary signatures and checksums
+- **Backup procedures**: Maintain copies of both old and new binaries
+
+#### Step 3: Participate in Governance
+
+3. **When the developers put up the upgrade proposal to governance, be helpful and vote to make it pass.** You can do this via the CLI with `allorad tx gov vote $proposal_id yes --from $validator` or an example of doing this programmatically can be found in the integration test [voteOnProposal](https://github.com/allora-network/allora-chain/blob/main/test/integration/upgrade_test.go) function.
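+
+Before and after voting, you can sanity-check the proposal and your vote with the standard gov query commands (the IDs and addresses below are placeholders):
+
+```bash
+# Inspect the proposal and its voting window
+allorad q gov proposal $proposal_id
+
+# Cast the vote, then confirm it was recorded
+allorad tx gov vote $proposal_id yes --from $validator
+allorad q gov vote $proposal_id $validator_address
+```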
+
+**Governance Participation**:
+- **Proposal review**: Carefully evaluate upgrade proposals before voting
+- **Community engagement**: Discuss upgrades with other validators and community
+- **Vote responsibility**: Consider network impact when casting governance votes
+- **Timeline awareness**: Vote within proposal timeframes to ensure participation
+
+#### Step 4: Monitor Upgrade Execution
+
+4. **At the block height of the upgrade, the old software will panic; cosmovisor will catch the panic and restart the process using the new upgrade binary instead.** Monitor your logs appropriately to see the restart.
+
+**Upgrade Monitoring**:
+- **Log analysis**: Watch for upgrade trigger and successful restart
+- **Performance tracking**: Monitor validator performance after upgrade
+- **Network synchronization**: Ensure continued participation in consensus
+- **Issue identification**: Quickly identify and report any upgrade problems
+
+### Upgrade Execution Timeline
+
+**Pre-Upgrade Phase**:
+- **Proposal submission**: Developers submit upgrade proposal to governance
+- **Community review**: Validators and community review proposed changes
+- **Voting period**: Network participants vote on upgrade proposal
+- **Preparation time**: Validators prepare infrastructure for upgrade
+
+**Upgrade Execution**:
+- **Block height trigger**: Upgrade executes at predetermined block height
+- **Automatic migration**: State migrations and binary replacement occur
+- **Network restart**: All validators restart with new software simultaneously
+- **Validation**: Network confirms successful upgrade completion
+
+## Troubleshooting Upgrade Issues
+
+### Common Problems
+
+**Upgrade Failures**:
+- **Binary issues**: Incorrect or corrupted upgrade binaries
+- **Migration errors**: State migration failures during upgrade
+- **Network desynchronization**: Validators upgrading at different times
+- **Infrastructure problems**: Hardware or network issues during upgrade
+
+### Recovery Procedures
+
+**Failure Response**:
+- **Rollback mechanisms**: Revert to previous software version if needed
+- **Community coordination**: Communicate with other validators during issues
+- **Log analysis**: Examine detailed logs to identify specific problems
+- **Emergency procedures**: Follow emergency protocols for critical failures
+
+## Best Practices
+
+### Preparation Guidelines
+
+**Pre-Upgrade Checklist**:
+- **Backup procedures**: Create comprehensive backups before upgrades
+- **Testing environments**: Test upgrades in non-production environments first
+- **Communication channels**: Maintain contact with development team and community
+- **Monitoring setup**: Ensure robust monitoring during upgrade process
+
+### Operational Security
+
+**Security Considerations**:
+- **Binary verification**: Always verify upgrade binary authenticity
+- **Infrastructure security**: Secure validator infrastructure before upgrades
+- **Access control**: Limit access during sensitive upgrade operations
+- **Emergency contacts**: Maintain emergency communication channels
+
+## Educational Resources
-#### For Allora Chain Validator Operators
+### Further References
-For those running the chain software, you will have to have to perform an upgrade as follows:
+**This is probably the most helpful document to understand the full workflow of a cosmos-sdk chain upgrade:** [Medium Blog Post Cosmos Dev Series: Cosmos Blockchain Upgrade](https://medium.com/web3-surfers/cosmos-dev-series-cosmos-sdk-based-blockchain-upgrade-b5e99181554c)
-1. Make sure you're running the `allorad` software with [Cosmovisor](https://docs.cosmos.network/main/build/tooling/cosmovisor)) managing the process, `DAEMON_NAME=allorad` and `DAEMON_HOME=/path/to/allorad/data/folder`. Hopefully you've already run `cosmovisor init /path/to/allorad-binary` and have the `/allorad/data/folder/cosmovisor` set.
-2. At some point the blockchain developers will provide you with a binary to put in that `/allorad/data/folder/cosmovisor` folder to upgrade to. This may be optional if the `UpgradeInfo` is set correctly by the developers, but if you're the paranoid type you can always download the binary yourself ahead of the upgrade and put it in the right folder by hand.
-3. When the developers put up the upgrade proposal to governance, be helpful and vote to make it pass. You can do this via the CLI with `allorad tx gov vote $proposal_id yes --from $validator` or an example of doing this programmatically can be found in the integration test [voteOnProposal](https://github.com/allora-network/allora-chain/blob/main/test/integration/upgrade_test.go) function.
-4. At the block height of the upgrade, the old software will panic - cosmovisor will catch the panic and restart the process using the new binary for the upgrade instead. Monitor your logs appropriately to see the restart.
+**Additional Documentation**:
+- **Cosmos SDK Upgrade Module:** [Documentation](https://docs.cosmos.network/main/build/modules/upgrade)
+- **Cosmovisor Process Manager:** [Documentation](https://docs.cosmos.network/main/build/tooling/cosmovisor)
+- **Cosmos SDK Gov Module:** [Documentation](https://docs.cosmos.network/main/build/modules/gov)
-## Further References
+**Learning Resources**:
+- **Technical deep-dive**: Comprehensive coverage of upgrade mechanisms and procedures
+- **Best practices**: Community-proven approaches to upgrade management
+- **Troubleshooting guides**: Solutions to common upgrade problems and issues
+- **Reference implementations**: Real-world examples and code samples
-This is probably the most helpful document to understand the full workflow of a cosmos-sdk chain
-upgrade: [Medium Blog Post Cosmos Dev Series: Cosmos Blockchain Upgrade](https://medium.com/web3-surfers/cosmos-dev-series-cosmos-sdk-based-blockchain-upgrade-b5e99181554c)
+## Prerequisites
-Cosmos SDK Upgrade Module: [Documentation](https://docs.cosmos.network/main/build/modules/upgrade)
+- **Validator operations**: Active validator node running on Allora Network
+- **Cosmovisor setup**: Properly configured Cosmovisor for upgrade management
+- **Governance participation**: Understanding of network governance and voting procedures
+- **Technical expertise**: Command-line proficiency and blockchain operations knowledge
-Cosmovisor Process Manager Software [Documentation](https://docs.cosmos.network/main/build/tooling/cosmovisor)
+## Next Steps
-Cosmos SDK Gov Module: [Documentation](https://docs.cosmos.network/main/build/modules/gov)
\ No newline at end of file
+- [Learn validator operations](/devs/validators/validator-operations) for ongoing validator management
+- [Study governance procedures](/devs/validators) for participating in network decisions
+- [Review system requirements](/devs/validators/nop-requirements) for infrastructure planning
+- [Explore full node setup](/devs/validators/run-full-node) for comprehensive validator deployment
\ No newline at end of file
diff --git a/pages/devs/validators/stake-a-validator.mdx b/pages/devs/validators/stake-a-validator.mdx
index 927ff0b..ef6dc24 100644
--- a/pages/devs/validators/stake-a-validator.mdx
+++ b/pages/devs/validators/stake-a-validator.mdx
@@ -1,26 +1,72 @@
# Stake a Validator
-Follow these steps to stake on a node in the Allora network. This process includes running and syncing a full node, funding your account, and setting up your Validator for staking.
+## What You'll Learn
+- Complete step-by-step process to stake and register as a validator on Allora Network
+- How to verify node synchronization and fund validator accounts properly
+- Creating and configuring validator stake information with proper parameters
+- Verification methods to confirm successful validator registration and operation
+
+## Overview
+
+**Follow these steps to stake on a node in the Allora network.** This process includes running and syncing a full node, funding your account, and setting up your Validator for staking.
+
+### Why Become a Validator?
+
+**Network Contribution**:
+- **Security provision**: Help secure the Allora blockchain through consensus participation
+- **Network governance**: Participate in important protocol decisions and upgrades
+- **Economic incentives**: Earn rewards for honest validation and network maintenance
+- **Community impact**: Support decentralization and network resilience
+
+**Professional Benefits**:
+- **Technical expertise**: Gain deep understanding of blockchain infrastructure operations
+- **Network influence**: Shape the future direction of the Allora ecosystem
+- **Revenue generation**: Earn consistent rewards through validation activities
+- **Reputation building**: Establish credibility within the blockchain community
## Prerequisites
-- Successfully run and synced a full `allorad` node. Refer to [Running a Full Node](./run-full-node) for detailed instructions.
-- Basic command-line and Docker knowledge.
-- Access to the node's terminal or command line.
+- **Successfully run and synced a full `allorad` node.** Refer to [Running a Full Node](./run-full-node) for detailed instructions.
+- **Basic command-line and Docker knowledge.**
+- **Access to the node's terminal or command line.**
+
+### Technical Requirements
-## 1\. Verify Node Sync
+**Infrastructure Prerequisites**:
+- **Fully synchronized node**: Complete blockchain synchronization before staking
+- **System resources**: Adequate hardware meeting validator requirements
+- **Network connectivity**: Stable internet connection for continuous operation
+- **Operational knowledge**: Understanding of Docker, command-line interfaces, and blockchain concepts
-Ensure your node is fully synced with the network by executing the following command:
+## Step-by-Step Validation Process
+
+### Step 1: Verify Node Sync
+
+**Ensure your node is fully synced with the network by executing the following command:**
```shell
curl -s http://localhost:26657/status | jq .result.sync_info.catching_up
```
-Wait until the output returns `false`, indicating your node has caught up with the network.
+**Wait until the output returns `false`, indicating your node has caught up with the network.**
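+
+If you'd rather not poll by hand, a small convenience loop (a sketch, assuming `jq` is installed) waits until the node reports it has caught up:
+
+```bash
+until [ "$(curl -s http://localhost:26657/status | jq -r .result.sync_info.catching_up)" = "false" ]; do
+  echo "still catching up..."
+  sleep 30
+done
+echo "node is synced"
+```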
+
+#### Synchronization Verification
+
+**Why This Matters**:
+- **Data integrity**: Ensure you have the complete and current blockchain state
+- **Consensus readiness**: Confirm ability to participate in network consensus
+- **Performance optimization**: Avoid penalties from operating on outdated data
+- **Network stability**: Contribute to overall network health and reliability
+
+**Troubleshooting Sync Issues**:
+- **Extended sync times**: Large blockchains may require several hours to fully sync
+- **Network connectivity**: Verify stable internet connection throughout sync process
+- **Resource availability**: Ensure sufficient disk space and system resources
+- **Peer connectivity**: Confirm connection to healthy network peers
-## 2\. Fund Your Account
+### Step 2: Fund Your Account
-After initializing your node, `scripts/l1_node.sh` generates key and account information, found in `data/*.account_info`. Locate your account address within this file to fund it.
+**After initializing your node, `scripts/l1_node.sh` generates key and account information, found in `data/*.account_info`. Locate your account address within this file to fund it.**
```shell
cat data/validator0.account_info
@@ -32,29 +78,49 @@ cat data/validator0.account_info
[...]
```
-For testnet environments, use the appropriate [faucet](/devs/get-started/setup-wallet#add-faucet-funds).
+**For testnet environments, use the appropriate [faucet](/devs/get-started/quick-start#get-testnet-funds).**
-## 3\. Stake as a Validator
+#### Account Funding Strategy
-To become a validator, perform the following inside the validator's Docker container environment. You can choose your validator's name by setting a custom moniker (with `--moniker=...`). We will take the example of `validator0` with `10000000 uallo`.
+**Funding Requirements**:
+- **Minimum stake**: Sufficient tokens to meet validator minimum stake requirements
+- **Transaction fees**: Additional tokens for network operations and transactions
+- **Operational buffer**: Extra funds for ongoing network participation and emergencies
+- **Future upgrades**: Reserve funds for protocol upgrades and maintenance
-### Access the Validator's Shell
+**Security Considerations**:
+- **Key management**: Secure storage of private keys and mnemonic phrases
+- **Access control**: Limit access to validator accounts and infrastructure
+- **Backup procedures**: Maintain secure backups of critical account information
+- **Monitoring**: Track account balances and transaction history
-Use `docker compose` to access the validator's shell environment:
+### Step 3: Stake as a Validator
+
+**To become a validator, perform the following inside the validator's Docker container environment.** You can choose your validator's name by setting a custom moniker (with `--moniker=...`). We will take the example of `validator0` with `10000000 uallo`.
+
+#### Access the Validator's Shell
+
+**Use `docker compose` to access the validator's shell environment:**
```shell
docker compose exec validator0 bash
```
-**Note**: You can list all available keys with:
+**Note: You can list all available keys with:**
```shell
allorad --home=$APP_HOME keys --keyring-backend=test list
```
-### Prepare Stake Information
+**Container Environment Benefits**:
+- **Isolated execution**: Run validator commands in controlled environment
+- **Configuration consistency**: Ensure proper environment variables and settings
+- **Security isolation**: Separate validator operations from host system
+- **Simplified management**: Streamlined access to validator tools and commands
-Within the validator's shell, create a JSON file named `stake-validator.json` with your validator's stake information. Replace values with your actual data:
+#### Prepare Stake Information
+
+**Within the validator's shell, create a JSON file named `stake-validator.json` with your validator's stake information. Replace values with your actual data:**
```shell
cat > stake-validator.json << EOF
@@ -70,9 +136,26 @@ cat > stake-validator.json << EOF
EOF
```
-### Execute the Stake Command
+##### Configuration Parameters Explained
+
+**Core Validator Settings**:
+- **`pubkey`**: Validator's public key for network identification and consensus
+- **`amount`**: Stake amount in the smallest denomination (uallo)
+- **`moniker`**: Human-readable name for your validator
+- **`commission-rate`**: Percentage of rewards kept as validator commission
+- **`commission-max-rate`**: Maximum commission rate allowed for this validator
+- **`commission-max-change-rate`**: Maximum daily change in commission rate
+- **`min-self-delegation`**: Minimum self-delegated stake required
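+
+For orientation, a filled-in `stake-validator.json` looks roughly like the sketch below. Every value is a placeholder (you can obtain the real `pubkey` from `allorad --home=$APP_HOME comet show-validator`), so replace them with your own data:
+
+```json
+{
+  "pubkey": {"@type": "/cosmos.crypto.ed25519.PubKey", "key": "YOUR_BASE64_CONSENSUS_KEY"},
+  "amount": "10000000uallo",
+  "moniker": "validator0",
+  "commission-rate": "0.10",
+  "commission-max-rate": "0.20",
+  "commission-max-change-rate": "0.01",
+  "min-self-delegation": "1"
+}
+```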
+
+**Economic Strategy**:
+- **Competitive rates**: Set commission rates that attract delegators while ensuring profitability
+- **Rate management**: Plan commission changes carefully to maintain delegator trust
+- **Self-delegation**: Demonstrate commitment through significant self-staked amounts
+- **Market positioning**: Balance competitiveness with operational sustainability
-With your stake information file ready, execute the following command to stake as a Validator:
+#### Execute the Stake Command
+
+**With your stake information file ready, execute the following command to stake as a Validator:**
```shell
allorad tx staking create-validator ./stake-validator.json \
@@ -82,15 +165,30 @@ allorad tx staking create-validator ./stake-validator.json \
--from="$MONIKER"
```
-This command outputs a transaction hash, which can be checked on the network's explorer: `https://explorer.testnet.allora.network/allora-testnet-1/tx/$TX_HASH`.
+**This command outputs a transaction hash, which can be checked on the network's explorer: `https://explorer.testnet.allora.network/allora-testnet-1/tx/$TX_HASH`.**
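+
+You can also confirm the result from the CLI; a `code` of `0` in the response means the transaction executed successfully (a quick sketch using the standard tx query):
+
+```bash
+allorad q tx $TX_HASH -o json | jq '{height, code, raw_log}'
+```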
+
+##### Transaction Execution Process
+
+**Command Components**:
+- **Configuration file**: JSON file containing all validator parameters
+- **Chain identification**: Specify the correct network chain ID
+- **Home directory**: Point to the correct node configuration directory
+- **Keyring backend**: Use appropriate key management system
+- **Account source**: Specify which account to use for the transaction
-## 4\. Verify Validator Setup
+**Transaction Verification**:
+- **Hash confirmation**: Transaction hash provides proof of successful submission
+- **Explorer verification**: Use block explorer to confirm transaction inclusion
+- **Network propagation**: Allow time for transaction to propagate across network
+- **Status monitoring**: Check transaction status and any error messages
-Ensure your validator is properly registered and staked with the network by executing the following commands:
+### Step 4: Verify Validator Setup
-### Check Registration and Stake
+**Ensure your validator is properly registered and staked with the network by executing the following commands:**
-Retrieve and verify your validator's information by running these 2 commands:
+#### Check Registration and Stake
+
+**Retrieve and verify your validator's information by running these two commands:**
```shell
VAL_PUBKEY=$(allorad --home=$APP_HOME comet show-validator | jq -r .key)
@@ -101,7 +199,7 @@ allorad --home=$APP_HOME q staking validators -o=json | \
jq '.validators[] | select(.consensus_pubkey.value=="'$VAL_PUBKEY'")'
```
-This command outputs detailed information about your validator. If it's correctly set up, it will look like this:
+**This command outputs detailed information about your validator. If it's correctly set up, it will look like this:**
```json
{
@@ -129,12 +227,100 @@ This command outputs detailed information about your validator. If it's correctl
}
```
-### Check Voting Power
+##### Validator Information Analysis
+
+**Key Response Fields**:
+- **`operator_address`**: Unique validator identifier for delegation and operations
+- **`consensus_pubkey`**: Public key used for block signing and consensus participation
+- **`status`**: Validator status (1=unbonded, 2=unbonding, 3=bonded)
+- **`tokens`**: Total amount of tokens bonded to this validator
+- **`delegator_shares`**: Shares distributed to delegators
+- **`commission`**: Commission rate configuration and history
-Verify that your Validator's voting power is greater than 0, indicating active participation in the Network:
+**Status Verification**:
+- **Bonded status**: Status 3 indicates active validator participation
+- **Token amounts**: Verify correct stake amounts are registered
+- **Commission settings**: Confirm commission rates match your configuration
+- **Key verification**: Ensure consensus public key matches your validator
+
+#### Check Voting Power
+
+**Verify that your Validator's voting power is greater than 0, indicating active participation in the Network:**
```shell
allorad --home=$APP_HOME status | jq -r '.validator_info.voting_power'
```
-**Note**: Please allow 30-60 seconds for the information to update. A voting power greater than 0 signifies a successful stake setup. Congratulations!
+**Note: Please allow 30-60 seconds for the information to update. A voting power greater than 0 signifies a successful stake setup. Congratulations!**
+
+##### Voting Power Analysis
+
+**Understanding Voting Power**:
+- **Consensus weight**: Voting power determines influence in block consensus
+- **Stake correlation**: Voting power is proportional to total delegated stake
+- **Network participation**: Non-zero voting power indicates active validator status
+- **Performance impact**: Higher voting power increases responsibility and rewards
+
+**Success Indicators**:
+- **Positive voting power**: Confirms successful validator registration and activation
+- **Network recognition**: Indicates other validators acknowledge your participation
+- **Reward eligibility**: Qualifies validator for block rewards and commission earnings
+- **Consensus participation**: Enables participation in network governance and decision-making
+
+## Post-Staking Operations
+
+### Ongoing Validator Management
+
+**Operational Responsibilities**:
+- **Uptime maintenance**: Ensure continuous node operation and availability
+- **Performance monitoring**: Track validator performance metrics and health
+- **Commission management**: Adjust commission rates within allowed parameters
+- **Delegation growth**: Attract and maintain delegator relationships
+
+### Security and Maintenance
+
+**Best Practices**:
+- **Key security**: Implement robust security measures for validator keys
+- **System monitoring**: Set up alerts for node issues and performance problems
+- **Regular updates**: Keep validator software updated with latest releases
+- **Backup procedures**: Maintain secure backups of critical validator data
+
+### Community Engagement
+
+**Network Participation**:
+- **Governance voting**: Participate in network governance proposals and decisions
+- **Community support**: Engage with delegators and community members
+- **Technical contributions**: Contribute to network improvements and development
+- **Educational efforts**: Share knowledge and help onboard new participants
+
+## Troubleshooting Common Issues
+
+### Synchronization Problems
+
+**Common Solutions**:
+- **Peer connectivity**: Ensure connection to healthy network peers
+- **Resource constraints**: Verify adequate system resources (CPU, memory, disk)
+- **Network issues**: Check internet connectivity and firewall settings
+- **Configuration errors**: Review node configuration files for accuracy
+
+### Staking Transaction Failures
+
+**Debugging Steps**:
+- **Account funding**: Verify sufficient balance for stake amount and transaction fees
+- **Configuration validation**: Check JSON configuration file syntax and values
+- **Network connectivity**: Ensure stable connection to blockchain network
+- **Parameter limits**: Verify stake amounts meet minimum requirements
+
+## Prerequisites
+
+- **Technical expertise**: Strong understanding of blockchain operations and command-line interfaces
+- **Infrastructure resources**: Properly configured and synchronized validator node
+- **Financial commitment**: Sufficient tokens for staking and ongoing operations
+- **Time availability**: Ongoing monitoring and maintenance responsibilities
+
+## Next Steps
+
+- [Learn about validator operations](/devs/validators/validator-operations) for ongoing management
+- [Understand system requirements](/devs/validators/nop-requirements) for infrastructure planning
+- [Study software upgrades](/devs/validators/software-upgrades) for maintenance procedures
+- [Explore validator economics](/devs/validators) for comprehensive understanding of validator roles
diff --git a/pages/devs/validators/validator-operations.mdx b/pages/devs/validators/validator-operations.mdx
index 6bd00d4..5e6e91a 100644
--- a/pages/devs/validators/validator-operations.mdx
+++ b/pages/devs/validators/validator-operations.mdx
@@ -1,17 +1,66 @@
# Validator Operations
-## Unjailing a validator
+## What You'll Learn
+- Essential validator management operations for ongoing network participation
+- How to unjail a validator that has been penalized by the network
+- Complete process for unstaking and removing a validator from the chain
+- Command-line procedures for critical validator maintenance tasks
-To unjail a validator execute the following command from the validator
+## Overview
-```Text bash
+**This guide covers essential operations for managing your validator node on the Allora Network.** These operations are critical for maintaining validator status and handling various network scenarios.
+
+### Why These Operations Matter
+
+**Operational Importance**:
+- **Network compliance**: Maintain good standing with network consensus requirements
+- **Penalty recovery**: Restore validator functionality after network penalties
+- **Exit procedures**: Properly remove validators when needed
+- **Risk management**: Handle emergency situations and validator maintenance
+
+**Professional Responsibility**:
+- **Uptime maintenance**: Ensure continuous participation in network consensus
+- **Quick recovery**: Minimize downtime and restore operations rapidly
+- **Proper procedures**: Follow correct protocols to avoid additional penalties
+- **Network health**: Contribute to overall network stability and reliability
+
+## Critical Validator Operations
+
+### Unjailing a Validator
+
+**To unjail a validator execute the following command from the validator:**
+
+```bash
allorad --home="$APP_HOME" \
tx slashing unjail --from $VALIDATOR_ADDRESS
```
-## Unstaking/unbounding a validator
+#### When Unjailing Is Needed
+
+**Common Jailing Scenarios**:
+- **Downtime penalties**: Extended periods of missed block signatures
+- **Double signing**: Accidental signing of conflicting blocks (serious offense)
+- **Network violations**: Failure to follow consensus rules or protocol requirements
+- **Technical issues**: Infrastructure problems causing validator misbehavior
+
+#### Unjailing Process
-If you need to delete a validator from the chain, you just need to unbound the stake with your custom parameters:
+**Steps to Recovery**:
+1. **Identify the issue**: Determine why the validator was jailed (a status-check sketch follows this list)
+2. **Fix underlying problems**: Resolve technical issues causing the penalty
+3. **Wait for unjail period**: Respect minimum jailing duration requirements
+4. **Execute unjail command**: Use the provided command to restore validator status
+5. **Monitor restoration**: Verify validator returns to active consensus participation
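+
+To check the validator's signing and jail state before sending the unjail transaction, you can query the slashing module (a sketch assuming the stock cosmos-sdk slashing CLI):
+
+```bash
+allorad --home="$APP_HOME" q slashing signing-info \
+  "$(allorad --home="$APP_HOME" comet show-validator)" -o json | \
+  jq '{jailed_until, missed_blocks_counter, tombstoned}'
+```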
+
+**Post-Unjailing Actions**:
+- **System monitoring**: Implement better monitoring to prevent future jailing
+- **Infrastructure review**: Assess and improve validator setup for reliability
+- **Performance tracking**: Monitor validator metrics and network participation
+- **Backup procedures**: Establish redundancy to prevent future downtime
+
+### Unstaking/Unbonding a Validator
+
+**If you need to remove a validator from the chain, you just need to unbond its stake with your custom parameters:**
```bash
allorad --home="$APP_HOME" \
@@ -19,3 +68,101 @@ allorad --home="$APP_HOME" \
${STAKE_AMOUNT}uallo --from "$MONIKER" \
--keyring-backend=test --chain-id ${NETWORK}
```
+
+#### Unbonding Considerations
+
+**When to Unbond**:
+- **Permanent exit**: Shutting down validator operations permanently
+- **Infrastructure changes**: Major system upgrades requiring validator restart
+- **Economic decisions**: Reallocating stake to other investment opportunities
+- **Network transitions**: Moving to different networks or protocols
+
+#### Unbonding Process Details
+
+**Command Parameters**:
+- **`$VALIDATOR_OPERATOR_ADDRESS`**: The specific validator's operator address
+- **`$STAKE_AMOUNT`**: Amount of tokens to unbond (in base denomination uallo)
+- **`$MONIKER`**: Your validator's human-readable name
+- **`$NETWORK`**: The chain ID for the network (e.g., allora-testnet-1)
+
+**Important Considerations**:
+- **Unbonding period**: Tokens are locked for a specified period after unbonding
+- **Partial unbonding**: You can unbond partial amounts rather than entire stake
+- **Delegator impact**: Unbonding affects delegators who have staked with your validator
+- **Slashing exposure**: Validators remain subject to slashing during unbonding period
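+
+After submitting the unbond, you can watch it take effect; the subcommand names below assume the stock cosmos-sdk staking CLI:
+
+```bash
+# Validator status should move from BONDED toward UNBONDING/UNBONDED
+allorad --home="$APP_HOME" q staking validator $VALIDATOR_OPERATOR_ADDRESS
+
+# List the pending unbonding entries and their completion times
+allorad --home="$APP_HOME" q staking unbonding-delegations-from $VALIDATOR_OPERATOR_ADDRESS
+```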
+
+### Advanced Operations
+
+#### Validator Key Management
+
+**Security Best Practices**:
+- **Key rotation**: Regularly update validator signing keys for security
+- **Backup procedures**: Maintain secure backups of all validator keys and configurations
+- **Access control**: Limit access to validator infrastructure and key materials
+- **Multi-signature**: Consider multi-signature setups for critical operations
+
+#### Emergency Procedures
+
+**Crisis Management**:
+- **Emergency shutdown**: Procedures for safely shutting down validator during crises
+- **Key compromise**: Steps to take if validator keys are potentially compromised
+- **Infrastructure failure**: Backup plans for hardware or network failures
+- **Coordinated response**: Communication with network during emergency situations
+
+## Operational Best Practices
+
+### Monitoring and Maintenance
+
+**Continuous Operations**:
+- **Performance metrics**: Track validator performance and network statistics
+- **Alert systems**: Set up monitoring and alerting for validator issues
+- **Regular updates**: Keep validator software updated with latest releases
+- **Health checks**: Implement automated health checks and status monitoring
+
+### Risk Management
+
+**Penalty Prevention**:
+- **Redundant infrastructure**: Multiple servers and network connections
+- **Automated failover**: Systems to switch to backup infrastructure automatically
+- **Regular testing**: Test disaster recovery and emergency procedures regularly
+- **Documentation**: Maintain detailed runbooks for all operational procedures
+
+### Community Engagement
+
+**Network Participation**:
+- **Governance voting**: Actively participate in network governance decisions
+- **Community communication**: Maintain open communication with delegators and community
+- **Technical contributions**: Share knowledge and contribute to network development
+- **Professional network**: Build relationships with other validators and network participants
+
+## Troubleshooting Common Issues
+
+### Jailing Problems
+
+**Diagnostic Steps**:
+- **Check validator status**: Verify current validator state and jailing reason
+- **Review logs**: Examine validator logs for error messages and issues
+- **Network connectivity**: Ensure stable connection to network peers
+- **System resources**: Verify adequate computational resources and performance
+
+### Unbonding Issues
+
+**Common Solutions**:
+- **Insufficient balance**: Ensure adequate tokens for transaction fees
+- **Address verification**: Double-check validator operator address accuracy
+- **Network parameters**: Verify correct chain ID and network configuration
+- **Transaction confirmation**: Monitor transaction status and confirmation
+
+## Prerequisites
+
+- **Active validator**: Currently operating validator node on the Allora Network
+- **Command-line access**: Terminal access to validator infrastructure
+- **Administrative permissions**: Appropriate access rights for validator operations
+- **Network connectivity**: Stable connection to Allora Network blockchain
+
+## Next Steps
+
+- [Learn about software upgrades](/devs/validators/software-upgrades) for maintaining current validator software
+- [Review validator requirements](/devs/validators/nop-requirements) for infrastructure optimization
+- [Study validator staking](/devs/validators/stake-a-validator) for understanding stake management
+- [Explore full node operations](/devs/validators/run-full-node) for comprehensive validator setup
diff --git a/pages/devs/workers.mdx b/pages/devs/workers.mdx
index c437a99..7725797 100644
--- a/pages/devs/workers.mdx
+++ b/pages/devs/workers.mdx
@@ -2,7 +2,137 @@ import { Callout } from 'nextra/components'
# Workers
-## For Data Scientists
+## What You'll Learn
+- Understanding the role of data scientists and AI practitioners as workers in the Allora Network
+- How machine learning expertise contributes to decentralized financial predictions
+- The value proposition of participating as a worker in the network
+- How Allora's consensus mechanism optimizes financial objectives through collective intelligence
-As a data scientist, your expertise in AI and ML is invaluable to this network, enabling you to contribute predictive models and insights that power a wide range of applications. The Allora Network, powered by Allora's unique consensus mechanism, crowdsources financial predictions produced by machine learning models. The network incentivizes the contribution of machine intelligence to optimize various financial objectives.
+## Overview
+
+### For Data Scientists
+
+**As a data scientist, your expertise in AI and ML is invaluable to this network, enabling you to contribute predictive models and insights that power a wide range of applications.**
+
+### How Allora Network Operates
+
+**The Allora Network, powered by Allora's unique consensus mechanism, crowdsources financial predictions produced by machine learning models.** The network incentivizes the contribution of machine intelligence to optimize various financial objectives.
+
+## Worker Role and Value Proposition
+
+### Core Function
+
+**Primary Responsibilities**:
+- **Model deployment**: Contribute trained machine learning models to the network
+- **Inference generation**: Provide predictions on various financial topics and assets
+- **Performance optimization**: Continuously improve model accuracy for better rewards
+- **Network participation**: Engage in the collective intelligence ecosystem
+
+### Why Participate as a Worker?
+
+**Benefits for Data Scientists**:
+- **Monetize expertise**: Earn rewards for accurate predictions and valuable insights
+- **Access to collective intelligence**: Learn from network-wide model performance
+- **Real-world application**: Apply ML skills to live financial markets and data
+- **Decentralized platform**: Participate in cutting-edge blockchain-based AI networks
+
+**Professional Development**:
+- **Model validation**: Test your models against real market conditions
+- **Comparative analysis**: Benchmark performance against other data scientists
+- **Continuous learning**: Adapt to changing market conditions and patterns
+- **Innovation opportunity**: Contribute to the future of decentralized AI
+
+## Network Architecture Benefits
+
+### Consensus-Driven Accuracy
+
+**Collective Intelligence Advantages**:
+- **Wisdom of crowds**: Leverage diverse perspectives and methodologies
+- **Error reduction**: Network consensus helps minimize individual model biases
+- **Robust predictions**: Multiple models provide more reliable outcomes
+- **Quality assurance**: Reputation systems reward consistent accuracy
+
+### Financial Optimization Focus
+
+**Targeted Applications**:
+- **Market prediction**: Price forecasting for various financial instruments
+- **Risk assessment**: Volatility and uncertainty quantification
+- **Trading strategies**: Signal generation for investment decisions
+- **Economic indicators**: Macroeconomic trend analysis and forecasting
+
+## Getting Started as a Worker
+
+### Assessment and Preparation
+
+**Prerequisites Evaluation**:
+- **Technical skills**: Machine learning model development and deployment
+- **Domain knowledge**: Understanding of financial markets and data
+- **Infrastructure**: Computing resources for model training and inference
+- **Network familiarity**: Basic understanding of blockchain and tokenomics
+
+### Model Development Strategy
+
+**Optimization Approach**:
+- **Data quality**: Focus on clean, relevant training datasets
+- **Feature engineering**: Create meaningful predictors for financial outcomes
+- **Model selection**: Choose appropriate algorithms for specific prediction tasks
+- **Validation methods**: Implement robust testing and cross-validation procedures
+
+### Network Integration
+
+**Deployment Considerations**:
+- **Topic selection**: Choose financial prediction categories aligned with expertise
+- **Resource planning**: Ensure adequate computational resources for continuous operation
+- **Performance monitoring**: Track model accuracy and network rewards
+- **Community engagement**: Participate in discussions and knowledge sharing
+
+## Success Strategies
+
+### Model Performance
+
+**Accuracy Enhancement**:
+- **Continuous learning**: Regularly retrain models with new data
+- **Ensemble methods**: Combine multiple approaches for better predictions
+- **Market adaptation**: Adjust models for changing market conditions
+- **Error analysis**: Learn from prediction mistakes and systematic biases
+
+### Network Participation
+
+**Engagement Best Practices**:
+- **Consistent contribution**: Maintain regular inference submissions
+- **Quality focus**: Prioritize accuracy over volume of predictions
+- **Collaboration**: Share insights and learn from other network participants
+- **Long-term commitment**: Build reputation through sustained performance
+
+## Technical Considerations
+
+### Infrastructure Requirements
+
+**System Specifications**:
+- **Computing power**: Adequate processing for model inference and training
+- **Network connectivity**: Reliable internet connection for network participation
+- **Storage capacity**: Space for model data, training sets, and historical records
+- **Monitoring tools**: Systems to track performance and network status
+
+### Integration Complexity
+
+**Development Challenges**:
+- **API integration**: Connect models with network infrastructure
+- **Data formatting**: Ensure compatibility with network data standards
+- **Error handling**: Implement robust systems for network connectivity issues
+- **Security measures**: Protect model intellectual property and network credentials
+
+## Prerequisites
+
+- **Machine learning expertise**: Strong foundation in ML algorithms and model development
+- **Financial market knowledge**: Understanding of markets, instruments, and prediction challenges
+- **Programming skills**: Proficiency in relevant languages (Python, R, etc.) for model development
+- **Blockchain familiarity**: Basic understanding of decentralized networks and token economics
+
+## Next Steps
+
+- [Review system requirements](/devs/workers/requirements) for technical specifications
+- [Explore worker deployment guides](/devs/workers/deploy-worker/using-docker) for hands-on implementation
+- [Study existing topics](/devs/get-started/network-interaction#available-topics) to understand available prediction categories
+- [Learn about worker data querying](/devs/workers/query-worker-data) for performance monitoring
diff --git a/pages/devs/workers/deploy-forecaster.mdx b/pages/devs/workers/deploy-forecaster.mdx
index 8b5cecb..ab5e8d9 100644
--- a/pages/devs/workers/deploy-forecaster.mdx
+++ b/pages/devs/workers/deploy-forecaster.mdx
@@ -1,10 +1,46 @@
# Build and Deploy a Forecaster
-The Allora Forecaster is designed to run a model that predicts how accurate inferers are at arbitrary tasks.
-Any forecaster can be augmented using proprietary data sources, which likely overlap with the data used by inference models.
-A [boilerplate forecaster](https://github.com/allora-network/allora-forecaster) has been provided that has demonstrated ability for arbitrary topics.
+## What You'll Learn
+- Understanding the Allora Forecaster's role in predicting inferer accuracy
+- Overview of forecaster components and their specific functions
+- How forecasters use proprietary data sources and machine learning models
+- Performance metrics and scoring mechanisms for forecast evaluation
-## Forecaster Components Overview
+## Overview
+
+**The Allora Forecaster is designed to run a model that predicts how accurate inferers are at arbitrary tasks.**
+
+### Why Forecasters Matter
+
+**Strategic Value**:
+- **Performance prediction**: Anticipate which inferers will provide the most accurate results
+- **Resource optimization**: Allocate network resources based on predicted performance
+- **Quality assurance**: Enhance overall network accuracy through meta-predictions
+- **Competitive advantage**: Leverage proprietary data for superior forecasting
+
+### Data Enhancement Opportunities
+
+**Any forecaster can be augmented using proprietary data sources, which likely overlap with the data used by inference models.**
+
+**Proprietary Data Benefits**:
+- **Unique insights**: Access to exclusive information not available to other participants
+- **Competitive edge**: Differentiate forecasts through specialized data sources
+- **Enhanced accuracy**: Improve prediction quality with additional context
+- **Market advantage**: Leverage domain expertise and specialized knowledge
+
+### Getting Started Resources
+
+**A [boilerplate forecaster](https://github.com/allora-network/allora-forecaster) has been provided that has demonstrated ability on arbitrary topics.**
+
+**Boilerplate Advantages**:
+- **Proven framework**: Battle-tested architecture and implementation patterns
+- **Quick deployment**: Ready-to-use components for rapid development
+- **Best practices**: Incorporates lessons learned from successful deployments
+- **Community support**: Maintained by the Allora Network team with ongoing updates
+
+## Architecture Overview
+
+### Forecaster Components Overview
| **Component** | **Purpose** | **Key Functions** |
|-------------------------|-----------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
@@ -13,7 +49,102 @@ A [boilerplate forecaster](https://github.com/allora-network/allora-forecaster)
| **Prediction Engine** | Runs selected models on historical data to generate future predictions. | Ingests time-series data and outputs forecast values based on the chosen model. |
| **Model Plots** | Visualizes model performance and forecast accuracy. | Generates plots such as Prediction vs Actual, Residuals, and Forecast Horizon for intuitive evaluation. |
| **Performance Metrics** | Measures the accuracy and effectiveness of model predictions. | Key metrics include MAE (Mean Absolute Error), RMSE (Root Mean Squared Error), R2 Score, Mean Absolute Percentage Error, Median Absolute Percentage Error |
-| **Scoring Mechanism** | Assigns scores based on model performance compared to other participants. | Determines which forecasts contribute to the Allora Network’s final consensus based on accuracy and uniqueness. |
+| **Scoring Mechanism** | Assigns scores based on model performance compared to other participants. | Determines which forecasts contribute to the Allora Network's final consensus based on accuracy and uniqueness. |
+
+### Component Integration
+
+**Data Flow Process**:
+1. **Data Indexing**: Extract and organize blockchain data for analysis
+2. **Modeling**: Train and optimize machine learning models on historical data
+3. **Prediction Engine**: Generate forecasts using trained models
+4. **Performance Metrics**: Evaluate forecast accuracy and model effectiveness
+5. **Model Plots**: Visualize results for analysis and optimization
+6. **Scoring Mechanism**: Rank forecasts and determine network contributions
+
+## Technical Implementation
+
+### Machine Learning Support
+
+**Supported Algorithms**:
+- **LightGBM**: Gradient boosting framework optimized for efficiency and accuracy
+- **XGBoost**: Extreme gradient boosting with strong performance on tabular data
+- **Extensible framework**: Support for additional algorithms and custom models
+- **Ensemble methods**: Combine multiple models for improved predictions
+
+### Data Processing Pipeline
+
+**Blockchain Integration**:
+- **Postgres indexer**: Efficient data extraction and storage
+- **Time-series analysis**: Historical pattern recognition and trend analysis
+- **Real-time updates**: Continuous data ingestion for current forecasts
+- **Data validation**: Quality checks and consistency verification
+
+### Performance Evaluation
+
+**Comprehensive Metrics**:
+- **MAE (Mean Absolute Error)**: Average magnitude of prediction errors
+- **RMSE (Root Mean Squared Error)**: Standard deviation of prediction errors
+- **R2 Score**: Proportion of variance explained by the model
+- **Mean Absolute Percentage Error**: Percentage-based accuracy measurement
+- **Median Absolute Percentage Error**: Robust percentage error metric
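+
+For reference, the two headline error metrics can be written as follows (standard definitions, with $y_i$ the observed value and $\hat{y}_i$ the forecast):
+
+```latex
+\mathrm{MAE} = \frac{1}{n}\sum_{i=1}^{n}\left| y_i - \hat{y}_i \right|
+\qquad
+\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left( y_i - \hat{y}_i \right)^2}
+```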
+
+### Visualization Tools
+
+**Analysis Capabilities**:
+- **Prediction vs Actual**: Compare forecasts with realized outcomes
+- **Residuals**: Analyze prediction errors and model bias
+- **Forecast Horizon**: Visualize prediction accuracy over different time periods
+- **Performance trends**: Track model improvement over time
+
+## Getting Started
+
+### Setup
+
+**Development Prerequisites**:
+- **Technical environment**: Python/ML development setup with required dependencies
+- **Data access**: Connection to Allora Network blockchain data
+- **Model training resources**: Computational power for machine learning workflows
+- **Monitoring tools**: Systems for tracking forecaster performance
+
+### Implementation Strategy
+
+**Deployment Approach**:
+1. **Clone boilerplate**: Start with the provided forecaster framework
+2. **Configure data sources**: Set up blockchain data indexing and proprietary feeds
+3. **Model selection**: Choose appropriate algorithms for your use case
+4. **Training pipeline**: Implement model training and optimization workflows
+5. **Deployment**: Launch forecaster and integrate with network
+6. **Monitoring**: Track performance and iterate on model improvements
+
+## Best Practices
+
+### Model Development
+
+**Optimization Guidelines**:
+- **Feature engineering**: Create meaningful predictors from available data
+- **Cross-validation**: Use robust validation techniques to prevent overfitting
+- **Ensemble methods**: Combine multiple models for improved accuracy
+- **Regular retraining**: Update models with new data and changing conditions
+
+### Data Management
+
+**Quality Assurance**:
+- **Data validation**: Implement checks for data quality and consistency
+- **Historical analysis**: Use sufficient historical data for model training
+- **Real-time processing**: Ensure timely data updates for current forecasts
+- **Backup strategies**: Maintain data redundancy and recovery procedures
+
+## Prerequisites
+
+- **Machine learning expertise**: Strong understanding of forecasting models and techniques
+- **Data science skills**: Ability to work with time-series data and statistical analysis
+- **Blockchain familiarity**: Understanding of Allora Network architecture and data structures
+- **Technical infrastructure**: Computational resources for model training and deployment
+
+## Next Steps
-## Setup
+- [Explore the boilerplate forecaster repository](https://github.com/allora-network/allora-forecaster) for implementation details
+- [Learn about worker deployment](/devs/workers/deploy-worker/using-docker) for network integration
+- [Study worker data querying](/devs/workers/query-worker-data) for performance monitoring
+- [Review worker requirements](/devs/workers/requirements) for infrastructure planning
diff --git a/pages/devs/workers/deploy-worker/allora-mdk.mdx b/pages/devs/workers/deploy-worker/allora-mdk.mdx
index c4301cd..f0566ef 100644
--- a/pages/devs/workers/deploy-worker/allora-mdk.mdx
+++ b/pages/devs/workers/deploy-worker/allora-mdk.mdx
@@ -2,17 +2,31 @@ import { Callout } from 'nextra/components'
# Build and Deploy a Worker using the Allora Model Development Kit (MDK)
-The Allora MDK is an open-source github repository that allows users to spin up an inference model for over 7,000 cryptocurrencies and stocks.
-The MDK leverages the [Tiingo API](https://www.tiingo.com) as a data feed for these cryptocurrencies and stocks, although custom datasets can be
-integrated as well.
+## What You'll Learn
+- How to use the Allora MDK for developing sophisticated inference models
+- Training and evaluating models for over 7,000 cryptocurrencies and stocks
+- Complete workflow from model development to network deployment
+- Available regression techniques and when to use them
-Let's walk through the steps needed to download, train, and evaluate a given model on a custom dataset, and then deploy this trained model onto the
-network.
+## Overview
-## Regression Techniques
+**The Allora MDK is an open-source GitHub repository that allows users to spin up an inference model for over 7,000 cryptocurrencies and stocks.** The MDK leverages the [Tiingo API](https://www.tiingo.com) as a data feed for these cryptocurrencies and stocks, although custom datasets can be integrated as well.
-Each of these regression techniques is implemented at a basic level and is available out of the box in the Model Development Kit (MDK). These models provide a foundation that you can build upon to create more advanced solutions.
+### What Makes MDK Special?
+
+The MDK provides:
+- **Pre-built models**: 9 different regression techniques ready to use
+- **Financial data integration**: Native Tiingo API support for 7,000+ assets
+- **Custom datasets**: Support for your own CSV data sources
+- **End-to-end workflow**: From training to network deployment
+
+Let's walk through the steps needed to download, train, and evaluate a given model on a custom dataset, and then deploy this trained model onto the network.
+## Available Models
+
+### Regression Techniques
+
+Each of these regression techniques is implemented at a basic level and is available out of the box in the Model Development Kit (MDK). These models provide a foundation that you can build upon to create more advanced solutions.
| **Model** | **Description** |
|-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@@ -30,9 +44,17 @@ Each of these regression techniques is implemented at a basic level and is avail
Although these models are already integrated into the MDK, you can add more models as well as modify existing ones to create a better inference model tailored to your specific needs.
+## Prerequisites
+
+- **Python 3.9+**: Required for running the MDK
+- **Conda**: For environment management (or manual Python setup)
+- **Tiingo API Key**: For accessing financial data (free tier available)
+- **Basic ML Knowledge**: Understanding of machine learning concepts
+- **System Resources**: Adequate RAM and CPU for model training
+
## Installation
-### Clone the MDK Repository
+### Step 1: Clone the MDK Repository
Run the following commands in a new terminal window:
@@ -41,7 +63,10 @@ git clone https://github.com/allora-network/allora-mdk.git
cd allora-mdk
```
-#### Conda not Installed?
+### Step 2: Set Up Python Environment
+
+#### Install Conda (if needed)
+
On Mac, simply use brew to install Miniconda:
@@ -50,12 +75,14 @@ brew install miniconda
```
-### Create Conda Environment
+#### Create Conda Environment
+**Automated Setup (Recommended)**:
```bash
conda env create -f environment.yml
```
+**Manual Setup (Alternative)**:
If you want to set it up manually:
```bash
@@ -64,13 +91,15 @@ pip install setuptools==72.1.0 Cython==3.0.11 numpy==1.24.3
```
-### Install Dependencies
+### Step 3: Install Dependencies
```bash
pip install -r requirements.txt
```
-### Add Tiingo API Key
+### Step 4: Configure API Access
+
+#### Add Tiingo API Key
Go to [tiingo.com](https://www.tiingo.com) and set up an API Key after creating an account, which you will input into your `.env` file:
@@ -79,9 +108,14 @@ Go to [tiingo.com](https://www.tiingo.com) and set up an API Key after creating
TIINGO_API_KEY=your_tiingo_api_key
```
-## Usage
+**API Key Setup**:
+1. Visit [tiingo.com](https://www.tiingo.com) and create a free account
+2. Generate an API key from your dashboard
+3. Add the key to your `.env` file as shown above
+
+## Model Training Workflow
-### Model Training
+### Start Training Process
```bash
make train
@@ -89,7 +123,9 @@ make train
Running the above command will guide you through a series of sub-prompts that you can use to curate a unique training set for the given cryptocurrency or stock you choose as a target variable.
-#### Select the Data Source
+### Training Configuration Steps
+
+#### Step 1: Select the Data Source
After running `make train`, the command line will prompt you to select your dataset:
@@ -101,9 +137,14 @@ Select the data source:
Enter your choice (1/2/3):
```
-- Although the MDK is natively integrated with Tiingo, a model maker can effectively configure any data set to train on from a CSV file as well.
+**Data Source Options**:
+- **Option 1**: Tiingo Stock Data - Access to 7,000+ US and international stocks
+- **Option 2**: Tiingo Crypto Data - Coverage of major cryptocurrencies
+- **Option 3**: Custom CSV - Upload your own datasets
+
+Although the MDK is natively integrated with Tiingo, a model maker can also configure any dataset from a CSV file to train on.
-#### Select the Target Variable
+#### Step 2: Select the Target Variable
After selecting your data source, you will be prompted to pick a target variable for your model to provide inferences on.
@@ -111,22 +152,27 @@ After selecting your data source, you will be prompted to pick a target variable
Enter the crypto symbol (default: btcusd):
```
-#### Select the Time Interval
+**Popular Targets**:
+- **Cryptocurrencies**: btcusd, ethusd, adausd, solusd
+- **Stocks**: aapl, tsla, msft, googl
-Next, you'll have to select the time interval. The time interval determines how frequently the data points are sampled or aggregated over a given period of time.
+#### Step 3: Select the Time Interval
-- If you're dealing with smaller [epoch lengths](/devs/topic-creators/topic-life-cycle#epoch-length), shorter intervals like minutes or seconds might be necessary to capture rapid changes in the market.
-- For longer epoch lengths, you may choose daily, weekly, or monthly intervals.
+Next, you'll have to select the time interval. The time interval determines how frequently the data points are sampled or aggregated over a given period of time.
```bash
Enter the frequency (1min/5min/4hour/1day, default: 1day):
```
+**Interval Guidelines**:
+- If you're dealing with smaller [epoch lengths](/devs/topic-creators/topic-life-cycle#epoch-length), shorter intervals like minutes or seconds might be necessary to capture rapid changes in the market.
+- For longer epoch lengths, you may choose daily, weekly, or monthly intervals.
+
Using shorter time intervals increases CPU power requirements because the dataset grows significantly. More data points lead to larger memory consumption, longer data processing times, and more complex computations. The CPU has to handle more input/output operations, and models take longer to train due to the higher volume of data needed to capture patterns effectively.
-#### Start and End Date
+#### Step 4: Set Training Period
When selecting the start and end dates for your training data, keep in mind that larger time periods result in more data, requiring increased CPU power and memory. Longer timeframes capture more trends but also demand greater computational resources, especially during model training.
@@ -135,20 +181,12 @@ Enter the start date (YYYY-MM-DD, default: 2021-01-01):
Enter the end date (YYYY-MM-DD, default: 2024-10-20):
```
-#### Selecting Models to Train
-
-Now that we've set up our data source, target variable, and time interval, it's time to select the models to train on. In the prompt, you can either choose to train on all available models or make a custom selection.
-
-```bash
-Select the models to train:
-1. All models
-2. Custom selection
-Enter your choice (1/2):
-```
-
-If you opt for Custom selection, you will be prompted to choose from the regression techniques listed earlier, such as ARIMA, LSTM, Random Forest, or XGBoost.
+**Period Selection Tips**:
+- **Short period (3-6 months)**: Quick training, recent conditions only
+- **Medium period (1-2 years)**: Balanced approach, captures seasonal patterns
+- **Long period (3+ years)**: Comprehensive coverage, requires more resources
-#### Model Selection
+#### Step 5: Choose Models to Train
Now that we've set up our data source, target variable, and time interval, it's time to select the models to train on. In the prompt, you can either choose to train on all available models or make a custom selection.
@@ -159,9 +197,15 @@ Select the models to train:
Enter your choice (1/2):
```
+**Training Options**:
+- **All models**: Comprehensive comparison across all techniques
+- **Custom selection**: Choose specific models (ARIMA, LSTM, Random Forest, XGBoost, etc.)
+
If you opt for Custom selection, you will be prompted to choose from the regression techniques listed earlier, such as ARIMA, LSTM, Random Forest, or XGBoost. You can select the models that are best suited for your specific problem or dataset.
-#### Model Evaluation
+## Model Evaluation
+
+### Evaluate Trained Models
After selecting and training the models, the next step is to evaluate them. The MDK provides built-in tools to assess the performance of your model using standard metrics like Mean Absolute Error (MAE) and Root Mean Squared Error (RMSE). Simply run:
@@ -171,11 +215,16 @@ make eval
This will generate performance reports, helping you identify the best model to deploy.
-## Deployment
+**Evaluation Metrics**:
+- **MAE (Mean Absolute Error)**: Average absolute difference between predicted and actual values
+- **RMSE (Root Mean Squared Error)**: Square root of average squared differences
+- **Performance comparison**: Side-by-side results for all trained models
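+
+As a concrete illustration, both headline metrics can be computed from a stream of actual/predicted pairs (a sketch; this input format is hypothetical and separate from the MDK's own report output):
+
+```bash
+# MAE = mean(|actual - predicted|); RMSE = sqrt(mean((actual - predicted)^2))
+printf '100 98\n102 103\n101 100\n' | awk '
+  { d = $1 - $2; mae += (d < 0 ? -d : d); mse += d * d }
+  END { printf "MAE: %.4f\nRMSE: %.4f\n", mae / NR, sqrt(mse / NR) }'
+```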
+
+## Model Deployment
Deploying a model requires packaging your trained model from the MDK and integrating it with a worker node repository before exposing the worker as an endpoint.
-### Package your Trained Model
+### Step 1: Package Your Trained Model
Run the following command to package your model for the Allora worker:
@@ -184,18 +233,19 @@ make package-arima
```
-Replace arima with the name of the model you’d like to package (e.g., lstm, xgboost, etc.).
+Replace `arima` with the name of the model you'd like to package (e.g., `lstm`, `xgboost`, etc.).
+**Packaging Process**:
This will:
-- Copy the model’s files and dependencies into the `package folder`.
-- Run test's for inference and training to validate functionality in a worker
+- Copy the model's files and dependencies into the `package` folder.
+- Run tests for inference and training to validate functionality in a worker
- Generate a configuration file, `config.py`, that contains the active model information.
-### Deploy your Worker
+### Step 2: Deploy Your Worker
#### Expose the Endpoint
@@ -207,7 +257,7 @@ cd src && uvicorn main:app --reload --port 8000
```
-Replace ARIMA with the name of the model you’d like to package (e.g., LSTM, XGBOOST, etc.).
+Replace `ARIMA` with the name of the model you'd like to serve (e.g., `LSTM`, `XGBOOST`, etc.).
This will expose your endpoint, which will be called when a [worker nonce](/devs/topic-creators/topic-life-cycle#nonce) is available. If your endpoint is exposed successfully, you should see the following output on your command line:
@@ -216,6 +266,8 @@ This will expose your endpoint, which will be called when a [worker nonce](/devs
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
```
+#### Test Your Endpoint
+
You can query your endpoint in the CLI by running:
```bash
@@ -231,11 +283,13 @@ Now that you have a specific endpoint that can be queried for an inference outpu
1. Copy `example.config.json` and name the copy `config.json`.
2. Open `config.json` and **update** the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:
-###### `wallet` Sub-object
+###### Wallet Configuration
-1. `nodeRpc`: The [RPC URL](/devs/get-started/setup-wallet#rpc-url-and-chain-id) for the corresponding network the node will be deployed on
-2. `addressKeyName`: The name you gave your wallet key when [setting up your wallet](/devs/get-started/setup-wallet)
-3. `addressRestoreMnemonic`: The mnemonic that was outputted when setting up a new key
+**`wallet` Sub-object**:
+
+1. **`nodeRpc`**: The [RPC URL](/devs/get-started/quick-start#network-configuration) for the corresponding network the node will be deployed on
+2. **`addressKeyName`**: The name you gave your wallet key when [setting up your wallet](/devs/get-started/quick-start#create-your-wallet)
+3. **`addressRestoreMnemonic`**: The mnemonic that was outputted when setting up a new key
{/*
`addressKeyName` and `addressRestoreMnemonic` are optional parameters. If you did not previously generate keys, keys will be generated for you when [running the node](/devs/workers/deploy-worker/using-docker#generate-keys-and-export-variables).
@@ -243,12 +297,14 @@ Now that you have a specific endpoint that can be queried for an inference outpu
If you have existing keys that you wish to use, you will need to provide these variables.
*/}
-###### `worker` Config
+###### Worker Configuration
+
+**`worker` Config**:
-1. `topicId`: The specific topic ID you created the worker for.
-2. `InferenceEndpoint`: The endpoint exposed by your worker node to provide inferences to the network.
-3. `Token`: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
- - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
+1. **`topicId`**: The specific topic ID you created the worker for.
+2. **`InferenceEndpoint`**: The endpoint exposed by your worker node to provide inferences to the network.
+3. **`Token`**: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
+ - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
The `worker` config is an array of sub-objects, each representing a different topic ID. This structure allows you to manage multiple topic IDs, each within its own sub-object.
@@ -279,6 +335,8 @@ To deploy a worker that provides inferences for multiple topics, you can duplica
```
+### Step 3: Start the Node
+
Then run:
```bash
@@ -286,7 +344,15 @@ make node-env
make compose
```
-- This will load your config into your environment and spin up your docker node, which will check for open worker nonces and submit inferences to the network.
+This will load your config into your environment and spin up your docker node, which will check for open worker nonces and submit inferences to the network.
+
+**Deployment Commands**:
+- **`make node-env`**: Sets up environment variables and configuration
+- **`make compose`**: Starts Docker containers for worker and inference server
+
+## Verification
+
+### Check Node Status
If your node is working correctly, you should see it actively checking for the active worker nonce:
@@ -294,8 +360,22 @@ If your node is working correctly, you should see it actively checking for the a
offchain_node | {"level":"debug","topicId":1,"time":1723043600,"message":"Checking for latest open worker nonce on topic"}
```
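+
+If these messages don't appear, tailing the container logs is a quick first diagnostic (a sketch; service names depend on your compose file):
+
+```bash
+# Follow logs from all services started by `make compose`.
+docker compose logs -f
+```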
+### Successful Deployment
+
A **successful** response from your Worker should display:
```bash
{"level":"debug","msg":"Send Worker Data to chain","txHash":,"time":,"message":"Success"}
-```
\ No newline at end of file
+```
+
+**Success Indicators**:
+- Regular nonce checking messages
+- Successful transaction submissions with tx hashes
+- No persistent error messages
+- Inference server responding to requests
+
+## Next Steps
+
+- [Learn about worker performance optimization](/devs/workers/query-ema-score)
+- [Explore price prediction worker walkthroughs](/devs/workers/walkthroughs/walkthrough-price-prediction-worker)
+- [Compare with Docker deployment approach](/devs/workers/deploy-worker/using-docker)
\ No newline at end of file
diff --git a/pages/devs/workers/deploy-worker/build-and-deploy-worker-with-node-runners.mdx b/pages/devs/workers/deploy-worker/build-and-deploy-worker-with-node-runners.mdx
index 1108330..0d17677 100644
--- a/pages/devs/workers/deploy-worker/build-and-deploy-worker-with-node-runners.mdx
+++ b/pages/devs/workers/deploy-worker/build-and-deploy-worker-with-node-runners.mdx
@@ -1,13 +1,30 @@
# Build and Deploy a Worker Node With AWS Node Runners
-Welcome to the AWS Node Runners documentation! This page provides detailed instructions on how to leverage Node Runners on AWS, including benefits, setup instructions, and useful links.
+## What You'll Learn
+- How to deploy worker nodes using AWS Node Runners for production scale
+- Understanding AWS infrastructure architecture for Allora workers
+- Leveraging AWS Activate credits for startup-friendly deployment costs
## Overview
-Node Runners on AWS enables you to deploy and manage blockchain nodes efficiently using AWS infrastructure. Whether you're deploying Ethereum nodes or other blockchain networks, Node Runners simplifies the process, offering scalability, reliability, and cost-effectiveness.
+**Welcome to the AWS Node Runners documentation! This page provides detailed instructions on how to leverage Node Runners on AWS, including benefits, setup instructions, and useful links.**
+
+### What are AWS Node Runners?
+
+**Node Runners on AWS enables you to deploy and manage blockchain nodes efficiently using AWS infrastructure.** Whether you're deploying Ethereum nodes or other blockchain networks, Node Runners simplifies the process, offering scalability, reliability, and cost-effectiveness.
+
+### Why Use AWS Node Runners?
+
+AWS Node Runners provide:
+- **Scalability**: Automatically scale resources based on demand
+- **Reliability**: AWS's high availability and uptime guarantees
+- **Cost-effectiveness**: Pay-as-you-use pricing with potential startup credits
+- **Enterprise features**: Professional-grade infrastructure and support
For more detailed information and step-by-step guides, please refer to the [AWS Node Runners Documentation](https://aws-samples.github.io/aws-blockchain-node-runners/docs/Blueprints/Ethereum).
+## Architecture Overview
+
### Allora Network's AWS Infrastructure
This diagram illustrates the architecture of the integration between the Allora Network (built on a Cosmos AppChain) and an AWS-based infrastructure for handling inference requests.
@@ -16,47 +33,71 @@ This diagram illustrates the architecture of the integration between the Allora
#### Key Components
-1. **Allora Network (Cosmos AppChain)**
+**1. Allora Network (Cosmos AppChain)**
- **Public Head Node**: Acts as the entry point for the Allora Network, handling requests and responses.
-2. **AWS Account Setup**
+**2. AWS Account Setup**
- **Region**: The geographical location within AWS where the resources are deployed.
- **Virtual Private Cloud (VPC)**: Provides an isolated network environment within the AWS region.
- **Public Subnet**: A subnet within the VPC that has access to the internet through the VPC Internet Gateway.
- **VPC Internet Gateway**: Allows communication between the instances in the VPC and the internet.
-3. **EC2 Instance (Allora Worker Node)**
+**3. EC2 Instance (Allora Worker Node)**
- **Offchain Node**: This component handles network communication, receiving requests from the Allora Network and sending responses back.
- **Node Function**: Processes requests by interfacing with the private model server. It acts as an intermediary, ensuring the requests are correctly formatted and the responses are appropriately handled.
- **Model Server**: Hosts the proprietary model. It executes the main inference script (`Main.py`) to generate inferences based on the received requests.
#### Process Flow
-1. **Request Flow**:
+**1. Request Flow**:
- The Allora Network's Public Head Node sends a request for inferences to the EC2 instance within the AWS environment.
- The request passes through the VPC Internet Gateway and reaches the Offchain node in the public subnet.
- The Offchain node forwards the request to the Node Function.
- The Node Function calls `Main.py` on the Model Server to generate the required inferences.
-2. **Response Flow**:
+**2. Response Flow**:
- The Model Server processes the request and returns the inferences to the Node Function.
- The Node Function sends the inferences back to the Offchain node.
- The Offchain node communicates the inferences back to the Allora Network via the VPC Internet Gateway.
-## AWS Activate
+## AWS Activate Program
+
+### Important Notice
-Before proceeding, please note that eligibility for AWS Activate credits and terams are governed by AWS. This documentation may become outdated, so ensure you refer to the [AWS Activate program page](https://aws.amazon.com/startups/credits#hero) for the latest eligibility requirements and instructions.
+Before proceeding, please note that eligibility for AWS Activate credits and terms are governed by AWS. This documentation may become outdated, so ensure you refer to the [AWS Activate program page](https://aws.amazon.com/startups/credits#hero) for the latest eligibility requirements and instructions.
-## AWS Activate Stepwise Process
+### AWS Activate Step-by-Step Process
To receive up to $5,000 in AWS Activate credits, follow these steps:
+**Step 1: Application**
1. **Fill out our [Typeform](https://vk4z45e3hne.typeform.com/to/TVwcjiL1)**: Provide your details to receive our Activate Provider Organizational ID.
- Name (required)
- - Contact Information (optional): Email, Telegram, Discord handle, Linkedin
+ - Contact Information (optional): Email, Telegram, Discord handle, LinkedIn
- Official Company Website (required)
+**Step 2: AWS Activate Application**
2. **AWS Activate High-Level Instructions**: After obtaining our Organizational ID:
- Visit [AWS Activate Credit Packages](https://aws.amazon.com/startups/credits#packages).
- Apply through the Activate Portfolio
+### Program Benefits
+
+The AWS Activate program offers:
+- **Up to $5,000 in AWS credits**: Significant cost savings for startups
+- **Technical support**: Access to AWS technical guidance and best practices
+- **Training resources**: Educational materials and certification opportunities
+
+## Prerequisites
+
+- AWS account with appropriate permissions
+- Basic understanding of AWS services (EC2, VPC, CloudFormation)
+- Experience with blockchain node deployment concepts
+- Familiarity with [worker deployment basics](/devs/workers/deploy-worker/using-docker)
+
+## Next Steps
+
+- **Detailed Implementation**: Refer to the [AWS Node Runners Documentation](https://aws-samples.github.io/aws-blockchain-node-runners/docs/Blueprints/Ethereum) for complete setup instructions
+- **Alternative Methods**: [Compare with Docker deployment approach](/devs/workers/deploy-worker/using-docker)
+- **Model Development**: [Explore Allora MDK for specialized use cases](/devs/workers/deploy-worker/allora-mdk)
+
diff --git a/pages/devs/workers/deploy-worker/using-docker.mdx b/pages/devs/workers/deploy-worker/using-docker.mdx
index 4a5aa37..c0220c9 100644
--- a/pages/devs/workers/deploy-worker/using-docker.mdx
+++ b/pages/devs/workers/deploy-worker/using-docker.mdx
@@ -2,19 +2,33 @@ import { Callout } from 'nextra/components'
# Build and Deploy a Worker Node using Docker
-This document outlines a setup where the worker node is supported by an inference server. Communication occurs through an endpoint, allowing the worker to request inferences from the server.
+## What You'll Learn
+- How to deploy worker nodes using Docker containers
+- Setting up inference servers that communicate with worker nodes
+- Complete workflow from configuration to network registration
-To build this setup, please follow these steps:
+## Overview
+
+**This document outlines a setup where the worker node is supported by an inference server.** Communication occurs through an endpoint, allowing the worker to request inferences from the server.
+
+### Architecture Overview
+
+The Docker deployment consists of:
+- **Worker Node**: Connects to Allora Network and submits inferences
+- **Inference Server**: Hosts your AI model and provides predictions
+- **API Gateway**: Handles communication between components
## Prerequisites
Ensure you have the following installed on your machine:
-- Git
-- Go (version 1.16 or later)
-- Docker
+- **Git**: For cloning repositories
+- **Go**: Version 1.16 or later for building the worker node
+- **Docker**: For containerizing and running services
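+
+A quick toolchain check before starting can save a failed build later (version output will vary):
+
+```bash
+# Confirm the prerequisites are installed and on PATH.
+git --version
+go version          # should report go1.16 or later
+docker --version
+docker compose version
+```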
+
+## Setup Process
-## Clone the `allora-offchain-node` Repository
+### Step 1: Clone the Repository
Download the `allora-offchain-node` git repo:
@@ -23,16 +37,18 @@ git clone https://github.com/allora-network/allora-offchain-node
cd allora-offchain-node
```
-## Configure Your Environment
+### Step 2: Configure Your Environment
1. Copy `config.example.json` and name the copy `config.json`.
2. Open `config.json` and **update** the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:
-### `wallet` Sub-object
+#### Wallet Configuration
-1. `nodeRpc`: The [RPC URL](/devs/get-started/setup-wallet#rpc-url-and-chain-id) for the corresponding network the node will be deployed on
-2. `addressKeyName`: The name you gave your wallet key when [setting up your wallet](/devs/get-started/setup-wallet)
-3. `addressRestoreMnemonic`: The mnemonic that was outputted when setting up a new key
+**`wallet` Sub-object**:
+
+1. **`nodeRpc`**: The [RPC URL](/devs/get-started/quick-start#network-configuration) for the corresponding network the node will be deployed on
+2. **`addressKeyName`**: The name you gave your wallet key when [setting up your wallet](/devs/get-started/quick-start#create-your-wallet)
+3. **`addressRestoreMnemonic`**: The mnemonic that was outputted when setting up a new key
{/*
`addressKeyName` and `addressRestoreMnemonic` are optional parameters. If you did not previously generate keys, keys will be generated for you when [running the node](/devs/workers/deploy-worker/using-docker#generate-keys-and-export-variables).
@@ -40,12 +56,14 @@ cd allora-offchain-node
If you have existing keys that you wish to use, you will need to provide these variables.
*/}
-### `worker` Config
+#### Worker Configuration
+
+**`worker` Config**:
-1. `topicId`: The specific topic ID you created the worker for.
-2. `InferenceEndpoint`: The endpoint exposed by your worker node to provide inferences to the network.
-3. `Token`: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
- - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
+1. **`topicId`**: The specific topic ID you created the worker for.
+2. **`InferenceEndpoint`**: The endpoint exposed by your worker node to provide inferences to the network.
+3. **`Token`**: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
+ - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
The `worker` config is an array of sub-objects, each representing a different topic ID. This structure allows you to manage multiple topic IDs, each within its own sub-object.
@@ -76,13 +94,13 @@ To deploy a worker that provides inferences for multiple topics, you can duplica
```
-### Reputer Config
+#### Reputer Configuration (Optional)
-The `config.example.json` file that was copied and edited in the previous steps also contains a JSON object for configuring and deploying a [reputer](/devs/reputers/reputers). To ignore the reputer and only deploy a worker, delete the reputer sub-object from the `config.json` file.
+**Reputer Config**: The `config.example.json` file that was copied and edited in the previous steps also contains a JSON object for configuring and deploying a [reputer](/devs/reputers/reputers). To ignore the reputer and only deploy a worker, delete the reputer sub-object from the `config.json` file.
-## Create the Inference Server
+### Step 3: Create the Inference Server
-### Prepare the API Gateway
+#### Prepare the API Gateway
Ensure you have an API gateway or server that can accept API requests to call your model.
@@ -92,22 +110,23 @@ The model in `allora-offchain-node` is barebones and outputs a random integer. F
A full breakdown of the components needed to build the model is available [here](/devs/workers/walkthroughs/walkthrough-price-prediction-worker).
-### Server Responsibilities
+#### Server Responsibilities
+The inference server must:
- Accept API requests from `main.go`.
- Respond with the corresponding inference obtained from the model.
-### Inference Relay
+#### Inference Relay
Below is a sample structure of what your `main.go`, `main.py`, and `Dockerfile` will look like.
-#### `main.go`
+**`main.go`**:
`allora-offchain-node` comes preconfigured with a `main.go` file inside the [`adapter/api-worker-reputer` folder](https://github.com/allora-network/allora-offchain-node/blob/dev/adapter/api-worker-reputer/main.go).
The `main.go` file fetches the responses outputted from the Inference Endpoint based on the `InferenceEndpoint` and `Token` provided in the section above.
-#### `main.py`
+**`main.py`**:
`allora-offchain-node` comes preconfigured with a Flask application that uses a `main.py` file to expose the Inference Endpoint.
@@ -134,9 +153,7 @@ if __name__ == '__main__':
A full breakdown of the components needed to build the model is available [here](/devs/workers/walkthroughs/walkthrough-price-prediction-worker).
-
-
-#### `Dockerfile`
+**`Dockerfile`**:
A sample Dockerfile has been created in `allora-offchain-node` that can be used to deploy your model on port 8000.
@@ -162,7 +179,7 @@ CMD ["python", "main.py"]
Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:
-### Export Variables
+### Step 1: Export Variables
Execute the following command from the root directory:
@@ -185,11 +202,11 @@ before proceeding.
-### Request from Faucet
+### Step 2: Request from Faucet
Copy your Allora address and request some tokens from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) to successfully register your worker in the next step.
-### Deploy the Node
+### Step 3: Deploy the Node
```
docker compose up --build
@@ -197,24 +214,36 @@ docker compose up --build
Both the offchain node and the source services will be started. They will communicate through endpoints attached to the internal DNS.
+## Verification
+
+### Check Node Status
+
If your node is working correctly, you should see it actively checking for the active worker nonce:
```bash
offchain_node | {"level":"debug","topicId":1,"time":1723043600,"message":"Checking for latest open worker nonce on topic"}
```
+### Successful Registration
+
A **successful** response from your Worker should display:
```bash
{"level":"debug","msg":"Send Worker Data to chain","txHash":,"time":,"message":"Success"}
```
-Congratulations! You've successfully deployed and registered your node on Allora.
+**Congratulations! You've successfully deployed and registered your node on Allora.**
-### Testing
+### Testing Your Setup
You can test your local inference server by performing a `GET` request on `http://localhost:8000/inference/`.
```bash
curl http://localhost:8000/inference/
```
+
+## Next Steps
+
+- [Learn about worker walkthroughs for specific models](/devs/workers/walkthroughs/walkthrough-price-prediction-worker)
+- [Explore alternative deployment methods](/devs/workers/deploy-worker/build-and-deploy-worker-with-node-runners)
+- [Monitor your worker performance](/devs/workers/query-worker-data)
diff --git a/pages/devs/workers/query-ema-score.mdx b/pages/devs/workers/query-ema-score.mdx
index f4eef23..136d2f6 100644
--- a/pages/devs/workers/query-ema-score.mdx
+++ b/pages/devs/workers/query-ema-score.mdx
@@ -2,20 +2,44 @@ import { Callout } from 'nextra/components'
# How to Query Worker EMA Scores
-## What is an EMA Score?
+## What You'll Learn
+- Understanding EMA scores and their role in worker performance evaluation
+- How to query individual worker EMA scores using allorad commands
+- Methods to determine active vs passive set membership for reward eligibility
+- Comparing worker performance against active set thresholds
-The EMA score (Exponential Moving Average) reflects a worker's performance over time for a given topic, balancing recent and past achievements, and helps determine whether a participant stays in the **active set** (eligible for [rewards](/home/layers/consensus/workers)) or remains
-in the **passive set**.
+## Overview
-Read about our v0.3.0 release on [Merit-Based Sortitioning](/home/release-notes#v030) for a deeper dive on what makes up the active and passive set.
+**The EMA score (Exponential Moving Average) reflects a worker's performance over time for a given topic, balancing recent and past achievements, and helps determine whether a participant stays in the active set (eligible for [rewards](/home/layers/consensus/workers)) or remains in the passive set.**
+
+### What Makes EMA Scores Important?
+
+EMA scores serve critical functions:
+- **Performance tracking**: Continuous assessment of worker accuracy over time
+- **Reward eligibility**: Determines active vs passive set membership
+- **Fair evaluation**: Balances recent performance with historical contributions
+- **Network optimization**: Ensures top-performing workers receive priority
+
+### Understanding Active vs Passive Sets
+
+**Active Set Participants**:
+- Have their EMA score updated based on current performance
+- Maintain ongoing eligibility for [rewards](/home/layers/consensus/workers)
+- Contribute during each [epoch](/home/key-terms#epochs)
-Active participants have their EMA score updated based on their current performance, which influences their ongoing
-eligibility for [rewards](/home/layers/consensus/workers). In contrast, inactive participants, who do not contribute during a given [epoch](/home/key-terms#epochs), receive an adjusted score using a
-"dummy" value, which determines whether they can re-enter the active set in future epochs and qualify for rewards.
+**Passive Set Participants**:
+- Do not contribute during a given epoch
+- Receive adjusted scores using "dummy" values
+- Can re-enter the active set in future epochs based on historical performance
+
+Read about our v0.3.0 release on [Merit-Based Sortitioning](/home/release-notes#v030) for a deeper dive on what makes up the active and passive set.
This process ensures fairness while allowing inactive participants the chance to rejoin the active set based on their historical performance.
-## Query EMA Score for a Specific Worker
+## Querying Worker Performance
+
+### Query EMA Score for a Specific Worker
To query the EMA score for a specific worker (identified by the worker's allo address), run:
@@ -23,9 +47,16 @@ To query the EMA score for a specific worker (identified by the worker's allo ad
allorad q emissions inferer-score-ema [topic_id] [worker_address] --node https://allora-rpc.testnet.allora.network/
```
-- Replace `[topic_id]` and `[worker_address]` with your specific details.
+**Parameter Details**:
+- Replace `[topic_id]` with your specific topic identifier
+- Replace `[worker_address]` with the worker's allo address
-## Query the Lowest Worker in the Active Set's EMA Score
+**Use Cases**:
+- Monitor your worker's performance trends
+- Evaluate performance before making operational changes
+- Track score improvements after model optimizations
+
+### Query the Lowest Worker in the Active Set's EMA Score
To query the lowest EMA score for a worker in the Active Set, run:
@@ -33,9 +64,50 @@ To query the lowest EMA score for a worker in the Active Set, run:
allorad q emissions current-lowest-inferer-score [topic_id] --node https://allora-rpc.testnet.allora.network/
```
-- Replace `[topic_id]` with your specific details.
+**Parameter Details**:
+- Replace `[topic_id]` with your specific topic identifier
+
+**Use Cases**:
+- Determine the minimum threshold for active set membership
+- Assess how close your worker is to losing active status
+- Plan performance improvements to maintain eligibility
+
+## Determining Active Set Membership
+
+### Performance Comparison Method
To determine if your worker is in the active set and eligible for rewards, query your worker's EMA score and the lowest worker in the active set's EMA score and compare them.
If your worker's EMA score for a specific topic is higher than the EMA score of the lowest worker in the active set, your worker is in the active set for that topic.
-
\ No newline at end of file
+
+
+**Step-by-Step Process**:
+1. **Query your worker's EMA score** using the first command above
+2. **Query the active set threshold** using the second command above
+3. **Compare the scores**: if your score is higher than the lowest active score, your worker is in the active set (see the sketch below)
+4. **Take action**: If below threshold, focus on improving model performance
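+
+Steps 1-3 can be scripted end to end (a sketch; the address and topic ID are placeholders, `--output json` is the standard Cosmos SDK flag, and the `.score.score` jq path is an assumption about the response shape; inspect the raw output and adjust if your `allorad` version differs):
+
+```bash
+#!/usr/bin/env bash
+# Active-set check for one worker (all values are placeholders).
+TOPIC_ID=1
+WORKER=allo1exampleworkeraddress
+NODE=https://allora-rpc.testnet.allora.network/
+
+mine=$(allorad q emissions inferer-score-ema "$TOPIC_ID" "$WORKER" \
+         --node "$NODE" --output json | jq -r '.score.score')
+floor=$(allorad q emissions current-lowest-inferer-score "$TOPIC_ID" \
+         --node "$NODE" --output json | jq -r '.score.score')
+
+# Scores are decimal strings, so compare with awk rather than bash integer math.
+if awk -v a="$mine" -v b="$floor" 'BEGIN { exit !(a > b) }'; then
+  echo "Topic $TOPIC_ID: worker is in the ACTIVE set"
+else
+  echo "Topic $TOPIC_ID: worker is in the PASSIVE set"
+fi
+```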
+
+### Practical Implications
+
+**If Your Worker is in Active Set**:
+- Eligible for rewards in current epoch
+- Performance directly impacts EMA score updates
+- Continue optimizing to maintain position
+
+**If Your Worker is in Passive Set**:
+- Not eligible for current epoch rewards
+- Score adjusted using dummy values
+- Can re-enter active set with improved performance
+
+## Prerequisites
+
+- [`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli) installed and configured
+- Access to Allora Network RPC endpoints
+- Knowledge of your worker's allo address
+- Understanding of [topic IDs](/devs/topic-creators/how-to-create-topic) you're participating in
+
+## Next Steps
+
+- [Learn comprehensive worker data querying](/devs/workers/query-worker-data)
+- [Understand worker reward mechanisms](/home/layers/consensus/workers)
+- [Explore performance optimization strategies](/devs/workers/deploy-worker/allora-mdk)
\ No newline at end of file
diff --git a/pages/devs/workers/query-worker-data.mdx b/pages/devs/workers/query-worker-data.mdx
index 22cfc36..1fb91ce 100644
--- a/pages/devs/workers/query-worker-data.mdx
+++ b/pages/devs/workers/query-worker-data.mdx
@@ -1,129 +1,225 @@
# How to Query Worker Data using `allorad`
-Below is a list of commands to understand how to pull information about workers via [`allorad`](/devs/get-started/cli#installing-allorad):
+## What You'll Learn
+- Essential commands for querying worker data and performance metrics
+- How to check worker registration status and retrieve inference history
+- Understanding worker scores, regret calculations, and network participation
+- Troubleshooting worker issues using diagnostic queries
+
+## Overview
+
+**Below is a list of commands to understand how to pull information about workers via [`allorad`](/devs/get-started/quick-start#install-the-allora-cli):**
+
+### Why Query Worker Data?
+
+Worker data queries help you:
+- **Monitor performance**: Track your worker's scores and inference accuracy
+- **Troubleshoot issues**: Diagnose registration problems or missed rewards
+- **Optimize operations**: Understand how your worker impacts network outcomes
+- **Validate setup**: Confirm proper registration and active participation
## Prerequisites
-- [`allorad` CLI](/devs/get-started/cli)
-- A basic understanding of the Allora Network
+- **[`allorad` CLI](/devs/get-started/quick-start#install-the-allora-cli)**: Command-line interface for Allora Network
+- **A basic understanding of the Allora Network**: Familiarity with workers, topics, and inference concepts
+- **Access to RPC endpoints**: Network connectivity to query blockchain data
+- **Worker addresses**: Know the specific worker addresses you want to query
-## Query Functions
+## Query Command Structure
These functions only read from the appchain and do not write to it. Substitute the **Command** value into the base query below to retrieve the expected data.
+**Base Query Format**:
```bash
allorad q emissions [Command] --node <RPC_URL>
```
-## Check if Worker is Registered in a Topic
+**Command Components**:
+- **`allorad q emissions`**: Base query structure for emissions module
+- **`[Command]`**: Specific function to execute (see individual commands below)
+- **`--node <RPC_URL>`**: RPC endpoint for network connection
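+
+Filled in, a query looks like this (placeholder topic ID and worker address; the RPC URL is the testnet endpoint used throughout this page):
+
+```bash
+allorad q emissions is-worker-registered 1 allo1exampleworkeraddress \
+  --node https://allora-rpc.testnet.allora.network/
+```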
+## Worker Registration Commands
+
+### Check if Worker is Registered in a Topic
+
+**Query Details**:
- **RPC Method:** `IsWorkerRegisteredInTopicId`
- **Command:** `is-worker-registered [topic_id] [address]`
- **Description:** Checks whether a worker is registered in a specific topic. It returns `true` if the worker is registered in the given topic, and `false` otherwise.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic where you want to check the worker’s registration status.
- - `address`: The address of the worker you want to check.
-### Use Case:
+**Positional Arguments:**
+- **`topic_id`**: The identifier of the topic where you want to check the worker's registration status.
+- **`address`**: The address of the worker you want to check.
+
+#### Use Case
+
**Why use it?**
- This command is essential if you want to verify whether a worker is properly registered in a specific topic before submitting inferences or participating in a topic's operations.
**Example Scenario:**
- Before deploying a worker to submit inferences on a particular topic, you can confirm that the worker is registered to that topic to ensure proper functionality and avoid errors.
----
+**Practical Applications**:
+- Pre-deployment validation
+- Troubleshooting inference submission failures
+- Verifying topic participation eligibility
-## Get Worker Inferences Scores at Block
+## Performance Analysis Commands
+### Get Worker Inferences Scores at Block
+
+**Query Details**:
- **RPC Method:** `GetWorkerInferenceScoresAtBlock`
- **Command:** `inference-scores [topic_id] [block_height]`
- **Description:** Return scores for a worker at a block height.
- - Scores determine how [worker rewards](/home/layers/consensus/workers) are paid out.
+ - Scores determine how [worker rewards](/home/layers/consensus/workers) are paid out.
+
+**Positional Arguments**:
+- **`topic_id`**: Identifier of the topic whose information will be returned.
+- **`block_height`**: Block height to query.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned.
- - `block_height` Block height to query.
+#### Use Case
-### Use Case
**Why use it?**
- You may want to verify if a worker has received a high score at a specific block, particularly if you're troubleshooting worker rewards or performance discrepancies.
**Example Scenario:**
- If you believe your worker's reward for a particular topic is inaccurate, use this command to view how it was scored at a specific block.
----
+**Practical Applications**:
+- Reward verification and dispute resolution
+- Performance trend analysis over time
+- Score correlation with inference accuracy
-## Get Latest Worker Inference By Topic ID
+## Inference Data Commands
+### Get Latest Worker Inference By Topic ID
+
+**Query Details**:
- **RPC Method:** `GetWorkerLatestInferenceByTopicId`
- **Command:** `worker-latest-inference [topic_id] [worker_address]`
- **Description:** Gets the latest inference for a given worker and topic.
-- **Positional Arguments:**
- - `topic_id` Identifier of the topic whose information will be returned
- - `worker_address` Given worker to query on
+**Positional Arguments**:
+- **`topic_id`**: Identifier of the topic whose information will be returned
+- **`worker_address`**: Given worker to query on
+
+#### Use Case
-### Use Case
**Why use it?**
- This command is useful if you want to check whether a worker is actively submitting inferences for a topic and how recent those inferences are.
**Example Scenario:**
- A worker has missed rewards, and you want to verify if their latest inference was successfully submitted on time for a given topic.
----
+**Practical Applications**:
+- Verify active participation in topics
+- Check inference submission timing
+- Troubleshoot missed reward scenarios
-## Get Worker Node Info
+## Worker Information Commands
+### Get Worker Node Info
+
+**Query Details**:
- **RPC Method:** `GetWorkerNodeInfo`
- **Command:** `worker-info [address]`
- **Description:** Get node info for a specified worker node.
- Returns the **owner address** of the worker node.
- Returns the **worker node address** being queried.
-- **Positional Arguments:**
- - `address` The address of the worker node whose information will be retrieved.
+**Positional Arguments**:
+- **`address`**: The address of the worker node whose information will be retrieved.
+
+#### Use Case
-### Use Case
**Why use it?**
- This command is helpful for checking the current status of a worker node, especially if you are managing multiple nodes and want to verify the ownership or troubleshoot node configuration.
**Example Scenario:**
-- You want to ensure the node you’ve set up is operating under the correct owner and is correctly registered on the network.
+- You want to ensure the node you've set up is operating under the correct owner and is correctly registered on the network.
+
+**Practical Applications**:
+- Multi-node management and verification
+- Ownership validation for security purposes
+- Node configuration troubleshooting
----
+## Advanced Analytics Commands
-## Get Naive Inferer Network Regret
+### Get Naive Inferer Network Regret
+**Query Details**:
- **RPC Method:** `GetNaiveInfererNetworkRegret`
- **Command:** `naive-inferer-network-regret [topic_id] [inferer]`
- **Description:** Returns the network regret associated with including an inferer's naive inference in a batch for a given topic. If no specific regret is calculated, the command defaults to the topic's `InitialRegret` value.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic for which the regret will be calculated.
- - `inferer`: The address of the inferer whose naive inference is being evaluated.
-### Use Case:
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic for which the regret will be calculated.
+- **`inferer`**: The address of the inferer whose naive inference is being evaluated.
+
+#### Use Case
+
**Why use it?**
-- Use this command to assess the [regret](/home/key-terms#regrets) associated with incorporating an inferer’s naive inference into a batch. Useful for analyzing how poorly an inference may perform within the context of the network’s aggregate inference for a topic.
+- Use this command to assess the [regret](/home/key-terms#regrets) associated with incorporating an inferer's naive inference into a batch. Useful for analyzing how poorly an inference may perform within the context of the network's aggregate inference for a topic.
**Example Scenario:**
- If you want to understand how an inferer's baseline performance impacts the network outcome, this command helps quantify that penalty.
----
+**Practical Applications**:
+- Performance impact analysis
+- Inference quality assessment
+- Network contribution measurement
-## Get One-Out Inferer-Inferer Network Regret
+### Get One-Out Inferer-Inferer Network Regret
+**Query Details**:
- **RPC Method:** `GetOneOutInfererInfererNetworkRegret`
- **Command:** `one-out-inferer-inferer-network-regret [topic_id] [one_out_inferer] [inferer]`
-- **Description:** Returns the network regret when the implied outcome of the `one_out_inferer` is included in a batch alongside the `inferer`. If no specific regret value exists, it defaults to the topic’s `InitialRegret`.
-- **Positional Arguments:**
- - `topic_id`: The identifier of the topic for which the regret will be calculated.
- - `one_out_inferer`: The address of the inferer whose implied inference is being evaluated.
- - `inferer`: The address of the inferer to compare against.
+- **Description:** Returns the network regret when the implied outcome of the `one_out_inferer` is included in a batch alongside the `inferer`. If no specific regret value exists, it defaults to the topic's `InitialRegret`.
+
+**Positional Arguments**:
+- **`topic_id`**: The identifier of the topic for which the regret will be calculated.
+- **`one_out_inferer`**: The address of the inferer whose implied inference is being evaluated.
+- **`inferer`**: The address of the inferer to compare against.
+
+#### Use Case
-### Use Case:
**Why use it?**
- This command is useful when comparing how two inferers impact the network when their inferences are processed together. It helps identify the potential penalty on network performance when adding a specific inferer to a batch.
**Example Scenario:**
- You might want to compare the impact of two inferers to see how their joint performance influences the overall network regret. This is particularly useful for optimizing inference strategies.
+**Practical Applications**:
+- Comparative performance analysis
+- Multi-worker impact assessment
+- Inference strategy optimization
+
+## Common Use Cases
+
+### Troubleshooting Workflow
+
+1. **Check Registration**: Use `is-worker-registered` to verify topic participation
+2. **Verify Latest Inference**: Use `worker-latest-inference` to confirm recent submissions
+3. **Analyze Performance**: Use `inference-scores` to review reward calculations
+4. **Assess Impact**: Use regret commands to understand network contribution
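+
+The first three checks chain naturally into a small health script (a sketch; the topic ID, block height, and address are placeholders for your own values):
+
+```bash
+#!/usr/bin/env bash
+# Minimal worker health check using the commands documented above.
+TOPIC_ID=1
+WORKER=allo1exampleworkeraddress
+BLOCK=1000000   # hypothetical block height for the score lookup
+NODE=https://allora-rpc.testnet.allora.network/
+
+allorad q emissions is-worker-registered "$TOPIC_ID" "$WORKER" --node "$NODE"
+allorad q emissions worker-latest-inference "$TOPIC_ID" "$WORKER" --node "$NODE"
+allorad q emissions inference-scores "$TOPIC_ID" "$BLOCK" --node "$NODE"
+```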
+
+### Performance Monitoring
+
+- **Regular Health Checks**: Monitor inference submission frequency and timing
+- **Score Tracking**: Track performance scores across different blocks and topics
+- **Regret Analysis**: Understand how your worker affects overall network accuracy
+
+### Multi-Worker Management
+
+- **Ownership Verification**: Use `worker-info` to confirm node ownership
+- **Performance Comparison**: Compare regret values between different workers
+- **Resource Optimization**: Identify top-performing workers for resource allocation
+
+## Next Steps
+
+- [Learn about worker performance optimization](/devs/workers/query-ema-score)
+- [Explore worker deployment strategies](/devs/workers/deploy-worker/using-docker)
+- [Understand worker reward mechanisms](/home/layers/consensus/workers)
+
diff --git a/pages/devs/workers/requirements.mdx b/pages/devs/workers/requirements.mdx
index a29cf5b..6afc097 100644
--- a/pages/devs/workers/requirements.mdx
+++ b/pages/devs/workers/requirements.mdx
@@ -1,13 +1,49 @@
# System Requirements
-To participate in the Allora Network, ensure your system meets the following requirements:
+## What You'll Learn
+- Minimum hardware requirements for running Allora Network worker nodes
+- Required development and production environment tools
+- Technical prerequisites for deploying predictive models as workers
+
+## Overview
+
+**To participate in the Allora Network, ensure your system meets the following requirements:**
+
+### Why These Requirements Matter
+
+Meeting these specifications ensures:
+- **Reliable operation**: Consistent worker performance without resource constraints
+- **Network compatibility**: Proper integration with Allora Network protocols
+- **Development efficiency**: Smooth model development and deployment workflow
+- **Production readiness**: Stable operation in live network environments
+
+## Hardware Requirements
+
+### Minimum System Specifications
**Operating System**: Any modern operating system including Windows, macOS, or Linux
-**CPU**: Minimum of 1/2 core.
+**CPU**: Minimum of 0.5 core.
**Memory**: 2 to 4 GB.
**Storage**: SSD or NVMe with at least 5GB of space.
-## Technical Requirement
+### Resource Planning
+
+**CPU Considerations**:
+- Half-core minimum supports basic inference operations
+- Consider higher specs for complex models or multiple topics
+- Performance scales with computational requirements of your models
+
+**Memory Guidelines**:
+- 2GB minimum for lightweight models
+- 4GB recommended for more complex inference tasks
+- Additional memory needed for model training and data processing
+
+**Storage Requirements**:
+- SSD/NVMe provides faster model loading and data access
+- 5GB minimum covers base requirements and small models
+- Scale storage based on model size and data retention needs
+
+## Technical Requirements
Certain technical tools and platforms are required to develop and deploy your predictive models as workers within the Allora Network.
@@ -15,8 +51,39 @@ Certain technical tools and platforms are required to develop and deploy your pr
**Docker**: Essential for creating and managing containers.
+**Development Benefits**:
+- **Containerization**: Consistent environment across development and production
+- **Dependency management**: Isolated runtime environments
+- **Easy deployment**: Simplified packaging and distribution
+
### Production Environment
**Kubernetes**: A container orchestration system for automating software deployment, scaling, and management
**Helm**: A package manager for Kubernetes. _We advise the use of the Upshot Universal Helm Chart for deployment._
-**Preferred Cloud Service**: Depending on your preference, you can choose a cloud environment where your Node will be running
+**Preferred Cloud Service**: Depending on your preference, you can choose a cloud environment where your Node will be running
+
+**Production Considerations**:
+- **Kubernetes**: Provides auto-scaling, load balancing, and service discovery
+- **Helm Charts**: Standardized deployment templates for consistent configuration
+- **Cloud Services**: Choose based on latency, cost, and regional requirements
+
+### Recommended Cloud Providers
+
+Popular options include:
+- **AWS**: Comprehensive services with global presence
+- **Google Cloud**: Strong AI/ML integration and competitive pricing
+- **Azure**: Enterprise features and hybrid cloud capabilities
+- **DigitalOcean**: Simple, developer-friendly interface
+
+## Prerequisites
+
+- Basic understanding of containerization concepts
+- Familiarity with command-line interfaces
+- Knowledge of your preferred cloud platform
+- Understanding of [Allora Network architecture](/home/concepts/overview)
+
+## Next Steps
+
+- [Set up your development environment](/devs/get-started/quick-start)
+- [Learn worker deployment with Docker](/devs/workers/deploy-worker/using-docker)
+- [Explore cloud deployment options](/devs/workers/deploy-worker/build-and-deploy-worker-with-node-runners)
diff --git a/pages/devs/workers/walkthroughs/walkthrough-hugging-face-worker.mdx b/pages/devs/workers/walkthroughs/walkthrough-hugging-face-worker.mdx
index d26a6cf..a0aeb36 100644
--- a/pages/devs/workers/walkthroughs/walkthrough-hugging-face-worker.mdx
+++ b/pages/devs/workers/walkthroughs/walkthrough-hugging-face-worker.mdx
@@ -2,66 +2,177 @@ import { Callout } from 'nextra/components'
# Walkthrough: Deploying a Hugging Face Model as a Worker Node on the Allora Network
-> This guide provides a step-by-step process to deploy a Hugging Face model as a Worker Node within the Allora Network. By following these instructions, you will be able to integrate and run models from Hugging Face, contributing to the Allora decentralized machine intelligence ecosystem.
+## What You'll Learn
+- Complete process to deploy a Hugging Face model as a worker node on Allora Network
+- How to integrate the Chronos time-series forecasting model for cryptocurrency price predictions
+- Configuration management for multi-topic worker deployments and API integrations
+- Real-world implementation using CoinGecko API for BTC price forecasting on Topic 4
+## Overview
-## Prerequisites
+> **This guide provides a step-by-step process to deploy a Hugging Face model as a Worker Node within the Allora Network.** By following these instructions, you will be able to integrate and run models from Hugging Face, contributing to the Allora decentralized machine intelligence ecosystem.
-Before you start, ensure you have the following:
+### Why Hugging Face Integration Matters
-- A Docker environment with `docker compose` installed.
-- Basic knowledge of machine learning and the [Hugging Face](https://huggingface.co/) ecosystem.
-- Familiarity with Allora Network documentation on [building and deploying a worker node using Docker](/devs/workers/deploy-worker/using-docker).
+**Model Ecosystem Benefits**:
+- **Pre-trained models**: Access to thousands of state-of-the-art AI models
+- **Community-driven**: Leverage models developed and validated by the AI community
+- **Rapid deployment**: Quick integration without training models from scratch
+- **Proven performance**: Use battle-tested models with documented capabilities
-## Overview
+**Allora Network Value**:
+- **Diverse intelligence**: Contribute unique AI capabilities to the network
+- **Quality predictions**: Enhance network accuracy with specialized models
+- **Economic opportunity**: Earn rewards for providing valuable AI inferences
+- **Innovation platform**: Experiment with cutting-edge AI in a decentralized environment
+
+### Implementation Strategy
+
+**During this walkthrough, we will build a worker node from an existing Hugging Face model to deploy and participate on the Allora Network.** We will use this model to predict the price of BTC 24 hours ahead.
+
+**You can find all the files in [this Git repository](https://github.com/allora-network/allora-huggingface-walkthrough).**
+
+**Repository Benefits**:
+- **Complete implementation**: All necessary files and configurations
+- **Working example**: Tested and validated code for immediate use
+- **Best practices**: Proven patterns for Hugging Face integration
+- **Community support**: Maintained example for reference and troubleshooting
+
+## Technical Architecture
-During this walkthrough, we will build a worker node from an existing Hugging Face model to deploy and participate on the Allora Network. We will use this model to predict the price of BTC in 24h.
+### Chronos Model Overview
-You can find all the files in [this Git repository](https://github.com/allora-network/allora-huggingface-walkthrough).
+**In this example, we will use the Chronos model: [amazon/chronos-t5-tiny](https://huggingface.co/amazon/chronos-t5-tiny).** Chronos is a family of pretrained time series forecasting models based on language model architectures. In essence:
+- **A time series is transformed into a sequence of tokens via scaling and quantization, and a language model is trained on these tokens using the cross-entropy loss.**
+- **Once trained, probabilistic forecasts are obtained by sampling multiple future trajectories given the historical context.**
-In this example, we will use the Chronos model: [amazon/chronos-t5-tiny](https://huggingface.co/amazon/chronos-t5-tiny). Chronos is a family of pretrained time series forecasting models based on language model architectures. In essence:
- - A time series is transformed into a sequence of tokens via scaling and quantization, and a language model is trained on these tokens using the cross-entropy loss.
- - Once trained, probabilistic forecasts are obtained by sampling multiple future trajectories given the historical context.
+**Chronos models have been trained on a large corpus of publicly available time series data, as well as synthetic data generated using Gaussian processes.**
-Chronos models have been trained on a large corpus of publicly available time series data, as well as synthetic data generated using Gaussian processes.
+#### Model Architecture Benefits
+
+**Technical Advantages**:
+- **Language model foundation**: Leverage transformer architecture for time-series analysis
+- **Pre-trained knowledge**: Built-in understanding of time-series patterns and relationships
+- **Probabilistic forecasting**: Generate confidence intervals and uncertainty estimates
+- **Zero-shot capability**: Work on new datasets without additional training
+
+**Performance Characteristics**:
+- **Scalable**: Efficient processing of various time-series lengths and frequencies
+- **Robust**: Handles missing data and irregular time intervals
+- **Versatile**: Applicable to diverse forecasting domains beyond cryptocurrency
+- **State-of-the-art**: Competitive performance with specialized time-series models
+
+### Zero-Shot Forecasting
For simplicity, we will use Zero-shot forecasting, which refers to the ability of models to generate forecasts from unseen datasets.
-Our worker will provide inferences on the BTC 24h Prediction, which is Topic `4` on Allora Testnet.
+**Zero-Shot Benefits**:
+- **Immediate deployment**: No training required on new datasets
+- **Broad applicability**: Works across different time-series without customization
+- **Reduced complexity**: Simplified implementation and maintenance
+- **Faster time-to-market**: Quick deployment of forecasting capabilities
+
+## Network Integration
+
+### Topic Selection
+
+**Our worker will provide inferences on the BTC 24h Prediction, which is Topic `4` on Allora Testnet.**
+
+> **Note:**
+> To deploy on the Allora Network, you will need to [pick the topic ID](/devs/get-started/network-interaction#available-topics) you wish to generate inference for, or [create a new topic](/devs/topic-creators/how-to-create-topic).
-> Note:
-> To deploy on the Allora Network, you will need to [pick the topic ID](/devs/get-started/existing-topics) you wish to generate inference for, or [create a new topic](/devs/topic-creators/how-to-create-topic).
+**Topic Strategy**:
+- **Market selection**: Choose high-value, liquid markets for maximum network impact
+- **Competition analysis**: Understand existing participants and differentiation opportunities
+- **Data availability**: Ensure reliable data sources for consistent inference generation
+- **Economic viability**: Evaluate potential rewards versus operational costs
-We will use [Coingecko](https://www.coingecko.com/en/api) to fetch the data. You will need to [create an API key](https://www.coingecko.com/en/api/pricing).
+### Data Source Integration
-## Clone the repo
+**We will use [Coingecko](https://www.coingecko.com/en/api) to fetch the data.** You will need to [create an API key](https://www.coingecko.com/en/api/pricing).
-Clone the [basic-coin-prediction-node](https://github.com/allora-network/basic-coin-prediction-node) repository. It will serve as the base sample for your quick setup.
+**CoinGecko Benefits**:
+- **Reliable data**: High-quality, consistently updated cryptocurrency market data
+- **Comprehensive coverage**: Wide range of cryptocurrencies and trading pairs
+- **API stability**: Well-documented and maintained API for production use
+- **Rate limits**: Reasonable usage limits for development and production
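+
+A quick way to confirm your key and the endpoint behave as expected is to pull a month of daily BTC prices (a sketch following CoinGecko's public API docs; the demo-tier header name and `$CG_API_KEY` are assumptions to adapt to your plan):
+
+```bash
+# Fetch 30 days of daily BTC/USD market data from CoinGecko (sketch).
+curl -s "https://api.coingecko.com/api/v3/coins/bitcoin/market_chart?vs_currency=usd&days=30&interval=daily" \
+  -H "x-cg-demo-api-key: ${CG_API_KEY}" | head -c 300
+```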
+
+## Prerequisites
+
+**Before you start, ensure you have the following:**
+
+- **A Docker environment with `docker compose` installed.**
+- **Basic knowledge of machine learning and the [Hugging Face](https://huggingface.co/) ecosystem.**
+- **Familiarity with Allora Network documentation on [building and deploying a worker node using Docker](/devs/workers/deploy-worker/using-docker).**
+
+### Technical Requirements
+
+**Development Environment**:
+- **Docker proficiency**: Understanding of containerization and Docker Compose
+- **Python knowledge**: Familiarity with Python for AI/ML development
+- **API integration**: Experience with REST API consumption and error handling
+- **Blockchain basics**: Understanding of Allora Network concepts and worker operations
+
+**Infrastructure Requirements**:
+- **Stable internet**: Reliable connection for API calls and network participation
+- **Computational resources**: Adequate CPU and memory for model inference
+- **Storage capacity**: Sufficient disk space for model weights and data caching
+- **Monitoring capability**: Tools for tracking worker performance and health
+
+## Setup Process
+
+### Step 1: Clone the Repository
+
+**Clone the [basic-coin-prediction-node](https://github.com/allora-network/basic-coin-prediction-node) repository. It will serve as the base sample for your quick setup.**
```bash
git clone https://github.com/allora-network/basic-coin-prediction-node
cd basic-coin-prediction-node
```
-## Configure Your Environment
+**Repository Structure Benefits**:
+- **Production ready**: Battle-tested foundation for worker deployment
+- **Complete configuration**: All necessary files and settings included
+- **Docker integration**: Pre-configured containerization for easy deployment
+- **Documentation**: Comprehensive guides and examples
+
+### Step 2: Configure Your Environment
+
+1. **Copy `config.example.json` and name the copy `config.json`.**
+2. **Open `config.json` and update the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:**
+
+#### Wallet Configuration
+
+##### `wallet` Sub-object
-1. Copy `config.example.json` and name the copy `config.json`.
-2. Open `config.json` and **update** the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:
+1. **`nodeRpc`**: The [RPC URL](/devs/get-started/quick-start#network-configuration) for the corresponding network the node will be deployed on
+2. **`addressKeyName`**: The name you gave your wallet key when [setting up your wallet](/devs/get-started/quick-start#create-your-wallet)
+3. **`addressRestoreMnemonic`**: The mnemonic that was outputted when setting up a new key
-### `wallet` Sub-object
+**Wallet Security**:
+- **Secure storage**: Keep mnemonic phrases in secure, offline locations
+- **Access control**: Limit access to wallet configuration files
+- **Backup procedures**: Maintain multiple secure backups of wallet information
+- **Key rotation**: Consider periodic key updates for enhanced security
-1. `nodeRpc`: The [RPC URL](/devs/get-started/setup-wallet#rpc-url-and-chain-id) for the corresponding network the node will be deployed on
-2. `addressKeyName`: The name you gave your wallet key when [setting up your wallet](/devs/get-started/setup-wallet)
-3. `addressRestoreMnemonic`: The mnemonic that was outputted when setting up a new key
+#### Worker Configuration
-### `worker` Config
+##### `worker` Config
-1. `topicId`: The specific topic ID you created the worker for.
-2. `InferenceEndpoint`: The endpoint exposed by your worker node to provide inferences to the network.
-3. `Token`: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
- - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
+1. **`topicId`**: The specific topic ID you created the worker for.
+2. **`InferenceEndpoint`**: The endpoint exposed by your worker node to provide inferences to the network.
+3. **`Token`**: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
+ - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
+
+**Configuration Strategy**:
+- **Topic focus**: Align worker configuration with chosen prediction market
+- **Endpoint design**: Create clear, RESTful endpoints for inference delivery
+- **Token management**: Use meaningful token identifiers for easy maintenance
+- **Performance tuning**: Configure parameters for optimal inference delivery
+
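+A minimal sketch of how these fields fit together (placeholder values; `config.example.json` in the repository is the authoritative schema, and the exact nesting of the worker parameters may differ). Note that the `worker` entry is one element of an array, as the next subsection explains:
+
+```json
+{
+  "wallet": {
+    "nodeRpc": "https://allora-rpc.testnet.allora.network/",
+    "addressKeyName": "my-worker-key",
+    "addressRestoreMnemonic": "word1 word2 ... word24"
+  },
+  "worker": [
+    {
+      "topicId": 4,
+      "parameters": {
+        "InferenceEndpoint": "http://inference:8000/inference/{Token}",
+        "Token": "BTC"
+      }
+    }
+  ]
+}
+```
+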
+##### Multi-Topic Support
The `worker` config is an array of sub-objects, each representing a different topic ID. This structure allows you to manage multiple topic IDs, each within its own sub-object.
@@ -92,11 +203,19 @@ To deploy a worker that provides inferences for multiple topics, you can duplica
```
-## Creating the inference server
+**Multi-Topic Benefits**:
+- **Revenue diversification**: Participate in multiple markets for increased rewards
+- **Risk distribution**: Spread operational risk across different prediction categories
+- **Resource efficiency**: Maximize infrastructure utilization
+- **Market opportunity**: Capture value from various prediction markets
+
+## Implementation Development
+
+### Step 3: Creating the Inference Server
-We will create a very simple Flask application to serve inferences from the Hugging Face model.
+**We will create a very simple Flask application to serve inferences from the Hugging Face model.**
-Here is an example of our newly created `app.py`:
+**Here is an example of our newly created `app.py`:**
```python
from flask import Flask, Response
@@ -183,9 +302,30 @@ if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000, debug=True)
```
-## Modifying requirements.txt
+#### Code Architecture Analysis
+
+**Application Structure**:
+- **Flask framework**: Lightweight web framework for rapid API development
+- **Modular design**: Separate functions for data retrieval, model inference, and error handling
+- **RESTful endpoints**: Clean URL structure for inference requests
+- **Error handling**: Comprehensive error management for production reliability
+
+**Data Processing Pipeline**:
+1. **API integration**: Fetch historical price data from CoinGecko
+2. **Data transformation**: Convert API response to pandas DataFrame for analysis
+3. **Model preparation**: Initialize Chronos pipeline with appropriate device mapping
+4. **Inference generation**: Generate probabilistic forecasts using historical context
+5. **Response formatting**: Return predictions in network-compatible format
+
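+The pipeline above, condensed (the helper logic, CoinGecko query, and checkpoint name are simplified for illustration; the repository's `app.py` is the reference):
+
+```python
+import pandas as pd
+import requests
+import torch
+from chronos import ChronosPipeline
+from flask import Flask, Response
+
+app = Flask(__name__)
+pipeline = ChronosPipeline.from_pretrained(
+    "amazon/chronos-t5-tiny", device_map="auto", torch_dtype=torch.bfloat16
+)
+
+@app.route("/inference/<token>")
+def inference(token):
+    # 1. Fetch ~30 days of daily prices (token-to-id mapping and API key
+    #    handling omitted here; see app.py)
+    url = (f"https://api.coingecko.com/api/v3/coins/{token}/market_chart"
+           "?vs_currency=usd&days=30&interval=daily")
+    prices = requests.get(url, timeout=10).json()["prices"]
+    # 2. Shape the API response into a DataFrame
+    df = pd.DataFrame(prices, columns=["timestamp", "price"])
+    # 3-4. Forecast one step ahead from the historical context
+    context = torch.tensor(df["price"].values, dtype=torch.float32)
+    forecast = pipeline.predict(context, prediction_length=1)
+    # 5. Return the median sample as the point inference
+    return Response(str(forecast.median(dim=1).values.item()),
+                    mimetype="text/plain")
+```
+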
+**Technical Implementation Details**:
+- **Token mapping**: Support for multiple cryptocurrencies through symbol translation
+- **Device optimization**: Automatic device selection for GPU acceleration when available
+- **Memory efficiency**: Use bfloat16 precision for reduced memory usage
+- **Context window**: Use 30 days of daily price data for forecasting
+
+### Step 4: Update Dependencies
-Update the `requirements.txt` to include the necessary packages for the inference server:
+**Update the `requirements.txt` to include the necessary packages for the inference server:**
```
flask[async]
@@ -195,64 +335,127 @@ pandas
git+https://github.com/amazon-science/chronos-forecasting.git
```
-## Deployment
+**Dependency Strategy**:
+- **Core framework**: Flask with async support for concurrent request handling
+- **Production server**: Gunicorn with threading for scalable deployment
+- **AI libraries**: Transformers and PyTorch for model inference
+- **Data processing**: Pandas for efficient time-series data manipulation
+- **Model source**: Direct installation from Chronos repository for latest features
-Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:
+**Production Considerations**:
+- **Version pinning**: Consider pinning specific versions for reproducible deployments
+- **Security updates**: Regularly update dependencies for security patches
+- **Performance optimization**: Monitor dependency impact on inference speed
+- **Resource usage**: Balance feature richness with memory and CPU requirements
-### Export Variables
+## Deployment Process
-Execute the following command from the root directory:
+### Step 5: Deploy Your Worker
-```sh
-chmod +x init.config
-./init.config
-```
+**Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:**
-This command will automatically export the necessary variables from the account created. These variables are used by the offchain node and are bundled with your provided `config.json`, then passed to the node as environment variables.
+#### Deployment Workflow
-
-If you need to **make changes** to your `config.json` file after you ran the `init.config` command, rerun:
+**Pre-Deployment Checklist**:
+- **Configuration validation**: Verify all configuration files are properly set
+- **API credentials**: Ensure CoinGecko API key is configured and valid
+- **Network connectivity**: Test connection to Allora Network RPC endpoints
+- **Resource availability**: Confirm adequate system resources for model operation
-```sh
-chmod +x init.config
-./init.config
-```
+**Launch Process**:
+1. **Environment initialization**: Export configuration variables and prepare runtime environment
+2. **Docker orchestration**: Use Docker Compose to coordinate all service containers
+3. **Service startup**: Launch inference server, worker node, and monitoring components
+4. **Network registration**: Register worker with Allora Network for topic participation
+5. **Health verification**: Confirm all services are operational and responding correctly
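+
+Concretely, steps 1 and 2 map to the same commands used in the other worker walkthroughs. Fund your worker address from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) before registration, then run from the repository root:
+
+```bash
+chmod +x init.config
+./init.config            # exports wallet variables bundled with config.json
+docker compose up --build
+```
+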
-before proceeding.
+#### Monitoring and Maintenance
-
+**Operational Monitoring**:
+- **Performance tracking**: Monitor inference response times and accuracy metrics
+- **Resource utilization**: Track CPU, memory, and network usage patterns
+- **Error monitoring**: Set up alerts for API failures and model errors
+- **Network participation**: Verify continuous participation in consensus rounds
-### Request from Faucet
+**Maintenance Procedures**:
+- **Log rotation**: Manage log files to prevent disk space issues
+- **Dependency updates**: Keep AI models and libraries current with security patches
+- **Performance optimization**: Tune parameters based on operational metrics
+- **Backup procedures**: Maintain backups of configuration and operational data
-Copy your Allora address and request some tokens from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) to register your worker in the next step successfully.
+## Advanced Configuration
-### Deploy the Node
+### Model Optimization
-```
-docker compose up --build
-```
+**Performance Tuning**:
+- **Device selection**: Optimize GPU usage for faster inference generation
+- **Batch processing**: Process multiple requests efficiently when possible
+- **Memory management**: Balance model size with available system resources
+- **Caching strategies**: Cache frequently accessed data to reduce API calls
-Both the offchain node and the source services will be started. They will communicate through endpoints attached to the internal DNS.
+### Multi-Asset Support
-If your node is working correctly, you should see it actively checking for the active worker nonce:
+**Scaling Strategy**:
+- **Asset expansion**: Add support for additional cryptocurrencies and trading pairs
+- **Topic diversification**: Participate in multiple prediction markets simultaneously
+- **Load balancing**: Distribute computational load across available resources
+- **Quality assurance**: Maintain prediction quality across different asset classes
-```bash
-offchain_node | {"level":"debug","topicId":1,"time":1723043600,"message":"Checking for latest open worker nonce on topic"}
-```
+### Error Recovery
-A **successful** response from your Worker should display:
+**Resilience Patterns**:
+- **Graceful degradation**: Handle API failures without complete service interruption
+- **Retry mechanisms**: Implement exponential backoff for transient failures
+- **Circuit breakers**: Prevent cascade failures in external dependencies
+- **Health checks**: Automated detection and recovery from service issues
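+
+As a sketch of the retry idea (the helper below is illustrative, not part of the repository):
+
+```python
+import time
+
+def with_retries(fn, attempts=5, base_delay=1.0):
+    """Call fn(), retrying transient failures with exponential backoff."""
+    for attempt in range(attempts):
+        try:
+            return fn()
+        except Exception:
+            if attempt == attempts - 1:
+                raise  # out of attempts, surface the error
+            time.sleep(base_delay * 2 ** attempt)  # 1s, 2s, 4s, ...
+```
+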
-```bash
-{"level":"debug","msg":"Send Worker Data to chain","txHash":,"time":,"message":"Success"}
-```
+## Best Practices
-Congratulations! You've successfully deployed and registered your node on Allora.
+### Security Considerations
-### Testing
+**Production Security**:
+- **API key management**: Secure storage and rotation of external API credentials
+- **Network security**: Proper firewall configuration and access controls
+- **Container security**: Regular updates and security scanning of Docker images
+- **Data protection**: Secure handling of sensitive configuration and operational data
-You can test your local inference server by performing a `GET` request on `http://localhost:8000/inference/`.
+### Economic Optimization
-```bash
-curl http://localhost:8000/inference/
-```
+**Profitability Strategies**:
+- **Cost management**: Optimize infrastructure costs while maintaining performance
+- **Reward maximization**: Fine-tune inference quality for maximum network rewards
+- **Competition analysis**: Monitor and adapt to competitive landscape changes
+- **Resource efficiency**: Balance computational costs with prediction accuracy
+
+## Troubleshooting
+
+### Common Issues
+
+**Deployment Problems**:
+- **Configuration errors**: Validate JSON syntax and required field completion
+- **Network connectivity**: Test RPC endpoints and API access
+- **Resource constraints**: Ensure adequate CPU, memory, and storage
+- **Permission issues**: Verify Docker and file system permissions
+
+### Performance Issues
+
+**Optimization Solutions**:
+- **Model loading**: Optimize model initialization and caching
+- **API rate limits**: Implement proper rate limiting and request batching
+- **Memory leaks**: Monitor and address memory usage patterns
+- **Network latency**: Optimize network calls and connection pooling
+
+## Next Steps
+
+- [Explore price prediction worker walkthrough](/devs/workers/walkthroughs/walkthrough-price-prediction-worker) for alternative implementation approaches
+- [Learn about worker data querying](/devs/workers/query-worker-data) for performance monitoring
+- [Study worker requirements](/devs/workers/requirements) for infrastructure optimization
+- [Review existing topics](/devs/get-started/network-interaction#available-topics) for additional market opportunities
diff --git a/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker.mdx b/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker.mdx
index a066bd0..7b7e351 100644
--- a/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker.mdx
+++ b/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker.mdx
@@ -2,21 +2,57 @@ import { Callout } from 'nextra/components'
# Walkthrough: Build and Deploy Price Prediction Worker Node
-> How to build a node that predicts the future price of Ether
+## What You'll Learn
+- Complete process to build and deploy a price prediction worker node for Ethereum
+- Comprehensive environment configuration with multiple data providers and ML models
+- Understanding of multi-topic worker deployment and configuration management
+- Real-world deployment steps from setup to successful network registration
-## Prerequisites
+## Overview
-1. Make sure you have checked the documentation on how to [build and deploy a worker node using Docker](/devs/workers/deploy-worker/using-docker).
-2. Clone the [basic-coin-prediction-node](https://github.com/allora-network/basic-coin-prediction-node) repository. It will serve as the base sample for your quick setup.
+> **How to build a node that predicts the future price of Ether**
+
+**This walkthrough provides a complete guide for building a worker node that specializes in cryptocurrency price prediction.** You'll learn to configure multiple data sources, machine learning models, and deployment strategies for optimal network participation.
+
+### Why Price Prediction Workers Matter
+
+**Market Value**:
+- **High demand**: Price predictions are among the most requested inferences on prediction networks
+- **Economic utility**: Critical information for traders, DeFi protocols, and financial applications
+- **Network growth**: Popular topics attract more participants and increase network activity
+- **Revenue potential**: High-value predictions can command premium rewards
+
+**Technical Benefits**:
+- **Proven models**: Well-established machine learning approaches for time-series forecasting
+- **Data availability**: Multiple reliable data sources for training and validation
+- **Scalable architecture**: Framework supports multiple tokens and timeframes
+- **Community support**: Active community developing and sharing prediction strategies
+
+## Prerequisites
+
+**Before you start, ensure you have:**
+
+1. **Make sure you have checked the documentation on how to [build and deploy a worker node using Docker](/devs/workers/deploy-worker/using-docker).**
+2. **Clone the [basic-coin-prediction-node](https://github.com/allora-network/basic-coin-prediction-node) repository.** It will serve as the base sample for your quick setup.
```bash
git clone https://github.com/allora-network/basic-coin-prediction-node
cd basic-coin-prediction-node
```
-## Explainer Video
+### Setup Benefits
-Please see the video below to get a full deep-dive on how to deploy a price-prediction worker:
+**Repository Advantages**:
+- **Production ready**: Battle-tested codebase with proven performance
+- **Multi-model support**: Various machine learning algorithms pre-configured
+- **Data provider flexibility**: Support for multiple data sources and APIs
+- **Docker integration**: Containerized deployment for consistent environments
+
+## Educational Resources
+
+### Explainer Video
+
+**Please see the video below to get a full deep-dive on how to deploy a price-prediction worker:**
-## Configure Your Environment
+**Video Learning Benefits**:
+- **Visual demonstration**: Step-by-step walkthrough of the entire deployment process
+- **Best practices**: Expert recommendations for optimal worker configuration
+- **Troubleshooting**: Common issues and their solutions demonstrated in real-time
+- **Advanced techniques**: Tips for improving model performance and network rewards
+
+## Configuration Management
+
+### Configure Your Environment
+
+**Proper configuration is critical for optimal worker performance.** The system supports multiple configuration approaches for maximum flexibility.
+
+#### Environment Variables (.env File Configuration)
-### `.env` File Configuration
+**When setting up your environment, please follow the guidelines below for configuring your `.env` file:**
-When setting up your environment, please follow the guidelines below for configuring your `.env` file:
+##### Core Configuration Parameters
-- **`TOKEN`**: Specifies the cryptocurrency token to use. Must be one of the following:
- - `'ETH'` (Ethereum)
- - `'SOL'` (Solana)
- - `'BTC'` (Bitcoin)
- - `'BNB'` (Binance Coin)
- - `'ARB'` (Arbitrum)
-
- > **Note**: If you are using Binance as the data provider, any token can be used. However, if you are using Coingecko, you should add its `coin_id` in the [token map](https://github.com/allora-network/basic-coin-prediction-node/blob/70cf49d0a2317769d883ae882c146efbb915f5c0/updater.py#L107). Find more information [here](https://docs.coingecko.com/reference/simple-price) and the list [here](https://docs.google.com/spreadsheets/d/1wTTuxXt8n9q7C4NDXqQpI3wpKu1_5bGVmP9Xz0XGSyU/edit?gid=0#gid=0).
+###### TOKEN Selection
+**`TOKEN`: Specifies the cryptocurrency token to use. Must be one of the following:**
+- **`'ETH'` (Ethereum)**
+- **`'SOL'` (Solana)**
+- **`'BTC'` (Bitcoin)**
+- **`'BNB'` (Binance Coin)**
+- **`'ARB'` (Arbitrum)**
-- **`TRAINING_DAYS`**: Represents the number of days of historical data to use for training. Must be an integer greater than or equal to 1.
+> **Note**: If you are using Binance as the data provider, any token can be used. However, if you are using Coingecko, you should add its `coin_id` in the [token map](https://github.com/allora-network/basic-coin-prediction-node/blob/70cf49d0a2317769d883ae882c146efbb915f5c0/updater.py#L107). Find more information [here](https://docs.coingecko.com/reference/simple-price) and the list [here](https://docs.google.com/spreadsheets/d/1wTTuxXt8n9q7C4NDXqQpI3wpKu1_5bGVmP9Xz0XGSyU/edit?gid=0#gid=0).
-- **`TIMEFRAME`**: Defines the timeframe of the data used in the format like `10min` (minutes), `1h` (hours), `1d` (days), etc.
-
- - For Coingecko, the data granularity (candle's body) is automatic. To avoid downsampling when using Coingecko:
- - Use a **`TIMEFRAME`** of `>= 30min` if **`TRAINING_DAYS`** is `<= 2`.
- - Use a **`TIMEFRAME`** of `>= 4h` if **`TRAINING_DAYS`** is `<= 30`.
- - Use a **`TIMEFRAME`** of `>= 4d` if **`TRAINING_DAYS`** is `>= 31`.
+**Token Selection Strategy**:
+- **Market liquidity**: Choose tokens with high liquidity for better price data
+- **Data availability**: Ensure your data provider supports the chosen token
+- **Competition analysis**: Research existing workers for competitive positioning
+- **Network demand**: Select tokens with strong demand for predictions
-- **`MODEL`**: Specifies the machine learning model to use. Must be one of the following:
- - `'LinearRegression'`
- - `'SVR'` (Support Vector Regression)
- - `'KernelRidge'`
- - `'BayesianRidge'`
-
- > You can easily add support for other models by adding them to the configuration [here](https://github.com/allora-network/basic-coin-prediction-node/blob/main/model.py#L133).
+###### Training Configuration
+**`TRAINING_DAYS`: Represents the number of days of historical data to use for training. Must be an integer greater than or equal to 1.**
-- **`REGION`**: Defines the region for the Binance API. Must be `'EU'` or `'US'`.
+**Training Period Strategy**:
+- **Short periods (1-7 days)**: Better for capturing recent trends and market volatility
+- **Medium periods (7-30 days)**: Balance between trend capture and noise reduction
+- **Long periods (30+ days)**: Better for stable, long-term pattern recognition
+- **Market conditions**: Adjust based on current market volatility and stability
-- **`DATA_PROVIDER`**: Specifies the data provider to use. Must be either `'Binance'` or `'Coingecko'`.
-
- - Feel free to add support for other data providers to personalize your model!
+###### Timeframe Configuration
+**`TIMEFRAME`: Defines the timeframe of the data used in the format like `10min` (minutes), `1h` (hours), `1d` (days), etc.**
-- **`CG_API_KEY`**: Your Coingecko API key, required if you've set **`DATA_PROVIDER`** to `'coingecko'`.
+**For Coingecko, the data granularity (candle's body) is automatic. To avoid downsampling when using Coingecko:**
+- **Use a `TIMEFRAME` of `>= 30min` if `TRAINING_DAYS` is `<= 2`.**
+- **Use a `TIMEFRAME` of `>= 4h` if `TRAINING_DAYS` is `<= 30`.**
+- **Use a `TIMEFRAME` of `>= 4d` if `TRAINING_DAYS` is `>= 31`.**
-#### Sample Configuration (.env file)
+**Timeframe Selection Benefits**:
+- **Higher frequency**: Better for short-term predictions and rapid market changes
+- **Lower frequency**: Reduced noise and better for longer-term trend analysis
+- **Data alignment**: Proper timeframe selection prevents data quality issues
+- **Performance optimization**: Appropriate granularity improves model accuracy
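+
+The guidance above can be codified as a quick sanity check (an illustrative helper, not part of the repository):
+
+```python
+def min_timeframe_minutes(training_days: int) -> int:
+    """Smallest TIMEFRAME (in minutes) that avoids Coingecko downsampling."""
+    if training_days <= 2:
+        return 30            # >= 30min
+    if training_days <= 30:
+        return 4 * 60        # >= 4h
+    return 4 * 24 * 60       # >= 4d for TRAINING_DAYS >= 31
+```
+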
-Below is an example configuration for your `.env` file:
+##### Machine Learning Configuration
+
+###### Model Selection
+**`MODEL`: Specifies the machine learning model to use. Must be one of the following:**
+- **`'LinearRegression'`**
+- **`'SVR'` (Support Vector Regression)**
+- **`'KernelRidge'`**
+- **`'BayesianRidge'`**
+
+> You can easily add support for other models by adding them to the configuration [here](https://github.com/allora-network/basic-coin-prediction-node/blob/main/model.py#L133).
+
+**Model Selection Strategy**:
+- **LinearRegression**: Fast, interpretable, good for linear relationships
+- **SVR**: Powerful for non-linear patterns, handles outliers well
+- **KernelRidge**: Good balance between complexity and performance
+- **BayesianRidge**: Provides uncertainty estimates, robust to overfitting
+
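+In code, the `MODEL` setting resolves to a scikit-learn estimator; a sketch of the idea (the authoritative mapping lives in `model.py`, linked above):
+
+```python
+from sklearn.kernel_ridge import KernelRidge
+from sklearn.linear_model import BayesianRidge, LinearRegression
+from sklearn.svm import SVR
+
+MODELS = {
+    "LinearRegression": LinearRegression,
+    "SVR": SVR,
+    "KernelRidge": KernelRidge,
+    "BayesianRidge": BayesianRidge,
+}
+
+def make_model(name: str):
+    try:
+        return MODELS[name]()  # instantiate with default hyperparameters
+    except KeyError:
+        raise ValueError(f"Unsupported MODEL: {name}")
+```
+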
+##### Data Provider Configuration
+
+###### Provider Settings
+**`REGION`: Defines the region for the Binance API. Must be `'EU'` or `'US'`.**
+
+**`DATA_PROVIDER`: Specifies the data provider to use. Must be either `'Binance'` or `'Coingecko'`.**
+- **Feel free to add support for other data providers to personalize your model!**
+
+**`CG_API_KEY`: Your Coingecko API key, required if you've set `DATA_PROVIDER` to `'coingecko'`.**
+
+**Data Provider Comparison**:
+
+**Binance Benefits**:
+- **High frequency data**: More granular data for precise predictions
+- **Low latency**: Real-time data access for timely predictions
+- **Comprehensive coverage**: Wide range of trading pairs and markets
+- **Regional optimization**: Region selection for improved performance
+
+**CoinGecko Benefits**:
+- **Broader coverage**: More cryptocurrencies and historical data
+- **Market data**: Additional market metrics beyond price data
+- **Free tier**: Generous free usage limits for development
+- **Historical depth**: Extensive historical data for backtesting
+
+#### Sample Configuration (.env file)
+
+**Below is an example configuration for your `.env` file:**
```bash
TOKEN=ETH
@@ -82,16 +178,27 @@ DATA_PROVIDER=binance
CG_API_KEY=
```
-### `config.json` Configuration
+**Configuration Rationale**:
+- **ETH**: High liquidity and demand for Ethereum predictions
+- **30 days**: Good balance of historical context and recent relevance
+- **4h timeframe**: Appropriate granularity for 30-day training period
+- **SVR model**: Robust performance for cryptocurrency price prediction
+- **Binance US**: High-quality data with regional optimization
+
+### Network Configuration (config.json)
-1. Copy `config.example.json` and name the copy `config.json`.
-2. Open `config.json` and **update** the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:
+#### JSON Configuration Setup
-#### `wallet` Sub-object
+1. **Copy `config.example.json` and name the copy `config.json`.**
+2. **Open `config.json` and update the necessary fields inside the `wallet` sub-object and `worker` config with your specific values:**
-1. `nodeRpc`: The [RPC URL](/devs/get-started/setup-wallet#rpc-url-and-chain-id) for the corresponding network the node will be deployed on
-2. `addressKeyName`: The name you gave your wallet key when [setting up your wallet](/devs/get-started/setup-wallet)
-3. `addressRestoreMnemonic`: The mnemonic that was outputted when setting up a new key
+#### Wallet Configuration
+
+##### `wallet` Sub-object
+
+1. **`nodeRpc`**: The [RPC URL](/devs/get-started/quick-start#network-configuration) for the corresponding network the node will be deployed on
+2. **`addressKeyName`**: The name you gave your wallet key when [setting up your wallet](/devs/get-started/quick-start#create-your-wallet)
+3. **`addressRestoreMnemonic`**: The mnemonic that was outputted when setting up a new key
{/*
`addressKeyName` and `addressRestoreMnemonic` are optional parameters. If you did not previously generate keys, keys will be generated for you when [running the node](/devs/workers/deploy-worker/using-docker#generate-keys-and-export-variables).
@@ -99,12 +206,28 @@ CG_API_KEY=
If you have existing keys that you wish to use, you will need to provide these variables.
*/}
-#### `worker` Config
+**Wallet Security Best Practices**:
+- **Secure storage**: Keep mnemonic phrases in secure, offline locations
+- **Access control**: Limit access to wallet configuration files
+- **Backup procedures**: Maintain multiple secure backups of wallet information
+- **Key rotation**: Consider periodic key updates for enhanced security
+
+#### Worker Configuration
+
+##### `worker` Config
-1. `topicId`: The specific topic ID you created the worker for.
-2. `InferenceEndpoint`: The endpoint exposed by your worker node to provide inferences to the network.
-3. `Token`: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
- - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
+1. **`topicId`**: The specific topic ID you created the worker for.
+2. **`InferenceEndpoint`**: The endpoint exposed by your worker node to provide inferences to the network.
+3. **`Token`**: The token for the specific topic you are providing inferences for. The token needs to be exposed in the inference server endpoint for retrieval.
+ - The `Token` variable is specific to the endpoint you expose in your `main.py` file. It is not related to any topic parameter.
+
+**Worker Configuration Strategy**:
+- **Topic alignment**: Ensure worker configuration matches chosen prediction market
+- **Endpoint design**: Create clear, RESTful endpoints for inference delivery
+- **Token consistency**: Maintain consistent token naming across configurations
+- **Performance optimization**: Configure parameters for optimal inference delivery
+
+##### Multi-Topic Support
The `worker` config is an array of sub-objects, each representing a different topic ID. This structure allows you to manage multiple topic IDs, each within its own sub-object.
@@ -135,24 +258,46 @@ To deploy a worker that provides inferences for multiple topics, you can duplica
```
-## Building a Custom Model
+**Multi-Topic Benefits**:
+- **Revenue diversification**: Participate in multiple markets for increased rewards
+- **Risk distribution**: Spread operational risk across different prediction categories
+- **Resource efficiency**: Maximize infrastructure utilization across topics
+- **Market opportunity**: Capture value from various prediction markets simultaneously
+
+## Model Development
-`basic-coin-prediction-node` comes preconfigured with a model that uses regression to predict the price of Ethereum, and contribute an inference to topic 1 on Allora. Learn more about how this model is built from the ground up and how you can customize your model to give a unique inference to the network in the [next section](/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy).
+### Building a Custom Model
-## Deployment
+**`basic-coin-prediction-node` comes preconfigured with a model that uses regression to predict the price of Ethereum, and contributes an inference to topic 1 on Allora.** Learn more about how this model is built from the ground up and how you can customize your model to give a unique inference to the network in the [next section](/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy).
-Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:
+**Model Customization Benefits**:
+- **Competitive advantage**: Unique models can outperform standard approaches
+- **Specialized knowledge**: Incorporate domain expertise into prediction algorithms
+- **Performance optimization**: Fine-tune models for specific market conditions
+- **Innovation opportunity**: Experiment with cutting-edge AI/ML techniques
-### Export Variables
+**Development Path**:
+- **Understanding foundations**: Learn how the base model works
+- **Incremental improvements**: Make targeted enhancements to existing model
+- **Custom implementations**: Develop entirely new modeling approaches
+- **Performance validation**: Test and validate model improvements thoroughly
-Execute the following command from the root directory:
+## Deployment Process
+
+### Deploy Your Worker Node
+
+**Now that the node is configured, let's deploy and register it to the network. To run the node, follow these steps:**
+
+#### Step 1: Export Variables
+
+**Execute the following command from the root directory:**
```sh
chmod +x init.config
./init.config
```
-This command will automatically export the necessary variables from the account created. These variables are used by the offchain node and are bundled with your provided `config.json`, then passed to the node as environment variables.
+**This command will automatically export the necessary variables from the account created. These variables are used by the offchain node and are bundled with your provided `config.json`, then passed to the node as environment variables.**
If you need to **make changes** to your `config.json` file after you ran the `init.config` command, rerun:
@@ -166,37 +311,105 @@ before proceeding.
-### Request from Faucet
+**Variable Export Benefits**:
+- **Environment isolation**: Secure separation of configuration data
+- **Docker integration**: Seamless container environment setup
+- **Security enhancement**: Prevents credential exposure in command line
+- **Configuration validation**: Ensures all required parameters are present
-Copy your Allora address and request some tokens from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) to register your worker in the next step successfully.
+#### Step 2: Request from Faucet
-### Deploy the Node
+**Copy your Allora address and request some tokens from the [Allora Testnet Faucet](https://faucet.testnet.allora.network/) to register your worker in the next step successfully.**
+
+**Funding Requirements**:
+- **Registration fees**: Sufficient tokens for worker registration transactions
+- **Operational buffer**: Extra funds for ongoing network participation
+- **Transaction costs**: Additional tokens for network operation fees
+- **Emergency reserves**: Backup funds for unexpected operational needs
+
+#### Step 3: Deploy the Node
```
docker compose up --build
```
-Both the offchain node and the source services will be started. They will communicate through endpoints attached to the internal DNS.
+**Both the offchain node and the source services will be started. They will communicate through endpoints attached to the internal DNS.**
+
+**Deployment Process**:
+- **Container orchestration**: All services start in coordinated fashion
+- **Internal networking**: Secure communication between containers
+- **Service discovery**: Automatic endpoint resolution and connection
+- **Health monitoring**: Built-in checks for service availability
-If your node is working correctly, you should see it actively checking for the active worker nonce:
+## Verification and Testing
+
+### Monitoring Deployment Success
+
+**If your node is working correctly, you should see it actively checking for the active worker nonce:**
```bash
offchain_node | {"level":"debug","topicId":1,"time":1723043600,"message":"Checking for latest open worker nonce on topic"}
```
-A **successful** response from your Worker should display:
+**A successful response from your Worker should display:**
```bash
{"level":"debug","msg":"Send Worker Data to chain","txHash":,"time":,"message":"Success"}
```
-Congratulations! You've successfully deployed and registered your node on Allora.
+**Congratulations! You've successfully deployed and registered your node on Allora.**
+
+**Success Indicators**:
+- **Network connectivity**: Active communication with Allora Network
+- **Transaction confirmation**: Valid transaction hashes in logs
+- **Topic participation**: Worker actively participating in assigned topics
+- **Inference delivery**: Successful delivery of predictions to network
+
+### Testing Your Deployment
-### Testing
+#### Local Testing
-You can test your local inference server by performing a `GET` request on `http://localhost:8000/inference/`.
+**You can test your local inference server by performing a `GET` request on `http://localhost:8000/inference/`.**
```bash
curl http://localhost:8000/inference/
```
+**Testing Benefits**:
+- **Endpoint validation**: Confirm inference server is responding correctly
+- **Response verification**: Check prediction format and data quality
+- **Performance testing**: Measure response times and system performance
+- **Debugging support**: Identify issues before network deployment
+
+## Optimization and Maintenance
+
+### Performance Tuning
+
+**Model Optimization**:
+- **Parameter tuning**: Adjust ML model parameters for better accuracy
+- **Feature engineering**: Enhance input features for improved predictions
+- **Ensemble methods**: Combine multiple models for robust predictions
+- **Backtesting**: Validate model performance on historical data
+
+### Operational Excellence
+
+**Monitoring and Maintenance**:
+- **Performance tracking**: Monitor prediction accuracy and network rewards
+- **System health**: Track resource usage and system performance
+- **Error monitoring**: Set up alerts for service failures and issues
+- **Regular updates**: Keep models and infrastructure current
+
+## Next Steps
+
+- [Study the model implementation](/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy) for deep customization
+- [Explore worker data querying](/devs/workers/query-worker-data) for performance monitoring
+- [Learn about worker requirements](/devs/workers/requirements) for infrastructure optimization
+- [Review existing topics](/devs/get-started/network-interaction#available-topics) for additional market opportunities
+
diff --git a/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy.mdx b/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy.mdx
index 9091e26..e98c028 100644
--- a/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy.mdx
+++ b/pages/devs/workers/walkthroughs/walkthrough-price-prediction-worker/modelpy.mdx
@@ -2,44 +2,82 @@ import { Callout } from 'nextra/components'
# Model.py
-## Introduction
+## What You'll Learn
+- Complete breakdown of the model.py file structure and its key components
+- How to download and format historical cryptocurrency price data from Binance
+- Understanding the machine learning training pipeline for price prediction models
+- Customization strategies for extending the model to support multiple cryptocurrencies
-The [`model.py` file](https://github.com/allora-network/basic-coin-prediction-node/blob/main/model.py) in `basic-coin-prediction-node` consists of several key components:
+## Overview
+**The [`model.py` file](https://github.com/allora-network/basic-coin-prediction-node/blob/main/model.py) in `basic-coin-prediction-node` consists of several key components:**
+
+### Architecture Components
+
+**Core System Elements**:
- **Imports and Configuration:** Sets up necessary libraries and configuration variables.
- **Paths Configuration:** Generates paths for storing data dynamically based on coin symbols.
- **Downloading Data:** Downloads historical price data for the specified symbols, intervals, years, and months.
- **Formatting Data:** Reads, formats, and saves the downloaded data as CSV files.
- **Training the Model:** Trains a linear regression model on the formatted price data and saves the trained model.
-While the import and path configuration processes are straightforward, downloading and formatting the data, as well as training the model, require specific steps.
+**While the import and path configuration processes are straightforward, downloading and formatting the data, as well as training the model, require specific steps.**
-This documentation will guide you through creating models for different coins, making it easy to extend the script for general-purpose use.
+**This documentation will guide you through creating models for different coins, making it easy to extend the script for general-purpose use.**
-## Downloading the Data
+### Why This Architecture Matters
-The [`download_data`](https://github.com/allora-network/basic-coin-prediction-node/blob/5d70e9feee7d1e7725c7602427b6856e7ffbe479/model.py#L16) function is designed to automate the process of downloading historical market data from Binance, a popular cryptocurrency exchange.
-This function focuses on fetching data for a specified set of symbols (in this case, the trading pair `"ETHUSDT"`) across various time intervals and storing them in a defined directory.
+**Modular Design Benefits**:
+- **Separation of concerns**: Each component handles a specific aspect of the ML pipeline
+- **Reusability**: Components can be reused for different cryptocurrencies and timeframes
+- **Maintainability**: Clear structure makes code easy to understand and modify
+- **Scalability**: Framework supports extension to multiple trading pairs and data sources
-### How to Use for Downloading Data of Any Coin
+**Production Readiness**:
+- **Error handling**: Robust error management for production deployment
+- **Data validation**: Comprehensive data quality checks and preprocessing
+- **Model persistence**: Proper saving and loading of trained models
+- **Configuration flexibility**: Easy adaptation to different requirements
-#### Update the Symbols List
+## Data Acquisition Pipeline
-Replace `["ETHUSDT"]` with the desired trading pair(s), e.g., `["BTCUSDT", "LTCUSDT"]`.
+### Downloading the Data
-#### Adjust Time Intervals
+**The [`download_data`](https://github.com/allora-network/basic-coin-prediction-node/blob/5d70e9feee7d1e7725c7602427b6856e7ffbe479/model.py#L16) function is designed to automate the process of downloading historical market data from Binance, a popular cryptocurrency exchange.** This function focuses on fetching data for a specified set of symbols (in this case, the trading pair `"ETHUSDT"`) across various time intervals and storing them in a defined directory.
-Modify the intervals list if you need different time intervals. Binance supports various intervals like `["1m", "5m", "1h", "1d", "1w", "1M"]`.
+#### Data Download Strategy
-#### Extend Date Ranges
+**Function Benefits**:
+- **Comprehensive coverage**: Downloads both monthly and daily data for complete historical coverage
+- **Flexible intervals**: Supports multiple timeframes from minutes to months
+- **Reliable source**: Uses Binance, one of the world's largest cryptocurrency exchanges
+- **Automated process**: Reduces manual data collection effort and ensures consistency
-Update the years and months lists to match the historical range you need.
+**Data Quality Advantages**:
+- **High liquidity**: Binance data represents high-volume, liquid market activity
+- **Real-time updates**: Daily data downloads keep training data current
+- **Historical depth**: Monthly data provides extensive historical context
+- **Market coverage**: Comprehensive trading pair support for various cryptocurrencies
+
+#### How to Use for Downloading Data of Any Coin
+
+##### Configuration Customization
+
+**Update the Symbols List**:
+Replace `["ETHUSDT"]` with the desired trading pair(s), e.g., `["BTCUSDT", "LTCUSDT"]`.
-#### Define the Download Path
+**Adjust Time Intervals**:
+Modify the intervals list if you need different time intervals. Binance supports various intervals like `["1m", "5m", "1h", "1d", "1w", "1M"]`.
+
+**Extend Date Ranges**:
+Update the years and months lists to match the historical range you need.
+
+**Define the Download Path**:
Ensure `binance_data_path` is set to the directory where you want the data to be saved.
-Here’s a quick **example** of how to adjust the script for downloading data for multiple trading pairs:
+##### Implementation Example
+
+**Here's a quick example of how to adjust the script for downloading data for multiple trading pairs:**
```python
def download_data():
@@ -62,122 +100,171 @@ def download_data():
print(f"Downloaded daily data to {download_path}.")
```
+#### Multi-Asset Strategy
+
+**Implementation Benefits**:
+- **Diversification**: Support multiple cryptocurrencies for broader market coverage
+- **Risk management**: Reduce dependence on single asset performance
+- **Market opportunities**: Capitalize on different market conditions across assets
+- **Scalable framework**: Easy addition of new trading pairs and symbols
+
+**Configuration Best Practices**:
+- **Symbol selection**: Choose liquid, actively traded pairs for reliable data
+- **Interval matching**: Align data granularity with prediction timeframes
+- **Historical coverage**: Ensure sufficient historical data for meaningful training
+- **Storage optimization**: Organize data structure for efficient access and processing
+
+## Data Processing Pipeline
+
### Formatting the Data
-The [`format_data`](https://github.com/allora-network/basic-coin-prediction-node/blob/5d70e9feee7d1e7725c7602427b6856e7ffbe479/model.py#L36) function processes raw data files downloaded from Binance, transforming them into a consistent format for analysis. Here are the key steps:
+**The [`format_data`](https://github.com/allora-network/basic-coin-prediction-node/blob/5d70e9feee7d1e7725c7602427b6856e7ffbe479/model.py#L36) function processes raw data files downloaded from Binance, transforming them into a consistent format for analysis.** Here are the key steps:
-1. **File Handling:**
- - Lists and sorts all files in the `binance_data_path` directory.
- - Exits if no files are found.
+#### Data Processing Workflow
-2. **Initialize DataFrame:**
- - An empty DataFrame `price_df` is created to store the combined data.
+**1. File Handling:**
+- **Lists and sorts all files in the `binance_data_path` directory.**
+- **Exits if no files are found.**
-3. **Process Each File:**
- - Filters for `.zip` files and reads the contained CSV file.
- - Retains the first 11 columns and renames them to: `["start_time", "open", "high", "low", "close", "volume", "end_time", "volume_usd", "n_trades", "taker_volume", "taker_volume_usd"]`.
- - Sets the DataFrame index to the `end_time` column, converted to a timestamp.
+**2. Initialize DataFrame:**
+- **An empty DataFrame `price_df` is created to store the combined data.**
-4. **Concatenate Data:**
- - Combines data from each file into the `price_df` DataFrame.
+**3. Process Each File:**
+- **Filters for `.zip` files and reads the contained CSV file.**
+- **Retains the first 11 columns and renames them to: `["start_time", "open", "high", "low", "close", "volume", "end_time", "volume_usd", "n_trades", "taker_volume", "taker_volume_usd"]`.**
+- **Sets the DataFrame index to the `end_time` column, converted to a timestamp.**
+**4. Concatenate Data:**
+- **Combines data from each file into the `price_df` DataFrame.**
+**5. Sort and Save:**
+- **Sorts the final DataFrame by date and saves it to `training_price_data_path`.**
-5. **Sort and Save:**
- - Sorts the final DataFrame by date and saves it to `training_price_data_path`.
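+
+A condensed sketch of this workflow (paths are parameters here for illustration, and the column handling assumes headerless Binance kline CSVs; `model.py` in the repository is the reference):
+
+```python
+import os
+import pandas as pd
+
+COLUMNS = ["start_time", "open", "high", "low", "close", "volume", "end_time",
+           "volume_usd", "n_trades", "taker_volume", "taker_volume_usd"]
+
+def format_data(binance_data_path: str, training_price_data_path: str) -> None:
+    files = sorted(os.listdir(binance_data_path))
+    if not files:
+        return  # nothing downloaded yet
+    price_df = pd.DataFrame()
+    for name in files:
+        if not name.endswith(".zip"):
+            continue
+        # pandas reads the CSV inside the zip archive directly
+        df = pd.read_csv(os.path.join(binance_data_path, name),
+                         compression="zip", header=None).iloc[:, :11]
+        df.columns = COLUMNS
+        # Binance timestamps are in milliseconds
+        df.index = pd.to_datetime(df["end_time"], unit="ms")
+        price_df = pd.concat([price_df, df])
+    price_df.sort_index().to_csv(training_price_data_path)
+```
+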
+#### Data Standardization Benefits
-### Column Descriptions
+**Format Consistency**:
+- **Column standardization**: Uniform column names across all data files
+- **Time indexing**: Proper timestamp indexing for time-series analysis
+- **Data type conversion**: Appropriate data types for numerical analysis
+- **Missing data handling**: Robust processing of incomplete or malformed data
-- **start_time**: The start of the trading period.
-- **open**: Opening price.
-- **high**: Highest price during the period.
-- **low**: Lowest price during the period.
-- **close**: Closing price.
-- **volume**: Trading volume.
-- **end_time**: End of the trading period.
-- **volume_usd**: Trading volume in USD.
-- **n_trades**: Number of trades.
-- **taker_volume**: Taker buy volume.
-- **taker_volume_usd**: Taker buy volume in USD.
+**Analysis Preparation**:
+- **Feature engineering**: Structured data ready for feature extraction
+- **Time-series alignment**: Proper temporal ordering for predictive modeling
+- **Volume analysis**: Comprehensive trading volume metrics for enhanced modeling
+- **Price action**: Full OHLC (Open, High, Low, Close) data for technical analysis
-This function consolidates and formats the historical price data, making it ready for analysis or machine learning tasks.
+## Machine Learning Implementation
-### Training the Model
+### Model Training Architecture
-The [`train_model`](https://github.com/allora-network/basic-coin-prediction-node/blob/5d70e9feee7d1e7725c7602427b6856e7ffbe479/model.py#L75) function trains a **linear regression model** using historical price data and saves the trained model to a file. Here's a breakdown of the process:
+**Training Pipeline Components**:
+- **Data preprocessing**: Clean and prepare formatted data for machine learning
+- **Feature engineering**: Extract relevant features from price and volume data
+- **Model selection**: Choose appropriate algorithm based on data characteristics
+- **Training execution**: Train model on historical data with proper validation
+- **Model persistence**: Save trained model for inference deployment
-1. **Load the Data:**
- - Reads the price data from a CSV file specified by `training_price_data_path`.
+### Algorithm Selection Strategy
-2. **Prepare the DataFrame:**
- - Converts the `date` column to a timestamp and stores it as a numerical value.
- - Computes the average price using the `open`, `close`, `high`, and `low` columns.
+**Model Choice Considerations**:
+- **Linear regression**: Good baseline for trend-following strategies
+- **Complexity vs. performance**: Balance model sophistication with interpretability
+- **Overfitting prevention**: Avoid models that memorize rather than generalize
+- **Computational efficiency**: Consider inference speed for real-time predictions
-3. **Reshape Data for Regression:**
- - Extracts the `date` column as the feature (`x`) and the computed average price as the target (`y`).
- - Reshapes these arrays to the format expected by `scikit-learn`.
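+
+For example, swapping in polynomial regression only changes the estimator; the data preparation stays the same (the training data below is a placeholder):
+
+```python
+import numpy as np
+from sklearn.linear_model import LinearRegression
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import PolynomialFeatures
+
+# Placeholder training data: timestamps as the feature, mean price as target
+x_train = np.arange(100, dtype=float).reshape(-1, 1)
+y_train = 2000.0 + 0.5 * x_train.ravel() + np.random.randn(100)
+
+# Transform inputs to polynomial features, then fit a linear model
+model = make_pipeline(PolynomialFeatures(degree=2), LinearRegression())
+model.fit(x_train, y_train)
+```
+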
+### Training Best Practices
-4. **Split the Data:**
- - Splits the data into a training set and a test set using an 80/20 split. However, the test set is not used further in this function.
+**Validation Strategies**:
+- **Time-series splits**: Use temporal validation to prevent look-ahead bias
+- **Walk-forward analysis**: Progressive training and testing on chronological data
+- **Performance metrics**: Track relevant metrics like MAE, RMSE, and directional accuracy
+- **Model robustness**: Test performance across different market conditions
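+
+Temporal splitting is available out of the box in scikit-learn; a minimal sketch:
+
+```python
+import numpy as np
+from sklearn.model_selection import TimeSeriesSplit
+
+X = np.arange(100, dtype=float).reshape(-1, 1)
+y = np.arange(100, dtype=float)
+
+for train_idx, test_idx in TimeSeriesSplit(n_splits=5).split(X):
+    # Training indices always precede test indices: no look-ahead bias
+    assert train_idx.max() < test_idx.min()
+```
+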
-5. **Train the Model:**
- - Initializes and trains a `LinearRegression` model using the training data.
+## Customization and Extension
-6. **Save the Model:**
- - Creates the directory for the model file if it doesn't exist.
- - Saves the trained model to a file specified by `model_file_path` using `pickle`.
+### Multi-Cryptocurrency Support
-7. **Print Confirmation:**
- - Prints a message indicating that the trained model has been saved.
+**Extension Strategies**:
+- **Symbol parameterization**: Make cryptocurrency symbols configurable parameters
+- **Unified data pipeline**: Process multiple assets through same pipeline
+- **Model sharing**: Train shared models across similar asset classes
+- **Individual optimization**: Fine-tune models for specific cryptocurrency characteristics
-### Modifying the Function for Different Models
+### Advanced Features
-
-#### Understanding the Target Variable in Regression Models
+**Enhancement Opportunities**:
+- **External indicators**: Incorporate technical indicators and market sentiment
+- **Multiple timeframes**: Combine predictions across different time horizons
+- **Ensemble methods**: Combine multiple models for improved accuracy
+- **Real-time updates**: Implement streaming data updates for live predictions
-The **target variable (y)** in regression models is a critical component that determines the type of analysis and predictions that can be performed. In this context, the target variable represents continuous data—in this case, the average of financial metrics such as the open, close, high, and low prices over time. The y-axis points on the regression graph correspond to these continuous values, which can take on any numerical value within a defined range.
-
+### Configuration Management
-To change the model used for training, replace the `LinearRegression` model with another machine learning algorithm. Here is an example:
+**Flexible Configuration**:
+- **Environment variables**: Use environment settings for different deployment scenarios
+- **Configuration files**: Maintain separate configs for different assets and strategies
+- **Parameter tuning**: Systematic approach to hyperparameter optimization
+- **Version control**: Track model versions and configuration changes
-#### Using Polynomial Regression
+## Performance Optimization
-```python
-from sklearn.preprocessing import PolynomialFeatures
-from sklearn.linear_model import LinearRegression
-from sklearn.pipeline import make_pipeline
-import os
-import pickle
-import pandas as pd
-from sklearn.model_selection import train_test_split
-
-def train_model():
- # Load the eth price data
- price_data = pd.read_csv(training_price_data_path)
- df = pd.DataFrame()
-
- # Convert 'date' to a numerical value (timestamp) we can use for regression
- df["date"] = pd.to_datetime(price_data["date"])
- df["date"] = df["date"].map(pd.Timestamp.timestamp)
-
- # Calculate the mean price as the target variable
- df["price"] = price_data[["open", "close", "high", "low"]].mean(axis=1)
-
- # Reshape the data to the shape expected by sklearn
- x = df["date"].values.reshape(-1, 1)
- y = df["price"].values.reshape(-1, 1)
-
- # Split the data into training set and test set
- x_train, _, y_train, _ = train_test_split(x, y, test_size=0.2, random_state=0)
-
- # Create a pipeline that first transforms the input data to polynomial features and then fits a linear model
- model = make_pipeline(PolynomialFeatures(degree=2), LinearRegression())
- model.fit(x_train, y_train)
-
- # Create the model's parent directory if it doesn't exist
- os.makedirs(os.path.dirname(model_file_path), exist_ok=True)
-
- # Save the trained model to a file
- with open(model_file_path, "wb") as f:
- pickle.dump(model, f)
-
- print(f"Trained polynomial regression model saved to {model_file_path}")
-```
\ No newline at end of file
+### Computational Efficiency
+
+**Optimization Techniques**:
+- **Vectorized operations**: Use pandas and numpy for efficient data processing
+- **Memory management**: Optimize data structures for large datasets
+- **Parallel processing**: Leverage multiprocessing for data downloading and processing
+- **Caching strategies**: Cache processed data to avoid redundant computations
+
+### Data Management
+
+**Storage Optimization**:
+- **Compressed formats**: Use efficient storage formats for large datasets
+- **Incremental updates**: Download only new data to minimize bandwidth usage
+- **Data archival**: Implement strategies for managing historical data growth
+- **Quality monitoring**: Continuous monitoring of data quality and completeness
+
+## Troubleshooting and Debugging
+
+### Common Issues
+
+**Data Download Problems**:
+- **Network connectivity**: Handle API rate limits and connection failures
+- **Missing data**: Graceful handling of gaps in historical data
+- **Format changes**: Adapt to potential changes in Binance data format
+- **Storage issues**: Manage disk space and file permissions
+
+### Model Training Issues
+
+**Training Challenges**:
+- **Insufficient data**: Ensure adequate historical data for meaningful training
+- **Data quality**: Validate data integrity before model training
+- **Convergence problems**: Handle cases where models fail to converge
+- **Performance degradation**: Monitor and address model performance over time
+
+## Production Deployment
+
+### Model Serving
+
+**Deployment Considerations**:
+- **Model loading**: Efficient loading of trained models for inference
+- **Prediction pipeline**: Streamlined process from data input to prediction output
+- **Error handling**: Robust error management for production environments
+- **Monitoring**: Comprehensive monitoring of model performance and system health
+
+### Continuous Improvement
+
+**Model Maintenance**:
+- **Retraining schedules**: Regular model updates with new data
+- **Performance tracking**: Monitor prediction accuracy over time (see the sketch below)
+- **A/B testing**: Compare different model versions and configurations
+- **Feedback integration**: Incorporate performance feedback into model improvements
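+
+Performance tracking can start very small. The sketch below appends each prediction/outcome pair to a CSV log and reports the rolling mean absolute percentage error; the log path and the 100-prediction window are illustrative choices.
+
+```python
+import pandas as pd
+
+
+def track_accuracy(log_path: str, predicted: float, actual: float) -> float:
+    """Append one prediction/outcome pair to the log and return the rolling
+    mean absolute percentage error over the last 100 predictions."""
+    row = pd.DataFrame([{"predicted": predicted, "actual": actual}])
+    try:
+        log = pd.concat([pd.read_csv(log_path), row], ignore_index=True)
+    except FileNotFoundError:
+        log = row
+    log.to_csv(log_path, index=False)
+    recent = log.tail(100)
+    return (recent["predicted"] - recent["actual"]).abs().div(recent["actual"]).mean()
+```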
+
+## Prerequisites
+
+- **Python programming**: Proficiency in Python and data manipulation with pandas
+- **Machine learning basics**: Understanding of regression models and time-series analysis
+- **Cryptocurrency markets**: Basic knowledge of cryptocurrency trading and market dynamics
+- **Data processing**: Experience with data cleaning, formatting, and feature engineering
+
+## Next Steps
+
+- [Return to the main walkthrough](/devs/workers/walkthroughs/walkthrough-price-prediction-worker) for complete deployment
+- [Explore worker data querying](/devs/workers/query-worker-data) for performance monitoring
+- [Learn about worker requirements](/devs/workers/requirements) for infrastructure optimization
+- [Study Hugging Face integration](/devs/workers/walkthroughs/walkthrough-hugging-face-worker) for advanced AI models
\ No newline at end of file
diff --git a/pages/home/_meta.json b/pages/home/_meta.json
index 9c9c22c..2e34d1b 100644
--- a/pages/home/_meta.json
+++ b/pages/home/_meta.json
@@ -1,15 +1,25 @@
{
+ "---documentation": {
+ "type": "separator",
+ "title": "DOCUMENTATION"
+ },
"explore": "Explore Allora",
- "overview": "Overview",
- "key-terms": "Key Terminology",
- "participants": "Participants",
- "layers": "Layers of the Network",
- "tokenomics": "Tokenomics",
- "delegating-stake": "Delegating Stake",
- "confidence-intervals": "Confidence Intervals",
- "release-notes": "Release Notes",
+ "concepts": "Concepts",
+ "sample-projects": "Browse Sample Projects",
"whitepaper": {
"title": "Whitepaper",
"href": "https://research.assets.allora.network/allora.0x10001.pdf"
- }
+ },
+ "---developers": {
+ "type": "separator",
+ "title": "DEVELOPERS"
+ },
+ "dev-get-started": "Get Started",
+ "dev-topic-creators": "Topic Creators",
+ "dev-consumers": "Consumers",
+ "dev-sdks": "SDKs",
+ "dev-workers": "Workers",
+ "dev-reputers": "Reputers",
+ "dev-validators": "Validators",
+ "dev-reference": "Reference"
}
\ No newline at end of file
diff --git a/pages/home/concepts/_meta.json b/pages/home/concepts/_meta.json
new file mode 100644
index 0000000..d11c453
--- /dev/null
+++ b/pages/home/concepts/_meta.json
@@ -0,0 +1,9 @@
+{
+ "overview": "What is Allora?",
+ "key-terms": "Key Terminology",
+ "participants": "Participants",
+ "layers": "Layers of the Network",
+ "tokenomics": "Tokenomics",
+ "delegating-stake": "Delegating Stake",
+ "confidence-intervals": "Confidence Intervals"
+}
\ No newline at end of file
diff --git a/pages/home/confidence-intervals.mdx b/pages/home/concepts/confidence-intervals.mdx
similarity index 100%
rename from pages/home/confidence-intervals.mdx
rename to pages/home/concepts/confidence-intervals.mdx
diff --git a/pages/home/delegating-stake.mdx b/pages/home/concepts/delegating-stake.mdx
similarity index 100%
rename from pages/home/delegating-stake.mdx
rename to pages/home/concepts/delegating-stake.mdx
diff --git a/pages/home/key-terms.mdx b/pages/home/concepts/key-terms.mdx
similarity index 100%
rename from pages/home/key-terms.mdx
rename to pages/home/concepts/key-terms.mdx
diff --git a/pages/home/layers.mdx b/pages/home/concepts/layers.mdx
similarity index 100%
rename from pages/home/layers.mdx
rename to pages/home/concepts/layers.mdx
diff --git a/pages/home/layers/_meta.json b/pages/home/concepts/layers/_meta.json
similarity index 100%
rename from pages/home/layers/_meta.json
rename to pages/home/concepts/layers/_meta.json
diff --git a/pages/home/layers/consensus.mdx b/pages/home/concepts/layers/consensus.mdx
similarity index 100%
rename from pages/home/layers/consensus.mdx
rename to pages/home/concepts/layers/consensus.mdx
diff --git a/pages/home/layers/consensus/_meta.json b/pages/home/concepts/layers/consensus/_meta.json
similarity index 100%
rename from pages/home/layers/consensus/_meta.json
rename to pages/home/concepts/layers/consensus/_meta.json
diff --git a/pages/home/layers/consensus/reputers.mdx b/pages/home/concepts/layers/consensus/reputers.mdx
similarity index 100%
rename from pages/home/layers/consensus/reputers.mdx
rename to pages/home/concepts/layers/consensus/reputers.mdx
diff --git a/pages/home/layers/consensus/topic-rewards.mdx b/pages/home/concepts/layers/consensus/topic-rewards.mdx
similarity index 100%
rename from pages/home/layers/consensus/topic-rewards.mdx
rename to pages/home/concepts/layers/consensus/topic-rewards.mdx
diff --git a/pages/home/layers/consensus/total-rewards.mdx b/pages/home/concepts/layers/consensus/total-rewards.mdx
similarity index 100%
rename from pages/home/layers/consensus/total-rewards.mdx
rename to pages/home/concepts/layers/consensus/total-rewards.mdx
diff --git a/pages/home/layers/consensus/workers.mdx b/pages/home/concepts/layers/consensus/workers.mdx
similarity index 100%
rename from pages/home/layers/consensus/workers.mdx
rename to pages/home/concepts/layers/consensus/workers.mdx
diff --git a/pages/home/layers/forecast-synthesis.mdx b/pages/home/concepts/layers/forecast-synthesis.mdx
similarity index 100%
rename from pages/home/layers/forecast-synthesis.mdx
rename to pages/home/concepts/layers/forecast-synthesis.mdx
diff --git a/pages/home/layers/forecast-synthesis/_meta.json b/pages/home/concepts/layers/forecast-synthesis/_meta.json
similarity index 100%
rename from pages/home/layers/forecast-synthesis/_meta.json
rename to pages/home/concepts/layers/forecast-synthesis/_meta.json
diff --git a/pages/home/layers/forecast-synthesis/forecast.mdx b/pages/home/concepts/layers/forecast-synthesis/forecast.mdx
similarity index 100%
rename from pages/home/layers/forecast-synthesis/forecast.mdx
rename to pages/home/concepts/layers/forecast-synthesis/forecast.mdx
diff --git a/pages/home/layers/forecast-synthesis/synthesis.mdx b/pages/home/concepts/layers/forecast-synthesis/synthesis.mdx
similarity index 100%
rename from pages/home/layers/forecast-synthesis/synthesis.mdx
rename to pages/home/concepts/layers/forecast-synthesis/synthesis.mdx
diff --git a/pages/home/layers/inference-consumption.mdx b/pages/home/concepts/layers/inference-consumption.mdx
similarity index 90%
rename from pages/home/layers/inference-consumption.mdx
rename to pages/home/concepts/layers/inference-consumption.mdx
index e5a85d3..7b986b8 100644
--- a/pages/home/layers/inference-consumption.mdx
+++ b/pages/home/concepts/layers/inference-consumption.mdx
@@ -5,7 +5,7 @@ At it's core, Allora facilitates the exchange of inferences, enabling _Consumers

-Learn how to query data and inferences [offchain](/devs/consumers/allora-api-endpoint) and [onchain](/devs/get-started/query-network-data) for a given [topic](/home/key-terms#topics).
+Learn how to query data and inferences [offchain](/devs/consumers/allora-api-endpoint) and [onchain](/devs/get-started/network-interaction#querying-network-data) for a given [topic](/home/key-terms#topics).
## Topic Coordination
diff --git a/pages/home/overview.mdx b/pages/home/concepts/overview.mdx
similarity index 99%
rename from pages/home/overview.mdx
rename to pages/home/concepts/overview.mdx
index 88157e0..eda34bd 100644
--- a/pages/home/overview.mdx
+++ b/pages/home/concepts/overview.mdx
@@ -1,6 +1,6 @@
import { Callout } from 'nextra/components'
-# Overview
+# What is Allora?
> Overcoming information inefficiency with Allora
diff --git a/pages/home/participants.mdx b/pages/home/concepts/participants.mdx
similarity index 100%
rename from pages/home/participants.mdx
rename to pages/home/concepts/participants.mdx
diff --git a/pages/home/tokenomics.mdx b/pages/home/concepts/tokenomics.mdx
similarity index 100%
rename from pages/home/tokenomics.mdx
rename to pages/home/concepts/tokenomics.mdx
diff --git a/pages/home/dev-consumers/_meta.json b/pages/home/dev-consumers/_meta.json
new file mode 100644
index 0000000..484cb57
--- /dev/null
+++ b/pages/home/dev-consumers/_meta.json
@@ -0,0 +1,22 @@
+{
+ "allora-api-endpoint": {
+ "title": "Allora API Endpoint",
+ "href": "/devs/consumers/allora-api-endpoint"
+ },
+ "rpc-data-access": {
+ "title": "RPC Data Access",
+ "href": "/devs/consumers/rpc-data-access"
+ },
+ "consumer-contracts": {
+ "title": "Consumer Contracts",
+ "href": "/devs/consumers/consumer-contracts"
+ },
+ "existing-consumers": {
+ "title": "Existing Consumers",
+ "href": "/devs/consumers/existing-consumers"
+ },
+ "walkthrough-use-topic-inference": {
+ "title": "Walkthrough: Using a Topic Inference on-chain",
+ "href": "/devs/consumers/walkthrough-use-topic-inference"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-get-started/_meta.json b/pages/home/dev-get-started/_meta.json
new file mode 100644
index 0000000..c25b7c2
--- /dev/null
+++ b/pages/home/dev-get-started/_meta.json
@@ -0,0 +1,14 @@
+{
+ "overview": {
+ "title": "Quick Start",
+ "href": "/devs/get-started/quick-start"
+ },
+ "setup-wallet": {
+ "title": "Network Interaction",
+ "href": "/devs/get-started/network-interaction"
+ },
+ "cli": {
+ "title": "Model Forge Competition",
+ "href": "/devs/get-started/model-forge"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-reference/_meta.json b/pages/home/dev-reference/_meta.json
new file mode 100644
index 0000000..9452733
--- /dev/null
+++ b/pages/home/dev-reference/_meta.json
@@ -0,0 +1,14 @@
+{
+ "allorad": {
+ "title": "allorad",
+ "href": "/devs/reference/allorad"
+ },
+ "module-accounts": {
+ "title": "Module Accounts",
+ "href": "/devs/reference/module-accounts"
+ },
+ "params": {
+ "title": "Parameters",
+ "href": "/devs/reference/params"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-reputers/_meta.json b/pages/home/dev-reputers/_meta.json
new file mode 100644
index 0000000..1e84cf5
--- /dev/null
+++ b/pages/home/dev-reputers/_meta.json
@@ -0,0 +1,22 @@
+{
+ "reputers": {
+ "title": "Deploy a Simple Reputer using Docker",
+ "href": "/devs/reputers/reputers"
+ },
+ "coin-prediction-reputer": {
+ "title": "Deploy a Pre-configured Reputer",
+ "href": "/devs/reputers/coin-prediction-reputer"
+ },
+ "set-and-adjust-stake": {
+ "title": "Set and Adjust Stake",
+ "href": "/devs/reputers/set-and-adjust-stake"
+ },
+ "query-reputer-data": {
+ "title": "How to Query Reputer Data using allorad",
+ "href": "/devs/reputers/query-reputer-data"
+ },
+ "query-ema-score": {
+ "title": "Query EMA Score for a Reputer using allorad",
+ "href": "/devs/reputers/query-ema-score"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-sdks/_meta.json b/pages/home/dev-sdks/_meta.json
new file mode 100644
index 0000000..a93d9f8
--- /dev/null
+++ b/pages/home/dev-sdks/_meta.json
@@ -0,0 +1,14 @@
+{
+ "overview": {
+ "title": "Overview",
+ "href": "/devs/sdk/overview"
+ },
+ "allora-sdk-ts": {
+ "title": "TypeScript SDK",
+ "href": "/devs/sdk/allora-sdk-ts"
+ },
+ "allora-sdk-py": {
+ "title": "Python SDK",
+ "href": "/devs/sdk/allora-sdk-py"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-topic-creators/_meta.json b/pages/home/dev-topic-creators/_meta.json
new file mode 100644
index 0000000..3a41d07
--- /dev/null
+++ b/pages/home/dev-topic-creators/_meta.json
@@ -0,0 +1,14 @@
+{
+ "topic-life-cycle": {
+ "title": "Topic Life Cycle",
+ "href": "/devs/topic-creators/topic-life-cycle"
+ },
+ "how-to-create-topic": {
+ "title": "How to Create/Fund a Topic using allorad",
+ "href": "/devs/topic-creators/how-to-create-topic"
+ },
+ "query-topic-data": {
+ "title": "How to Query Topic Data using allorad",
+ "href": "/devs/topic-creators/query-topic-data"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-validators/_meta.json b/pages/home/dev-validators/_meta.json
new file mode 100644
index 0000000..776e612
--- /dev/null
+++ b/pages/home/dev-validators/_meta.json
@@ -0,0 +1,26 @@
+{
+ "nop-requirements": {
+ "title": "System Requirements",
+ "href": "/devs/validators/nop-requirements"
+ },
+ "deploy-chain": {
+ "title": "Deploy Allora Chain",
+ "href": "/devs/validators/deploy-chain"
+ },
+ "run-full-node": {
+ "title": "Run a Full Node",
+ "href": "/devs/validators/run-full-node"
+ },
+ "stake-a-validator": {
+ "title": "Stake a Validator",
+ "href": "/devs/validators/stake-a-validator"
+ },
+ "validator-operations": {
+ "title": "Validator Operations",
+ "href": "/devs/validators/validator-operations"
+ },
+ "software-upgrades": {
+ "title": "Software Upgrades",
+ "href": "/devs/validators/software-upgrades"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/dev-workers/_meta.json b/pages/home/dev-workers/_meta.json
new file mode 100644
index 0000000..3105a79
--- /dev/null
+++ b/pages/home/dev-workers/_meta.json
@@ -0,0 +1,26 @@
+{
+ "requirements": {
+ "title": "System Requirements",
+ "href": "/devs/workers/requirements"
+ },
+ "deploy-worker": {
+ "title": "Build/Deploy an Inference Worker",
+ "href": "/devs/workers/deploy-worker"
+ },
+ "walkthroughs": {
+ "title": "Walkthroughs",
+ "href": "/devs/workers/walkthroughs"
+ },
+ "deploy-forecaster": {
+ "title": "Build and Deploy a Forecaster",
+ "href": "/devs/workers/deploy-forecaster"
+ },
+ "query-worker-data": {
+ "title": "How To Query Worker Data using allorad",
+ "href": "/devs/workers/query-worker-data"
+ },
+ "query-ema-score": {
+ "title": "Query EMA Score of a Worker using allorad",
+ "href": "/devs/workers/query-ema-score"
+ }
+}
\ No newline at end of file
diff --git a/pages/home/explore.mdx b/pages/home/explore.mdx
index 667ee98..0dacc07 100644
--- a/pages/home/explore.mdx
+++ b/pages/home/explore.mdx
@@ -1,9 +1,13 @@
import { Cards, Card, Callout } from 'nextra/components'
-# Explore Allora
+# What is Allora?
-Allora is an open-source, decentralized marketplace for intelligence.
+Allora is an open-source marketplace that connects AI models in a decentralized network where they learn from each other to deliver superior predictions.
+
+- **Collective Intelligence**: The network gets smarter as different models compete and improve performance
+- **Merit-Based Rewards**: Top performers earn rewards and network access
+- **Real-World Impact**: Build for use cases in finance, weather, and other critical applications
Examples of intelligence include, but are not limited to:
@@ -12,44 +16,22 @@ Examples of intelligence include, but are not limited to:
- Sentiment Analysis
- Generative and Reinforcement Problems
-## Learn More
-
-
-
-
-
-
-
-
-
-
-
-
-
-{/* Allora brings together
-- Consumers who pay for and acquire inferences or expertise to be revealed
-- Workers who reveal inferences
-- Reputers who determine how accurate workers are after a ground truth is revealed
-- Validators who secure protocol state, history, and reward distributions
-With these ingredients, Allora is able to improve itself over time and produce inferences that are more accurate than the most accurate participant. */}
## How to Interact with the Network
There are a few easy things a user could do to interact with the Allora Network quickly and efficiently, including installing CLI tools, creating a topic, and querying inferences off and on chain.
-
-
+
+
-
-
-
+
## Participants
-[Participants](/home/participants) can permissionlessly integrate with Allora to consume, supply, or verify the accuracy of exchanged inferences.
+Participants can permissionlessly integrate with Allora to consume, supply, or verify the accuracy of exchanged inferences.
Allora [consumer contracts](/devs/consumers/consumer-contracts/dev-consumers) are currently live on Sepolia and Arbitrum One, and will be deployed to additional chains.
@@ -57,11 +39,96 @@ Allora [consumer contracts](/devs/consumers/consumer-contracts/dev-consumers) ar
Here we'll help you find exactly what you're looking for.
- Discover the best way to participate, for:
- - [Data Scientists (Workers)](/devs/workers): Experts in machine learning or domain-specific insights who want to contribute their knowledge to the network.
- - [Developers (Consumers)](/devs/consumers): Individuals or organizations seeking crowdsourced predictions to integrate into their applications.
- - [Validators](/devs/validators): Those with the skills and resources to run hardware and ensure the security and integrity of the network.
- - [Data Providers (Reputers)](/devs/reputers): Contributors who supply reliable data to evaluate and ensure the accuracy of predictions.
+ - [Data Scientists (Workers)](/devs/workers/requirements): Experts in machine learning or domain-specific insights who want to contribute their knowledge to the network.
+ - [Developers (Consumers)](/devs/consumers/allora-api-endpoint): Individuals or organizations seeking crowdsourced predictions to integrate into their applications.
+ - [Validators](/devs/validators/nop-requirements): Those with the skills and resources to run hardware and ensure the security and integrity of the network.
+ - [Data Providers (Reputers)](/devs/reputers/reputers): Contributors who supply reliable data to evaluate and ensure the accuracy of predictions.
-## Questions?
+## More Resources
-Join the Allora Network [Community](/community/contribute) to ask for support, help improve Allora, or showcase what you built with Allora.
+
+<Cards>
+  <Card
+    title={
+      <>
+        Browse Sample Projects
+      </>
+    }
+    href="/home/sample-projects"
+  >
+    Use one of our sample projects to get started using Allora
+  </Card>
+  <Card
+    title={
+      <>
+        Allora Explorer Studio
+      </>
+    }
+    href="/allora-explorer-studio"
+  >
+    Understand your model metrics after you have deployed your model to the network
+  </Card>
+</Cards>
+
+<Cards>
+  <Card
+    title={
+      <>
+        YouTube Resources
+      </>
+    }
+    href="https://www.youtube.com/@AlloraNetwork"
+  >
+    Watch tutorials, new features, and fireside chats
+  </Card>
+  <Card
+    title={
+      <>
+        Developer Resources
+      </>
+    }
+    href="mailto:devs@allora.network"
+  >
+    Sign up for the newsletter to get highlights and updates
+  </Card>
+</Cards>
+
+<Cards>
+  <Card
+    title={
+      <>
+        X Updates
+      </>
+    }
+    href="https://twitter.com/alloranetwork"
+  >
+    Follow us on X (Twitter) to get updates and join our community
+  </Card>
+  <Card
+    title={
+      <>
+        Discord Community
+      </>
+    }
+    href="https://discord.gg/alloranetwork"
+  >
+    Chat live with other model makers and members on the official Allora Discord
+  </Card>
+</Cards>
diff --git a/pages/home/sample-projects.mdx b/pages/home/sample-projects.mdx
new file mode 100644
index 0000000..0d5fa6a
--- /dev/null
+++ b/pages/home/sample-projects.mdx
@@ -0,0 +1,78 @@
+import { Cards, Card, Callout } from 'nextra/components'
+
+# Browse Sample Projects
+
+Explore these sample projects to get started with building on the Allora network. These examples demonstrate different approaches to creating worker nodes and implementing prediction models.
+
+## Official Sample Projects
+
+
+
+ An official Allora network worker node example that provides price predictions using a basic linear regression model. This repository demonstrates the complete setup including worker, inference, and updater components.
+
+ **Features:**
+ - Complete Docker-compose setup
+ - Multiple ML models (LinearRegression, SVR, KernelRidge, BayesianRidge)
+ - Support for ETH, SOL, BTC, BNB, ARB tokens
+ - Automated data fetching and model updates
+ - Integration with Binance and CoinGecko APIs
+
+
+ The official Allora Model Development Kit provides a comprehensive framework for building, training, and deploying AI models on the Allora network. Perfect for developers who want to create custom prediction models.
+
+ **Features:**
+ - Multiple regression strategies (ARIMA, LSTM, etc.)
+ - Tiingo data integration for real-time market data
+ - FastAPI endpoints for inference serving
+ - Automated model training and packaging
+ - Docker deployment support
+ - Makefile for easy development workflow
+
+
+
+## Community Spotlight
+
+
+
+ A community-contributed base template for building Allora worker nodes. This project provides a clean starting point for developers who want to implement their own prediction models.
+
+ **Features:**
+ - Simplified base structure
+ - Easy customization for different models
+ - Docker-based deployment
+ - Clear documentation for getting started
+ - Community-maintained and supported
+
+
+
+## Getting Started
+
+<Callout>
+Before deploying any worker node, make sure you have:
+- Set up your wallet with the required tokens
+- Obtained faucet funds for your worker
+- Configured your environment variables properly
+</Callout>
+
+### Quick Start Steps
+
+1. **Choose a Template**: Select either the official basic coin prediction node or the community base template
+2. **Clone the Repository**: Download the code to your local environment
+3. **Configure Environment**: Set up your `.env` file with API keys and configuration
+4. **Initialize Worker**: Run the setup script to create your Allora keys
+5. **Deploy**: Use Docker Compose to start your worker node
+
+### Need Help?
+
+- Check out our [Worker Documentation](/devs/workers) for detailed guides
+- Join our [community discussions](https://discord.gg/alloranetwork) for support
+- Explore more [developer resources](/devs/get-started/quick-start)
\ No newline at end of file
diff --git a/pages/release-notes/_meta.json b/pages/release-notes/_meta.json
new file mode 100644
index 0000000..e48675a
--- /dev/null
+++ b/pages/release-notes/_meta.json
@@ -0,0 +1,3 @@
+{
+ "index": "Release Notes"
+}
\ No newline at end of file
diff --git a/pages/home/release-notes.mdx b/pages/release-notes/index.mdx
similarity index 99%
rename from pages/home/release-notes.mdx
rename to pages/release-notes/index.mdx
index 53027b0..b552c18 100644
--- a/pages/home/release-notes.mdx
+++ b/pages/release-notes/index.mdx
@@ -311,4 +311,4 @@ The Allora Network v0.3.0 introduced a significant update focused on enhancing p
### Merit-Based Participant Selection
-The merit-based sortition system helps maintain the quality of network inferences while giving fresh talent the opportunity to participate and prove their value. This mechanism is designed to strike a balance between performance-based selection and inclusivity for new participants.
+The merit-based sortition system helps maintain the quality of network inferences while giving fresh talent the opportunity to participate and prove their value. This mechanism is designed to strike a balance between performance-based selection and inclusivity for new participants.
\ No newline at end of file
diff --git a/theme.config.tsx b/theme.config.tsx
index 7c6bf10..569f6da 100644
--- a/theme.config.tsx
+++ b/theme.config.tsx
@@ -1,7 +1,5 @@
-// theme.config.tsx
import React from 'react'
import { useRouter } from 'next/router'
-import Link from 'next/link'
import { DocsThemeConfig, useConfig } from 'nextra-theme-docs'
import AiButton from './components/AiButton.js'
@@ -14,17 +12,20 @@ const config: DocsThemeConfig = {
}
}
},
+ logo: () => null,
docsRepositoryBase: 'https://github.com/allora-network/docs',
- logo: () => {
- return (
- <>
-
- >
+ logoLink: "/",
+ navbar: {
+ extraContent: (
+