From d9a064057e6a2333d9123e7f6477f3e8701b1d86 Mon Sep 17 00:00:00 2001
From: Castrozan
Date: Tue, 3 Mar 2026 21:16:03 -0300
Subject: [PATCH] feat(openclaw): add Ollama provider with GLM-4 and Llama 3.2

Integrates local Ollama inference into OpenClaw via configPatches:
- Provider: http://localhost:11434 with native ollama API
- Models: glm4 (5.5 GB) and llama3.2 (2 GB), both already pulled
- Aliases: /model glm and /model llama for quick switching

Ollama systemd user service is managed by home/modules/ollama.
Run 'systemctl --user start ollama' until next rebuild makes it persistent.
---
 users/lucas.zanoni/home/openclaw.nix | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/users/lucas.zanoni/home/openclaw.nix b/users/lucas.zanoni/home/openclaw.nix
index 6da857e4..11fda6a9 100644
--- a/users/lucas.zanoni/home/openclaw.nix
+++ b/users/lucas.zanoni/home/openclaw.nix
@@ -9,6 +9,8 @@ let
   opusModel = "anthropic/claude-opus-4-6";
   sonnetModel = "anthropic/claude-sonnet-4-6";
   codexModel = "openai-codex/gpt-5.3-codex";
+  glmModel = "ollama/glm4";
+  llamaModel = "ollama/llama3.2";
 
   robsonModelPrimary = opusModel;
   jennyModelPrimary = sonnetModel;
@@ -25,6 +27,30 @@ in
   openclaw = {
     configPatches = {
       ".channels.discord.accounts.robson.guilds.${robsonDiscordGuildId}.users" = [ lucasDiscordUserId ];
+
+      # Ollama — local inference provider
+      ".models.providers.ollama" = {
+        baseUrl = "http://localhost:11434";
+        api = "ollama";
+        models = [
+          {
+            id = "glm4";
+            name = "GLM-4 (local)";
+            contextWindow = 131072;
+            maxTokens = 8192;
+          }
+          {
+            id = "llama3.2";
+            name = "Llama 3.2 (local)";
+            contextWindow = 131072;
+            maxTokens = 8192;
+          }
+        ];
+      };
+
+      # Model aliases for quick /model switching
+      ".agents.defaults.models.\"${glmModel}\"".alias = "glm";
+      ".agents.defaults.models.\"${llamaModel}\"".alias = "llama";
     };
 
     memorySync = {