From 72be1fb8518b3894695a699c0de0c64ac695a94a Mon Sep 17 00:00:00 2001
From: xiewoc <70128845+xiewoc@users.noreply.github.com>
Date: Sun, 9 Nov 2025 13:02:36 +0800
Subject: [PATCH] =?UTF-8?q?[feat]=20=E6=B7=BB=E5=8A=A0=E4=BA=86=E5=AF=B9?=
=?UTF-8?q?=E5=A4=9A=E8=AF=AD=E8=A8=80=E7=9A=84=E6=94=AF=E6=8C=81=E5=B9=B6?=
=?UTF-8?q?=E6=9B=B4=E6=94=B9=E4=BA=86Cl?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 45 +-
README_EN.md | 91 ++--
config.json | 3 +
main.py | 10 +-
meshbot/config/config_loader.py | 605 +++++++++++++++++---------
meshbot/core/bot.py | 49 +--
meshbot/core/message_processor.py | 69 +--
meshbot/handlers/signal_handlers.py | 3 +-
meshbot/localizations/localization.py | 528 ++++++++++++++++++++++
meshbot/utils/ai_client_factory.py | 13 +-
meshbot/utils/localize.py | 27 ++
11 files changed, 1084 insertions(+), 359 deletions(-)
create mode 100644 meshbot/localizations/localization.py
create mode 100644 meshbot/utils/localize.py
diff --git a/README.md b/README.md
index fdb4b68..08b1fb4 100644
--- a/README.md
+++ b/README.md
@@ -27,32 +27,22 @@
- 调用本地运行的 Ollama 服务(或其他 AI 接口)
- 生成回复后通过同一网络回传
-### 启动方式
-
-1. **启动 Ollama 服务**:
- ```bash
- ollama serve
- ```
- > 这会启动 Ollama 后台服务,默认监听 `11434` 端口。
-
-2. (可选)提前下载模型:
- ```bash
- ollama pull qwen2.5:7b
- ```
- 或使用其他轻量模型如 `phi3`、`tinyllama`。
-
-3. 运行 AI 节点程序:
- ```bash
- python main.py
- ```
-
-> 注:Ollama 在首次请求时会自动下载并加载模型(如果未提前 pull)。确保设备有足够存储和内存。
+## 技术规格
+| 连接方式 | 串口 |
+| ----- | -----|
+| 记忆 | 未实现|
+| LLM Tool | 未实现 |
+| 语言支持 | 中、英、日、法、俄、韩、西、德|
+| 服务提供商 | OpenAI(及类似,如:DeepSeek、Ollama)、web sockets、fastapi|
### 当前配置示例
```json
{
"platform": "ollama",
+ "localization":{
+ "language": "zh_CN"
+ },
"api_keys": {
"openai": "your-openai-api-key",
"deepseek": "your-deepseek-api-key",
@@ -84,7 +74,7 @@
>
>如果你在使用OpenRouter,请参照[README_OPENROUTER](README_OPENROUTER.md)
>
->若要接入 `AstrBot` ,可以使用 [AstrBot适配器](https://github.com/xiewoc/astrbot_plugin_adapter_meshbot)
+>若要接入 `AstrBot` ,可以使用 [AstrBot适配器](https://github.com/xiewoc/astrbot_plugin_adapter_meshbot) (*推荐*)
完全可以在树莓派 + TTGO T-Beam 上跑起来,边走边聊。
@@ -100,6 +90,11 @@
```bash
ollama serve
```
+
+ > 这会启动 Ollama 后台服务,默认监听 `11434` 端口。
+ >
+ > 注:Ollama 在首次请求时会自动下载并加载模型(如果未提前 pull)。确保设备有足够存储和内存。
+
5. 运行主程序:
```bash
python main.py
@@ -111,16 +106,16 @@
## 🎈当前版本
-V 1.0.3
+V 1.0.3 - pre 1
-- 重构了文件夹结构
-- 添加了`Gemini`, `SiliconFlow`, `Claude`和`Fastapi`的适配器
-- 重构了`config.json`
+- 添加了`localization` ,支持了 `en`, `zh_CN`, `fr`, `ru`, `jp`, `ko`, `es`, `de`
## 🌱 后续想法
- 引入上下文记忆,让对话更连贯
- 添加一个WebUI
+- 添加LLM Tool
+- 优化`api`文件夹
## 🙏 写在最后
diff --git a/README_EN.md b/README_EN.md
index 7906f62..3cdc137 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -1,58 +1,48 @@
-[**简体中文**](README.md) | **English**
+**English** | [ **简体中文** ](README.md)
# Mesh AI Assistant
-A small AI node that quietly resides in the Mesh network.
-You send it a message, and it replies with a sentence.
+A small AI node quietly residing in the Mesh network.
+You send it a message, and it replies.
Unobtrusive, offline, and serverless.
-Just for those times when you're in the mountains, in the wild, or somewhere with no signal, and you can still ask, "What do you think?" and receive an answer.
+Just for those times when you're in the mountains, the wilderness, or places with no signal – you can still ask, "What do you think?" and receive an answer.
## 🧩 What Can It Do?
-- Receive private messages sent to it (peer-to-peer messages)
-- Generate short replies using a local AI model
-- Send the response back the same way, as if it's always online waiting for you
+- Receive private messages sent to it (peer-to-peer)
+- Generate short replies using a local AI model
+- Send the response back through the same path, as if it's always online waiting for you
-All processing is done locally, ensuring privacy and control.
+All processing is done locally, ensuring privacy and control.
## ⚙️ Technical Implementation
-- Uses Python to listen for serial port messages from Meshtastic devices
-- Extracts content when a private message for this node is received
-- Calls a locally running Ollama service (or other AI interfaces)
-- Sends the generated reply back through the same network
+- Uses Python to listen for serial port messages from Meshtastic devices
+- Extracts content when a private message for this node is received
+- Calls a locally running Ollama service (or other AI interface)
+- Sends the generated reply back through the same network
-### How to Start
-
-1. **Start the Ollama Service**:
- ```bash
- ollama serve
- ```
- > This starts the Ollama background service, listening on port `11434` by default.
-
-2. (Optional) Download a model in advance:
- ```bash
- ollama pull qwen2.5:7b
- ```
- Or use other lightweight models like `phi3` or `tinyllama`.
-
-3. Run the AI node program:
- ```bash
- python main.py
- ```
-
-> Note: Ollama will automatically download and load the model on the first request (if not pulled in advance). Ensure your device has sufficient storage and memory.
+## Technical Specifications
+| Connection | Serial Port |
+| ----- | -----|
+| Memory | Not Implemented |
+| LLM Tools | Not Implemented |
+| Language Support | Chinese, English, Japanese, French, Russian, Korean, Spanish, German |
+| Service Providers | OpenAI (and similar, e.g., DeepSeek, Ollama), web sockets, FastAPI |
### Current Configuration Example
```json
{
"platform": "ollama",
+ "localization":{
+ "language": "en"
+ },
"api_keys": {
"openai": "your-openai-api-key",
"deepseek": "your-deepseek-api-key",
@@ -79,12 +69,12 @@ All processing is done locally, ensuring privacy and control.
}
```
-> [!IMPORTANT]
-> Please replace `your-api-key` with your actual API key when using services like `openai`, `deepseek`, etc.
+>[!IMPORTANT]
+>Please replace `your-api-key` with your actual API key when using services like `openai`, `deepseek`, etc.
>
-> If you are using OpenRouter, please refer to [README_OPENROUTER](README_OPENROUTER.md)
+>If you are using OpenRouter, please refer to [README_OPENROUTER](README_OPENROUTER.md)
>
-> To integrate with `AstrBot`, you can use the [AstrBot Adapter](https://github.com/xiewoc/astrbot_plugin_adapter_meshbot)
+>To integrate with `AstrBot`, you can use the [AstrBot Adapter](https://github.com/xiewoc/astrbot_plugin_adapter_meshbot) (*Recommended*)
It can easily run on a Raspberry Pi + TTGO T-Beam, allowing you to chat on the go.
@@ -100,37 +90,42 @@ It can easily run on a Raspberry Pi + TTGO T-Beam, allowing you to chat on the g
```bash
ollama serve
```
+
+ > This starts the Ollama background service, listening on port `11434` by default.
+ >
+ > Note: Ollama will automatically download and load the model on the first request (if not pulled beforehand). Ensure the device has sufficient storage and memory.
+
5. Run the main program:
```bash
python main.py
```
-6. Send a private message to it from another device and wait for a reply.
+6. Send a private message to it from another device and wait for the reply.
-> [!IMPORTANT]
-> Please pay attention to the runtime path when executing the main program; it must be run from within the project folder.
+>[!IMPORTANT]
+>Please pay attention to the working directory when running the main program; it must be within the project folder.
## 🎈 Current Version
-V 1.0.3
+V 1.0.3 - pre 1
-- Refactored the folder structure
-- Added adapters for `Gemini`, `SiliconFlow`, `Claude`, and `Fastapi`
-- Refactored `config.json`
+- Added `localization`, supporting `en`, `zh_CN`, `fr`, `ru`, `jp`, `ko`, `es`, `de`
## 🌱 Future Ideas
-- Introduce context memory for more coherent conversations
-- Add a WebUI
+- Introduce context memory for more coherent conversations
+- Add a WebUI
+- Add LLM Tools
+- Optimize the `api` folder
## 🙏 Final Words
This project isn't meant to replace anyone, nor is it about creating an overly intelligent AI.
-It's just about leaving a voice that can respond to you in those quiet places.
+It's simply about leaving a voice that can respond to you in those quiet places.
If you also appreciate this concept, you're welcome to help improve it.
-Simultaneously, thanks to the developers who have contributed to this project; we appreciate your support and efforts.
+Special thanks to the developers who have contributed to this project – we appreciate your support and dedication.
-May your Meshtastic node run stably in the mountains and wilds, where every reply is like a quietly lit signal lamp. 📡💡
+May your Meshtastic node run stably out in the wilds, where every reply is like a quietly lit signal lamp. 📡💡
Happy Exploring! ✨
\ No newline at end of file
diff --git a/config.json b/config.json
index 0dc5cd1..e00ec1d 100644
--- a/config.json
+++ b/config.json
@@ -1,5 +1,8 @@
{
"platform": "ollama",
+ "localization":{
+ "language": "zh_CN"
+ },
"api_keys": {
"openai": "your-openai-api-key",
"deepseek": "your-deepseek-api-key",
diff --git a/main.py b/main.py
index f5626ea..e11932b 100644
--- a/main.py
+++ b/main.py
@@ -4,9 +4,11 @@
import os
from pathlib import Path
-from meshbot.core.bot import MeshAIBot
+from meshbot.core.bot import MeshBot
from meshbot.handlers.signal_handlers import setup_signal_handlers
+from meshbot.config.config_loader import load_config
from meshbot.config.config_loader import create_example_config
+from meshbot.utils.localize import i18n
# 日志配置
logging.basicConfig(
@@ -15,7 +17,6 @@
)
logger = logging.getLogger(__name__)
-
def check_config():
"""检查配置文件是否存在,如果不存在则创建示例"""
config_path = Path(__file__).parent / "config.json"
@@ -33,12 +34,13 @@ async def main() -> None:
if not check_config():
return
- bot = MeshAIBot()
+ load_config()
+ bot = MeshBot()
setup_signal_handlers(bot)
try:
await bot.run()
except Exception as e:
- logger.error(f"💥 机器人运行异常: {e}")
+ logger.error(i18n.gettext('bot_running_error',err = e))
finally:
await bot.shutdown()
diff --git a/meshbot/config/config_loader.py b/meshbot/config/config_loader.py
index dc7ac64..c5b3b5d 100644
--- a/meshbot/config/config_loader.py
+++ b/meshbot/config/config_loader.py
@@ -1,229 +1,402 @@
# config/config_loader.py
import json
import logging
-from pathlib import Path
+from pathlib import Path
+from typing import Dict, Any, Optional
+from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
-# 默认配置(不常修改的部分)
-DEFAULT_CONFIG = {
- "system": {
- "system_prompt": "你是一个助手,请用简洁的语言(小于200字符)回复。",
- "max_response_length": 200,
- "message_queue_timeout": 1
- },
- "clients": {
- "ollama": {
- "module": "meshbot.api.ollama_api",
- "class": "AsyncOllamaChatClient",
- "kwargs": {
- "default_model": "qwen2.5:7b" # 会被用户配置覆盖
- }
- },
- "openai": {
- "module": "meshbot.api.openai_api",
- "class": "AsyncOpenAIChatClient",
- "kwargs": {
- "api_key": "your-api-key", # 会被用户配置覆盖
- "default_model": "gpt-3.5-turbo" # 会被用户配置覆盖
- }
- },
- "deepseek": {
- "module": "meshbot.api.deepseek_api",
- "class": "AsyncDeepSeekChatClient",
- "kwargs": {
- "api_key": "your-api-key", # 会被用户配置覆盖
- "default_model": "deepseek-chat" # 会被用户配置覆盖
- }
- },
- "openrouter": {
- "module": "meshbot.api.openrouter_api",
- "class": "AsyncOpenRouterChatClient",
- "kwargs": {
- "app_name": "MeshBot",
- "api_key": "your-api-key" # 会被用户配置覆盖
- }
- },"gemini": {
- "module": "meshbot.api.gemini_api",
- "class": "AsyncGeminiChatClient",
- "kwargs": {
- "api_key": "your-gemini-api-key",
- "default_model": "gemini-pro"
- }
- },
- "claude": {
- "module": "meshbot.api.claude_api",
- "class": "AsyncClaudeChatClient",
- "kwargs": {
- "api_key": "your-claude-api-key",
- "default_model": "claude-3-sonnet-20240229"
- }
- },
- "siliconflow": {
- "module": "meshbot.api.siliconflow_api",
- "class": "AsyncSiliconFlowChatClient",
- "kwargs": {
- "api_key": "your-siliconflow-api-key",
- "default_model": "deepseek-ai/DeepSeek-V2-Chat"
- }
+# Pydantic 模型定义
+class ClientConfig(BaseModel):
+ """客户端配置模型"""
+ module: str
+ class_name: str = Field(alias="class") # 解决 'class' 关键字冲突
+ kwargs: Dict[str, Any] = Field(default_factory=dict)
+
+ def __getitem__(self, item):
+ if item == "class":
+ return getattr(self, "class_name")
+ return getattr(self, item)
+
+class SystemConfig(BaseModel):
+ """系统配置模型"""
+ system_prompt: str = "你是一个助手,请用简洁的语言(小于200字符)回复。"
+ max_response_length: int = 200
+ message_queue_timeout: int = 1
+
+class LocalizationConfig(BaseModel):
+ """本地化配置模型"""
+ language: str = "zh_CN"
+ timezone: str = "Asia/Shanghai"
+ encoding: str = "utf-8"
+
+class AppConfig(BaseModel):
+ """应用配置模型"""
+ platform: str = "ollama"
+ api_keys: Dict[str, str] = Field(default_factory=dict)
+ model_settings: Dict[str, str] = Field(default_factory=dict)
+ service_urls: Dict[str, str] = Field(default_factory=dict)
+ system_prompt: Optional[str] = None
+
+class FullConfig(BaseModel):
+ """完整配置模型"""
+ system: SystemConfig = Field(default_factory=SystemConfig)
+ localization: LocalizationConfig = Field(default_factory=LocalizationConfig)
+ clients: Dict[str, ClientConfig] = Field(default_factory=dict)
+ app: AppConfig = Field(default_factory=AppConfig)
+
+class ConfigManager:
+ """配置管理器(使用 Pydantic 验证)"""
+
+ # 默认配置
+ DEFAULT_CONFIG: Dict[str, Any] = {
+ "system": {
+ "system_prompt": "你是一个助手,请用简洁的语言(小于200字符)回复。",
+ "max_response_length": 200,
+ "message_queue_timeout": 1
},
- "websockets": {
- "module": "meshbot.api.ws_platform",
- "class": "AsyncWebSocketsClient",
- "kwargs": {
- "uri": "ws://localhost:9238" # 会被用户配置覆盖
- }
+ "localization": {
+ "language": "zh_CN",
+ "timezone": "Asia/Shanghai",
+ "encoding": "utf-8"
},
- "fastapi": {
- "module": "meshbot.api.fastapi_client",
- "class": "AsyncFastAPIChatClient",
- "kwargs": {
- "base_url": "http://127.0.0.1:8000",
- "api_key": "your-fastapi-token" # 可选
+ "clients": {
+ "ollama": {
+ "module": "meshbot.api.ollama_api",
+ "class": "AsyncOllamaChatClient",
+ "kwargs": {
+ "default_model": "qwen2.5:7b"
+ }
+ },
+ "openai": {
+ "module": "meshbot.api.openai_api",
+ "class": "AsyncOpenAIChatClient",
+ "kwargs": {
+ "api_key": "your-api-key",
+ "default_model": "gpt-3.5-turbo"
+ }
+ },
+ "deepseek": {
+ "module": "meshbot.api.deepseek_api",
+ "class": "AsyncDeepSeekChatClient",
+ "kwargs": {
+ "api_key": "your-api-key",
+ "default_model": "deepseek-chat"
+ }
+ },
+ "openrouter": {
+ "module": "meshbot.api.openrouter_api",
+ "class": "AsyncOpenRouterChatClient",
+ "kwargs": {
+ "app_name": "MeshBot",
+ "api_key": "your-api-key"
+ }
+ },
+ "gemini": {
+ "module": "meshbot.api.gemini_api",
+ "class": "AsyncGeminiChatClient",
+ "kwargs": {
+ "api_key": "your-gemini-api-key",
+ "default_model": "gemini-pro"
+ }
+ },
+ "claude": {
+ "module": "meshbot.api.claude_api",
+ "class": "AsyncClaudeChatClient",
+ "kwargs": {
+ "api_key": "your-claude-api-key",
+ "default_model": "claude-3-sonnet-20240229"
+ }
+ },
+ "siliconflow": {
+ "module": "meshbot.api.siliconflow_api",
+ "class": "AsyncSiliconFlowChatClient",
+ "kwargs": {
+ "api_key": "your-siliconflow-api-key",
+ "default_model": "deepseek-ai/DeepSeek-V2-Chat"
+ }
+ },
+ "websockets": {
+ "module": "meshbot.api.ws_platform",
+ "class": "AsyncWebSocketsClient",
+ "kwargs": {
+ "uri": "ws://localhost:9238"
+ }
+ },
+ "fastapi": {
+ "module": "meshbot.api.fastapi_client",
+ "class": "AsyncFastAPIChatClient",
+ "kwargs": {
+ "base_url": "http://127.0.0.1:8000",
+ "api_key": "your-fastapi-token"
+ }
}
- },
- }
-}
-
-# 合并后的配置
-CONFIG = None
-SYSTEM_PROMPT = None
-PLATFORM = None
-MAX_RESPONSE_LENGTH = None
-MESSAGE_QUEUE_TIMEOUT = None
-AI_CLIENT_CONFIG = None
-
-
-def load_config(config_path: str = str((Path(__file__).parent / "../../config.json").resolve())) -> None:
- """从 JSON 文件加载配置并与默认配置合并"""
- global CONFIG, SYSTEM_PROMPT, PLATFORM, MAX_RESPONSE_LENGTH, MESSAGE_QUEUE_TIMEOUT, AI_CLIENT_CONFIG
-
- try:
- with open(config_path, "r", encoding="utf-8") as f:
- user_config = json.load(f)
- except FileNotFoundError:
- raise RuntimeError("配置文件 config.json 未找到,请确保文件存在。")
- except json.JSONDecodeError as e:
- raise RuntimeError(f"配置文件格式错误:{e}")
-
- # 合并配置
- CONFIG = _merge_configs(DEFAULT_CONFIG, user_config)
-
- # 解析系统配置
- SYSTEM_PROMPT = CONFIG["system"]["system_prompt"]
- PLATFORM = user_config.get("platform", "ollama") # 从用户配置获取平台
- MAX_RESPONSE_LENGTH = CONFIG["system"]["max_response_length"]
- MESSAGE_QUEUE_TIMEOUT = CONFIG["system"]["message_queue_timeout"]
-
- # AI 客户端配置
- AI_CLIENT_CONFIG = CONFIG["clients"]
-
- logger.info("✅ 配置加载成功")
- logger.info(f"🎯 当前平台: {PLATFORM}")
-
-
-def _merge_configs(default_config: dict, user_config: dict) -> dict:
- """深度合并默认配置和用户配置"""
- result = default_config.copy()
-
- # 处理 API keys
- if "api_keys" in user_config:
- for platform, api_key in user_config["api_keys"].items():
- if platform in result["clients"] and api_key != "your-api-key":
- if "kwargs" in result["clients"][platform]:
- result["clients"][platform]["kwargs"]["api_key"] = api_key
-
- # 处理模型设置
- if "model_settings" in user_config:
- for platform, model in user_config["model_settings"].items():
- if platform in result["clients"]:
- if "kwargs" in result["clients"][platform]:
- result["clients"][platform]["kwargs"]["default_model"] = model
-
- # 处理服务 URLs
- if "service_urls" in user_config:
- # WebSocket
- ws_url = user_config["service_urls"].get("websockets")
- if ws_url and ws_url != "ws://localhost:9238" and "websockets" in result["clients"]:
- result["clients"]["websockets"]["kwargs"]["uri"] = ws_url
-
- # FastAPI
- fastapi_url = user_config["service_urls"].get("fastapi")
- if fastapi_url and fastapi_url != "http://127.0.0.1:8000" and "fastapi" in result["clients"]:
- result["clients"]["fastapi"]["kwargs"]["base_url"] = fastapi_url
-
- # 处理系统提示(可选,如果用户想要自定义)
- if "system_prompt" in user_config:
- result["system"]["system_prompt"] = user_config["system_prompt"]
-
- return result
-
-
-def get_ai_client_config():
- """获取 AI 客户端配置"""
- if AI_CLIENT_CONFIG is None:
- raise RuntimeError("配置未加载,请先调用 load_config()")
- return AI_CLIENT_CONFIG
-
-
-def get_platform():
- """获取平台配置"""
- if PLATFORM is None:
- raise RuntimeError("配置未加载,请先调用 load_config()")
- return PLATFORM
-
-
-def get_system_prompt():
- """获取系统提示"""
- if SYSTEM_PROMPT is None:
- raise RuntimeError("配置未加载,请先调用 load_config()")
- return SYSTEM_PROMPT
-
-
-def get_max_response_length():
- """获取最大响应长度"""
- if MAX_RESPONSE_LENGTH is None:
- raise RuntimeError("配置未加载,请先调用 load_config()")
- return MAX_RESPONSE_LENGTH
-
-
-def get_message_queue_timeout():
- """获取消息队列超时时间"""
- if MESSAGE_QUEUE_TIMEOUT is None:
- raise RuntimeError("配置未加载,请先调用 load_config()")
- return MESSAGE_QUEUE_TIMEOUT
-
-
-def create_example_config():
- """创建示例配置文件"""
- example_config = {
- "platform": "ollama",
- "api_keys": {
- "openai": "your-openai-api-key",
- "deepseek": "your-deepseek-api-key",
- "openrouter": "your-openrouter-api-key",
- "gemini": "your-gemini-api-key",
- "claude": "your-claude-api-key",
- "siliconflow": "your-siliconflow-api-key",
- "fastapi": "your-fastapi-token"
- },
- "model_settings": {
- "ollama": "qwen2.5:7b",
- "openai": "gpt-3.5-turbo",
- "deepseek": "deepseek-chat",
- "openrouter": "openai/gpt-3.5-turbo",
- "gemini": "gemini-pro",
- "claude": "claude-3-sonnet-20240229",
- "siliconflow": "deepseek-ai/DeepSeek-V2-Chat",
- "fastapi": "fastapi-default"
- },
- "service_urls": {
- "websockets": "ws://localhost:9238",
- "fastapi": "http://127.0.0.1:8000"
}
}
- with open("config.json", "w", encoding="utf-8") as f:
- json.dump(example_config, f, indent=2, ensure_ascii=False)
+ _instance: Optional['ConfigManager'] = None
+
+ def __new__(cls):
+ if cls._instance is None:
+ cls._instance = super().__new__(cls)
+ return cls._instance
+
+ def __init__(self):
+ self._config: Optional[FullConfig] = None
+ self._user_config: Optional[Dict[str, Any]] = None
+ self._config_path: Optional[Path] = None
+
+ def load(self, config_path: Optional[str] = None) -> None:
+ """从 JSON 文件加载配置并与默认配置合并"""
+ if config_path is None:
+ config_path = self.get_default_config_path()
+
+ self._config_path = Path(config_path)
+
+ try:
+ with open(self._config_path, "r", encoding="utf-8") as f:
+ self._user_config = json.load(f)
+ except FileNotFoundError:
+ raise RuntimeError(f"配置文件未找到: {config_path}")
+ except json.JSONDecodeError as e:
+ raise RuntimeError(f"配置文件格式错误: {e}")
+ except Exception as e:
+ raise RuntimeError(f"读取配置文件失败: {e}")
+
+ # 合并配置
+ merged_config = self._deep_merge(self.DEFAULT_CONFIG.copy(), self._user_config or {})
+
+ # 使用 Pydantic 验证和转换
+ try:
+ self._config = FullConfig(**merged_config)
+ except Exception as e:
+ raise RuntimeError(f"配置验证失败: {e}")
+
+ # 应用用户配置覆盖
+ self._apply_user_overrides()
+
+ logger.info("✅ 配置加载成功")
+ logger.info(f"🎯 当前平台: {self.platform}")
+ logger.info(f"🌐 语言设置: {self.language}")
- logger.info("📝 示例配置文件 config.json 已创建")
\ No newline at end of file
+ def _deep_merge(self, base: Dict[str, Any], update: Dict[str, Any]) -> Dict[str, Any]:
+ """深度合并两个字典"""
+ for key, value in update.items():
+ if (key in base and
+ isinstance(base[key], dict) and
+ isinstance(value, dict)):
+ base[key] = self._deep_merge(base[key], value)
+ else:
+ base[key] = value
+ return base
+
+ def _apply_user_overrides(self) -> None:
+ """应用用户特定的配置覆盖"""
+ if not self._user_config or not self._config:
+ return
+
+ # 应用 API keys
+ if "api_keys" in self._user_config:
+ for platform, api_key in self._user_config["api_keys"].items():
+ if (platform in self._config.clients and
+ api_key not in ["your-api-key", "your-openai-api-key", ""]):
+ if self._config.clients[platform].kwargs.get("api_key", "").startswith("your-"):
+ self._config.clients[platform].kwargs["api_key"] = api_key
+
+ # 应用模型设置
+ if "model_settings" in self._user_config:
+ for platform, model in self._user_config["model_settings"].items():
+ if platform in self._config.clients:
+ if "default_model" in self._config.clients[platform].kwargs:
+ self._config.clients[platform].kwargs["default_model"] = model
+
+ # 应用服务 URLs
+ if "service_urls" in self._user_config:
+ ws_url = self._user_config["service_urls"].get("websockets")
+ if ws_url and ws_url != "ws://localhost:9238" and "websockets" in self._config.clients:
+ self._config.clients["websockets"].kwargs["uri"] = ws_url
+
+ fastapi_url = self._user_config["service_urls"].get("fastapi")
+ if fastapi_url and fastapi_url != "http://127.0.0.1:8000" and "fastapi" in self._config.clients:
+ self._config.clients["fastapi"].kwargs["base_url"] = fastapi_url
+
+ # 应用系统提示
+ if "system_prompt" in self._user_config and self._user_config["system_prompt"]:
+ self._config.system.system_prompt = self._user_config["system_prompt"]
+
+ def get_default_config_path(self) -> str:
+ """获取默认配置文件路径"""
+ return str((Path(__file__).parent / "../../config.json").resolve())
+
+ @property
+ def platform(self) -> str:
+ """获取当前平台"""
+ if self._user_config is None:
+ raise RuntimeError("配置未加载")
+ return self._user_config.get("platform", "ollama")
+
+ @property
+ def system_prompt(self) -> str:
+ """获取系统提示"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.system.system_prompt
+
+ @property
+ def max_response_length(self) -> int:
+ """获取最大响应长度"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.system.max_response_length
+
+ @property
+ def message_queue_timeout(self) -> int:
+ """获取消息队列超时时间"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.system.message_queue_timeout
+
+ @property
+ def ai_client_config(self) -> Dict[str, ClientConfig]:
+ """获取 AI 客户端配置"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.clients
+
+ @property
+ def language(self) -> str:
+ """获取语言设置"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.localization.language
+
+ @property
+ def timezone(self) -> str:
+ """获取时区设置"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.localization.timezone
+
+ @property
+ def encoding(self) -> str:
+ """获取编码设置"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config.localization.encoding
+
+ def get_client_config(self, client_name: str) -> Optional[ClientConfig]:
+ """获取特定客户端的配置"""
+ clients = self.ai_client_config
+ return clients.get(client_name)
+
+ def reload(self, config_path: Optional[str] = None) -> None:
+ """重新加载配置"""
+ self._config = None
+ self._user_config = None
+ self.load(config_path)
+ logger.info("🔄 配置重新加载成功")
+
+ def create_example_config(self, overwrite: bool = False) -> str:
+ """创建示例配置文件
+
+ Args:
+ overwrite: 是否覆盖已存在的文件
+
+ Returns:
+ 配置文件路径
+ """
+ config_path = Path(self.get_default_config_path())
+
+ if config_path.exists() and not overwrite:
+ raise FileExistsError(f"配置文件已存在: {config_path}")
+
+ example_config = {
+ "platform": "ollama",
+ "api_keys": {
+ "openai": "your-openai-api-key",
+ "deepseek": "your-deepseek-api-key",
+ "openrouter": "your-openrouter-api-key",
+ "gemini": "your-gemini-api-key",
+ "claude": "your-claude-api-key",
+ "siliconflow": "your-siliconflow-api-key",
+ "fastapi": "your-fastapi-token"
+ },
+ "model_settings": {
+ "ollama": "qwen2.5:7b",
+ "openai": "gpt-3.5-turbo",
+ "deepseek": "deepseek-chat",
+ "openrouter": "openai/gpt-3.5-turbo",
+ "gemini": "gemini-pro",
+ "claude": "claude-3-sonnet-20240229",
+ "siliconflow": "deepseek-ai/DeepSeek-V2-Chat",
+ "fastapi": "fastapi-default"
+ },
+ "service_urls": {
+ "websockets": "ws://localhost:9238",
+ "fastapi": "http://127.0.0.1:8000"
+ },
+ "system_prompt": "你是一个助手,请用简洁的语言(小于200字符)回复。",
+ "localization": {
+ "language": "zh_CN",
+ "timezone": "Asia/Shanghai",
+ "encoding": "utf-8"
+ }
+ }
+
+ config_path.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(config_path, "w", encoding="utf-8") as f:
+ json.dump(example_config, f, indent=2, ensure_ascii=False)
+
+ logger.info(f"📝 示例配置文件已创建: {config_path}")
+ return str(config_path)
+
+ def get_current_config(self) -> FullConfig:
+ """获取当前配置(用于调试)"""
+ if self._config is None:
+ raise RuntimeError("配置未加载")
+ return self._config
+
+
+# 全局单例实例
+_config_manager = ConfigManager()
+
+# 兼容旧接口的快捷函数
+def load_config(config_path: Optional[str] = None) -> None:
+ """加载配置(兼容旧接口)"""
+ _config_manager.load(config_path)
+
+def get_platform() -> str:
+ """获取平台配置(兼容旧接口)"""
+ return _config_manager.platform
+
+def get_system_prompt() -> str:
+ """获取系统提示(兼容旧接口)"""
+ return _config_manager.system_prompt
+
+def get_max_response_length() -> int:
+ """获取最大响应长度(兼容旧接口)"""
+ return _config_manager.max_response_length
+
+def get_message_queue_timeout() -> int:
+ """获取消息队列超时时间(兼容旧接口)"""
+ return _config_manager.message_queue_timeout
+
+def get_ai_client_config() -> Dict[str, ClientConfig]:
+ """获取 AI 客户端配置(兼容旧接口)"""
+ return _config_manager.ai_client_config
+
+def get_localization_config() -> LocalizationConfig:
+ """获取本地化配置"""
+ if _config_manager._config is None:
+ raise RuntimeError("配置未加载")
+ return _config_manager._config.localization
+
+def create_example_config(overwrite: bool = False) -> str:
+ """创建示例配置文件(兼容旧接口)"""
+ return _config_manager.create_example_config(overwrite)
+
+def reload_config(config_path: Optional[str] = None) -> None:
+ """重新加载配置(兼容旧接口)"""
+ _config_manager.reload(config_path)
\ No newline at end of file
diff --git a/meshbot/core/bot.py b/meshbot/core/bot.py
index 1e73db8..c1b066c 100644
--- a/meshbot/core/bot.py
+++ b/meshbot/core/bot.py
@@ -8,21 +8,20 @@
import meshtastic.serial_interface
from pubsub import pub
-from meshbot.config.config_loader import PLATFORM, MESSAGE_QUEUE_TIMEOUT, load_config
+from meshbot.config.config_loader import _config_manager
from meshbot.utils.ai_client_factory import create_ai_client
from meshbot.core.message_processor import MessageProcessor
+from meshbot.utils.localize import i18n
logger = logging.getLogger(__name__)
-class MeshAIBot:
+class MeshBot:
"""Mesh AI 机器人主类,基于 Meshtastic 与 AI 交互"""
def __init__(self):
- # 先加载配置
- load_config()
- self.client = create_ai_client(PLATFORM)
+ self.client = create_ai_client(_config_manager.platform)
self.interface: Optional[meshtastic.serial_interface.SerialInterface] = None
self.running = False
self.nodes = None
@@ -35,7 +34,7 @@ def __init__(self):
async def initialize(self) -> None:
"""初始化机器人组件"""
- logger.info("正在初始化 Mesh AI 机器人...")
+ logger.info(i18n.gettext('bot_initializing'))
self._loop = asyncio.get_running_loop()
await self._initialize_ai_client()
await self._initialize_meshtastic()
@@ -49,14 +48,14 @@ async def _initialize_ai_client(self) -> None:
if hasattr(self.client, "get_models"):
models = await self.client.get_models()
if models:
- model_names = [m.get('name', '未知') for m in models]
+ model_names = [m.get('name', i18n.gettext('unknown')) for m in models]
logger.info(
- f"✅ 可用 AI 模型: {model_names}"
+ i18n.gettext('available_models', model_names=model_names)
)
else:
- logger.warning("⚠️ 未找到可用模型,请检查服务")
+ logger.warning(i18n.gettext('no_models_warning'))
except Exception as e:
- logger.warning(f"⚠️ 获取模型列表失败: {e}")
+ logger.warning(i18n.gettext('model_list_failed', error=e))
async def _initialize_meshtastic(self) -> None:
"""连接 Meshtastic 设备"""
@@ -67,12 +66,12 @@ async def _initialize_meshtastic(self) -> None:
if node_info and 'num' in node_info:
self._node_id = node_info['num']
self.message_processor = MessageProcessor(self.nodes, self._node_id)
- logger.info(f"✅ Meshtastic 连接成功,节点 ID: {self._node_id}")
+ logger.info(i18n.gettext('meshtastic_connected', node_id=self._node_id))
else:
- logger.error("❌ 无法获取 Meshtastic 节点信息")
- raise RuntimeError("无法获取 Meshtastic 节点信息")
+ logger.error(i18n.gettext('node_info_error'))
+ raise RuntimeError(i18n.gettext('node_info_error'))
except Exception as e:
- logger.error(f"❌ Meshtastic 连接失败: {e}")
+ logger.error(i18n.gettext('meshtastic_connect_failed', error=e))
raise
def _register_event_handlers(self) -> None:
@@ -82,7 +81,7 @@ def _register_event_handlers(self) -> None:
def _on_connection(self, interface, topic=pub.AUTO_TOPIC) -> None:
"""连接建立事件"""
- logger.info("🔗 Mesh 设备连接已建立")
+ logger.info(i18n.gettext('connection_established'))
def _on_receive(self, packet: dict, interface) -> None:
"""接收消息事件(同步回调)"""
@@ -101,15 +100,15 @@ def _schedule_async_processing(self, message_data: tuple, interface) -> None:
self._loop
)
else:
- logger.warning("⚠️ 事件循环未运行,无法处理消息")
+ logger.warning(i18n.gettext('event_loop_not_running'))
async def _queue_message(self, message_data: tuple, interface) -> None:
"""将消息加入异步队列"""
try:
await self._message_queue.put((message_data, interface))
- logger.debug(f"📩 消息已入队,来自: {message_data[0]}")
+ logger.debug(i18n.gettext('message_queued', sender=message_data[0]))
except Exception as e:
- logger.error(f"❌ 消息入队失败: {e}")
+ logger.error(i18n.gettext('queue_failed', error=e))
async def _process_message_queue(self) -> None:
"""持续处理消息队列"""
@@ -117,7 +116,7 @@ async def _process_message_queue(self) -> None:
try:
message_data, interface = await asyncio.wait_for(
self._message_queue.get(),
- timeout=MESSAGE_QUEUE_TIMEOUT
+ timeout=_config_manager.message_queue_timeout
)
async with self._processing_lock:
if self.message_processor is not None:
@@ -130,19 +129,19 @@ async def _process_message_queue(self) -> None:
except asyncio.CancelledError:
break
except Exception as e:
- logger.error(f"❌ 消息队列处理异常: {e}")
+ logger.error(i18n.gettext('queue_processing_error', error=e))
async def run(self) -> None:
"""启动机器人主循环"""
self.running = True
await self.initialize()
- logger.info("🚀 Mesh AI 机器人已启动,按 Ctrl+C 退出...")
+ logger.info(i18n.gettext('bot_started'))
try:
while self.running:
await asyncio.sleep(1)
except KeyboardInterrupt:
- logger.info("🛑 收到中断信号,正在关闭...")
+ logger.info(i18n.gettext('interrupt_received'))
finally:
await self.shutdown()
@@ -151,13 +150,13 @@ async def shutdown(self) -> None:
if not self.running:
return
self.running = False
- logger.info("🔧 正在关闭 Mesh AI 机器人...")
+ logger.info(i18n.gettext('bot_shutting_down'))
if self.interface:
self.interface.close()
- logger.info("🔌 Meshtastic 连接已关闭")
+ logger.info(i18n.gettext('meshtastic_closed'))
await self.client.close()
- logger.info("🧠 AI 客户端已关闭")
+ logger.info(i18n.gettext('ai_client_closed'))
self._executor.shutdown(wait=False)
\ No newline at end of file
diff --git a/meshbot/core/message_processor.py b/meshbot/core/message_processor.py
index c63fec2..8fe90b0 100644
--- a/meshbot/core/message_processor.py
+++ b/meshbot/core/message_processor.py
@@ -4,6 +4,7 @@
from meshbot.config.config_loader import get_system_prompt, get_max_response_length
from meshbot.utils.text_utils import truncate_by_sentences
+from meshbot.utils.localize import i18n
logger = logging.getLogger(__name__)
@@ -32,14 +33,14 @@ def __init__(self, nodes, node_id):
def analyze_packet(self, packet: Dict[str, Any]) -> Optional[Tuple]:
"""解析数据包"""
if 'decoded' not in packet:
- logger.warning("⚠️ 数据包缺少 'decoded' 字段")
+ logger.warning(i18n.gettext('packet_missing_decoded'))
return None
- from_id = packet.get('from', '未知')
- from_id_hex = packet.get('fromId', '未知')
- to_id = packet.get('to', '未知')
+ from_id = packet.get('from', i18n.gettext('unknown'))
+ from_id_hex = packet.get('fromId', i18n.gettext('unknown'))
+ to_id = packet.get('to', i18n.gettext('unknown'))
decoded = packet['decoded']
- message_type = decoded.get('portnum', '未知类型')
+ message_type = decoded.get('portnum', i18n.gettext('unknown_type'))
if message_type == 'TEXT_MESSAGE_APP':
# 处理所有文本消息,包括私聊和群发
@@ -126,14 +127,14 @@ def _should_respond_to_broadcast(self, text: str, long_name: str, is_mention: bo
"""判断是否应该回复群发消息"""
# 如果明确提及,总是回复
if is_mention:
- logger.info("🎯 检测到提及,将回复群发消息")
+ logger.info(i18n.gettext('mention_detected'))
return True
# 检查消息是否包含问题或请求
question_indicators = ["吗?", "?", "怎么办", "如何", "为什么", "什么", "怎样", "能不能", "是否可以"]
for indicator in question_indicators:
if indicator in text:
- logger.info("❓ 检测到问题,将回复群发消息")
+ logger.info(i18n.gettext('question_detected'))
return True
# 对于其他群发消息,可以根据配置决定是否回复
@@ -142,7 +143,7 @@ def _should_respond_to_broadcast(self, text: str, long_name: str, is_mention: bo
response_keywords = ["帮助", "求助", "问题", "请教", "建议", "意见"]
for keyword in response_keywords:
if keyword in text:
- logger.info(f"🔍 检测到关键词 '{keyword}',将回复群发消息")
+ logger.info(i18n.gettext('keyword_detected', keyword=keyword))
return True
return False
@@ -156,11 +157,11 @@ def _get_sender_name(self, from_id_hex: str) -> str:
long_name = node_info.get('user', {}).get('longName', '')
if long_name:
logger.info(
- f"👤 节点 {from_id_hex} 名称: {long_name}"
+ i18n.gettext('node_name', node_id=from_id_hex, long_name=long_name)
)
return long_name
else:
- logger.warning(f"⚠️ 节点 {from_id_hex} 信息非字典类型")
+ logger.warning(i18n.gettext('node_info_type_warning', node_id=from_id_hex))
return ""
def _log_message_reception(
@@ -179,16 +180,16 @@ def _log_message_reception(
# 判断消息类型并添加相应标识
is_broadcast = self._is_broadcast_message(to_id)
- message_type = "📢 群发" if is_broadcast else "📩 私聊"
+ message_type = i18n.gettext('broadcast_message_received') if is_broadcast else i18n.gettext('private_message_received')
logger.info(
- f"{message_type} 来自 {from_id}{name_info}: {short_text}"
+ message_type.format(from_id=from_id, name_info=name_info, short_text=short_text)
)
if rssi is not None:
- logger.debug(f"📶 RSSI: {rssi} dBm")
+ logger.debug(i18n.gettext('rssi_debug', rssi=rssi))
if snr is not None:
- logger.debug(f"🔊 SNR: {snr} dB")
+ logger.debug(i18n.gettext('snr_debug', snr=snr))
def _process_position_message(self, packet: Dict[str, Any], from_id: str) -> None:
"""处理位置消息"""
@@ -202,7 +203,7 @@ def _process_position_message(self, packet: Dict[str, Any], from_id: str) -> Non
# 始终记录非敏感信息
logger.info(
- f"📍 收到 {from_id} 的位置信息"
+ i18n.gettext('position_received', from_id=from_id)
)
# 仅在 DEBUG 模式下记录详细坐标
@@ -210,7 +211,7 @@ def _process_position_message(self, packet: Dict[str, Any], from_id: str) -> Non
lat = pos['latitude']
lon = pos['longitude']
logger.debug(
- f"详细位置: {lat:.6f}, {lon:.6f}"
+ i18n.gettext('detailed_position', lat=lat, lon=lon)
)
def _parse_from_and_position(
@@ -221,7 +222,7 @@ def _parse_from_and_position(
result = {}
from_id_int = packet.get('from')
if not from_id_int:
- logger.error("❌ 缺少 'from' 字段")
+ logger.error(i18n.gettext('missing_from_field'))
return None
node_hex = f"{from_id_int:08x}".lower()
@@ -245,7 +246,7 @@ def _extract_position_data(
) -> Optional[Dict[str, Any]]:
"""提取位置字段"""
if not position:
- logger.warning("⚠️ 位置数据为空")
+ logger.warning(i18n.gettext('position_data_empty'))
return None
lat = position.get('latitude')
@@ -253,7 +254,7 @@ def _extract_position_data(
alt = position.get('altitude')
if lat is None or lon is None:
- logger.error("❌ 缺失经纬度")
+ logger.error(i18n.gettext('missing_lat_lon'))
return None
return {'latitude': lat, 'longitude': lon, 'altitude': alt}
@@ -265,7 +266,7 @@ async def handle_incoming_message(self, message_data: Tuple, interface, client)
try:
# 对于群发消息,检查是否需要回复
if is_broadcast and not self._should_respond_to_broadcast(text, long_name, is_mention):
- logger.info("⏭️ 忽略群发消息(未触发回复条件)")
+ logger.info(i18n.gettext('ignore_broadcast'))
return
# 构建系统提示(针对群发消息添加额外上下文)
@@ -286,9 +287,9 @@ async def handle_incoming_message(self, message_data: Tuple, interface, client)
# 为群发消息添加前缀标识
if is_broadcast:
response = f"💬 {response}"
- logger.info(f"🤖 AI 回复群发消息: {response}")
+ logger.info(i18n.gettext('ai_broadcast_response', response=response))
else:
- logger.info(f"🤖 AI 回复私聊消息: {response}")
+ logger.info(i18n.gettext('ai_private_response', response=response))
# 基于 UTF-8 字节长度判断是否需要分片
try:
@@ -314,24 +315,23 @@ async def handle_incoming_message(self, message_data: Tuple, interface, client)
# 私聊消息回复给发送者
interface.sendText(response, from_id)
else:
- error_msg = result.get('error', '未知错误')
+ error_msg = result.get('error', i18n.gettext('unknown_error'))
logger.error(
- f"❌ AI 处理失败: {error_msg}"
+ i18n.gettext('ai_processing_failed', error_msg=error_msg)
)
# 错误消息也根据消息类型发送
+ error_response = i18n.gettext('processing_failed', error_msg=error_msg)
if is_broadcast:
- interface.sendText(f"❌ 处理失败: {error_msg}")
+ interface.sendText(error_response)
else:
- interface.sendText(
- f"❌ 处理失败: {error_msg}",
- from_id
- )
+ interface.sendText(error_response, from_id)
except Exception as e:
- logger.error(f"❌ 消息处理异常: {e}")
+ logger.error(i18n.gettext('message_processing_error', error=e))
+ error_response = i18n.gettext('processing_exception')
if is_broadcast:
- interface.sendText("❌ 处理异常,请稍后重试")
+ interface.sendText(error_response)
else:
- interface.sendText("❌ 处理异常,请稍后重试", from_id)
+ interface.sendText(error_response, from_id)
def _build_contextual_prompt(self, base_prompt: str, is_broadcast: bool, sender_name: str) -> str:
"""构建上下文相关的系统提示"""
@@ -356,8 +356,9 @@ def update_broadcast_settings(self, enabled: bool = False, keywords: List[str] =
"""更新群发消息设置"""
if enabled is not None:
self.broadcast_enabled = enabled
- logger.info(f"🔄 群发消息处理: {'启用' if enabled else '禁用'}")
+ status = i18n.gettext('enabled') if enabled else i18n.gettext('disabled')
+ logger.info(i18n.gettext('broadcast_settings_updated', status=status))
if keywords is not None:
self.broadcast_keywords = keywords
- logger.info(f"🔄 更新群发触发关键词: {keywords}")
\ No newline at end of file
+ logger.info(i18n.gettext('keywords_updated', keywords=keywords))
\ No newline at end of file
diff --git a/meshbot/handlers/signal_handlers.py b/meshbot/handlers/signal_handlers.py
index 2974c87..2626d44 100644
--- a/meshbot/handlers/signal_handlers.py
+++ b/meshbot/handlers/signal_handlers.py
@@ -3,6 +3,7 @@
import sys
import asyncio
import logging
+from meshbot.utils.localize import i18n
logger = logging.getLogger(__name__)
@@ -10,7 +11,7 @@
def setup_signal_handlers(bot) -> None:
"""注册信号处理器以优雅关闭"""
def signal_handler(sig, frame):
- logger.info(f"🛑 收到信号 {sig},正在关闭...")
+ logger.info(i18n.gettext('recieced_sig_closing', sig=sig))
if bot._loop and bot._loop.is_running():
asyncio.run_coroutine_threadsafe(bot.shutdown(), bot._loop)
sys.exit(0)
diff --git a/meshbot/localizations/localization.py b/meshbot/localizations/localization.py
new file mode 100644
index 0000000..17bca5a
--- /dev/null
+++ b/meshbot/localizations/localization.py
@@ -0,0 +1,528 @@
+from typing import Dict
+
+# 定义多语言消息
+MESSAGES: Dict[str, Dict[str, str]] = {
+ 'en': {
+ 'config_not_found': "⚠️ Config file config.json not found",
+ 'creating_example_config': "📝 Creating example config file...",
+ 'plz_edit_and_restart': "ℹ️ Please edit config.json and restart the program",
+ 'bot_running_error': "💥 Bot runtime error: {err}",
+ 'recieced_sig_closing': "🛑 Received signal {sig}, closing...",
+ 'platform_not_found': "❌ Platform '{platform}' not found or default platform '{default_platform}' configuration missing",
+ 'back_to_ollama': "🫥 Falling back to built-in Ollama client",
+
+ # From core/bot.py
+ 'bot_initializing': "🥳 Initializing Mesh AI Bot...",
+ 'ai_client_initialized': "✅ AI client initialized",
+ 'available_models': "✅ Available AI models: {model_names}",
+ 'no_models_warning': "⚠️ No available models found, please check service",
+ 'model_list_failed': "⚠️ Failed to get model list: {error}",
+ 'meshtastic_connected': "✅ Meshtastic connected, Node ID: {node_id}",
+ 'node_info_error': "❌ Unable to get Meshtastic node information",
+ 'meshtastic_connect_failed': "❌ Meshtastic connection failed: {error}",
+ 'connection_established': "🔗 Mesh device connection established",
+ 'event_loop_not_running': "⚠️ Event loop not running, unable to process messages",
+ 'message_queued': "📩 Message queued, from: {sender}",
+ 'queue_failed': "❌ Message queue failed: {error}",
+ 'queue_processing_error': "❌ Message queue processing error: {error}",
+ 'bot_started': "🚀 Mesh AI Bot started, press Ctrl+C to exit...",
+ 'interrupt_received': "🛑 Interrupt received, closing...",
+ 'bot_shutting_down': "🔧 Shutting down Mesh AI Bot...",
+ 'meshtastic_closed': "🔌 Meshtastic connection closed",
+ 'ai_client_closed': "🧠 AI client closed",
+ 'processing_message': "🔍 Processing message from {sender}",
+ 'sending_response': "📤 Sending response to {target}",
+ 'response_sent': "✅ Response sent successfully",
+ 'send_failed': "❌ Message send failed: {error}",
+ 'lock_acquired': "🔒 Processing lock acquired",
+ 'lock_released': "🔓 Processing lock released",
+
+ # message_processor.py
+ 'packet_missing_decoded': "⚠️ Packet missing 'decoded' field",
+ 'node_name': "👤 Node {node_id} name: {long_name}",
+ 'node_info_type_warning': "⚠️ Node {node_id} info is not dictionary type",
+ 'broadcast_message_received': "📢 Broadcast from {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 Private message from {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} dBm",
+ 'snr_debug': "🔊 SNR: {snr} dB",
+ 'position_received': "📍 Position received from {from_id}",
+ 'detailed_position': "Detailed position: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ Missing 'from' field",
+ 'position_data_empty': "⚠️ Position data empty",
+ 'missing_lat_lon': "❌ Missing latitude/longitude",
+ 'mention_detected': "🎯 Mention detected, will reply to broadcast",
+ 'question_detected': "❓ Question detected, will reply to broadcast",
+ 'keyword_detected': "🔍 Keyword '{keyword}' detected, will reply to broadcast",
+ 'ignore_broadcast': "⏭️ Ignoring broadcast (no reply condition triggered)",
+ 'ai_broadcast_response': "🤖 AI broadcast response: {response}",
+ 'ai_private_response': "🤖 AI private response: {response}",
+ 'ai_processing_failed': "❌ AI processing failed: {error_msg}",
+ 'message_processing_error': "❌ Message processing error: {error}",
+ 'broadcast_settings_updated': "🔄 Broadcast message processing: {status}",
+ 'keywords_updated': "🔄 Updated broadcast trigger keywords: {keywords}",
+ 'processing_failed': "❌ Processing failed: {error_msg}",
+ 'processing_exception': "❌ Processing exception, please try again later",
+ 'unknown': "Unknown",
+ 'unknown_type': "Unknown type",
+ 'unknown_error': "Unknown error",
+ 'enabled': "Enabled",
+ 'disabled': "Disabled",
+ 'other_message': "Another message"
+ },
+ 'zh_CN': {
+ 'config_not_found': "⚠️ 未找到 config.json 配置文件",
+ 'creating_example_config': "📝 正在创建示例配置文件...",
+ 'plz_edit_and_restart': "ℹ️ 请编辑 config.json 文件并重新启动程序",
+ 'bot_running_error': "💥 机器人运行异常: {err}",
+ 'recieced_sig_closing': "🛑 收到信号 {sig},正在关闭...",
+ 'platform_not_found': "❌ 未找到平台 '{platform}' 或默认平台 '{default_platform}' 的配置",
+ 'back_to_ollama': "🫥 回退到内置 Ollama 客户端",
+
+ # 从 core/bot.py 中提取的翻译键
+ 'bot_initializing': "🥳 正在初始化 Mesh AI 机器人...",
+ 'ai_client_initialized': "✅ AI 客户端初始化完成",
+ 'available_models': "✅ 可用 AI 模型: {model_names}",
+ 'no_models_warning': "⚠️ 未找到可用模型,请检查服务",
+ 'model_list_failed': "⚠️ 获取模型列表失败: {error}",
+ 'meshtastic_connected': "✅ Meshtastic 连接成功,节点 ID: {node_id}",
+ 'node_info_error': "❌ 无法获取 Meshtastic 节点信息",
+ 'meshtastic_connect_failed': "❌ Meshtastic 连接失败: {error}",
+ 'connection_established': "🔗 Mesh 设备连接已建立",
+ 'event_loop_not_running': "⚠️ 事件循环未运行,无法处理消息",
+ 'message_queued': "📩 消息已入队,来自: {sender}",
+ 'queue_failed': "❌ 消息入队失败: {error}",
+ 'queue_processing_error': "❌ 消息队列处理异常: {error}",
+ 'bot_started': "🚀 Mesh AI 机器人已启动,按 Ctrl+C 退出...",
+ 'interrupt_received': "🛑 收到中断信号,正在关闭...",
+ 'bot_shutting_down': "🔧 正在关闭 Mesh AI 机器人...",
+ 'meshtastic_closed': "🔌 Meshtastic 连接已关闭",
+ 'ai_client_closed': "🧠 AI 客户端已关闭",
+ 'processing_message': "🔍 正在处理来自 {sender} 的消息",
+ 'sending_response': "📤 正在发送回复到 {target}",
+ 'response_sent': "✅ 回复发送成功",
+ 'send_failed': "❌ 消息发送失败: {error}",
+ 'lock_acquired': "🔒 获取处理锁",
+ 'lock_released': "🔓 释放处理锁",
+ # message_processor.py
+ 'packet_missing_decoded': "⚠️ 数据包缺少 'decoded' 字段",
+ 'node_name': "👤 节点 {node_id} 名称: {long_name}",
+ 'node_info_type_warning': "⚠️ 节点 {node_id} 信息非字典类型",
+ 'broadcast_message_received': "📢 群发 来自 {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 私聊 来自 {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} dBm",
+ 'snr_debug': "🔊 SNR: {snr} dB",
+ 'position_received': "📍 收到 {from_id} 的位置信息",
+ 'detailed_position': "详细位置: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ 缺少 'from' 字段",
+ 'position_data_empty': "⚠️ 位置数据为空",
+ 'missing_lat_lon': "❌ 缺失经纬度",
+ 'mention_detected': "🎯 检测到提及,将回复群发消息",
+ 'question_detected': "❓ 检测到问题,将回复群发消息",
+ 'keyword_detected': "🔍 检测到关键词 '{keyword}',将回复群发消息",
+ 'ignore_broadcast': "⏭️ 忽略群发消息(未触发回复条件)",
+ 'ai_broadcast_response': "🤖 AI 回复群发消息: {response}",
+ 'ai_private_response': "🤖 AI 回复私聊消息: {response}",
+ 'ai_processing_failed': "❌ AI 处理失败: {error_msg}",
+ 'message_processing_error': "❌ 消息处理异常: {error}",
+ 'broadcast_settings_updated': "🔄 群发消息处理: {status}",
+ 'keywords_updated': "🔄 更新群发触发关键词: {keywords}",
+ 'processing_failed': "❌ 处理失败: {error_msg}",
+ 'processing_exception': "❌ 处理异常,请稍后重试",
+ 'unknown': "未知",
+ 'unknown_type': "未知类型",
+ 'unknown_error': "未知错误",
+ 'enabled': "启用",
+ 'disabled': "禁用",
+ 'other_message': "其他消息"
+ },
+ 'es': {
+ 'config_not_found': "⚠️ Archivo de configuración config.json no encontrado",
+ 'creating_example_config': "📝 Creando archivo de configuración de ejemplo...",
+ 'plz_edit_and_restart': "ℹ️ Por favor edita config.json y reinicia el programa",
+ 'bot_running_error': "💥 Error de ejecución del bot: {err}",
+ 'recieced_sig_closing': "🛑 Señal {sig} recibida, cerrando...",
+ 'platform_not_found': "❌ No se encontró la configuración para la plataforma '{platform}' ni para la plataforma predeterminada '{default_platform}'",
+ 'back_to_ollama': "🫥 Volviendo al cliente Ollama integrado",
+
+ 'bot_initializing': "🥳 Inicializando el bot Mesh AI...",
+ 'ai_client_initialized': "✅ Cliente de IA inicializado",
+ 'available_models': "✅ Modelos de IA disponibles: {model_names}",
+ 'no_models_warning': "⚠️ No se encontraron modelos disponibles, verifica el servicio",
+ 'model_list_failed': "⚠️ Falló al obtener la lista de modelos: {error}",
+ 'meshtastic_connected': "✅ Conexión a Meshtastic exitosa, ID de nodo: {node_id}",
+ 'node_info_error': "❌ No se pudo obtener la información del nodo Meshtastic",
+ 'meshtastic_connect_failed': "❌ Falló la conexión a Meshtastic: {error}",
+ 'connection_established': "🔗 Conexión al dispositivo Mesh establecida",
+ 'event_loop_not_running': "⚠️ El bucle de eventos no está en ejecución, no se pueden procesar mensajes",
+ 'message_queued': "📩 Mensaje encolado, de: {sender}",
+ 'queue_failed': "❌ Falló al encolar mensaje: {error}",
+ 'queue_processing_error': "❌ Excepción al procesar la cola de mensajes: {error}",
+ 'bot_started': "🚀 Bot Mesh AI iniciado, presiona Ctrl+C para salir...",
+ 'interrupt_received': "🛑 Señal de interrupción recibida, cerrando...",
+ 'bot_shutting_down': "🔧 Apagando el bot Mesh AI...",
+ 'meshtastic_closed': "🔌 Conexión a Meshtastic cerrada",
+ 'ai_client_closed': "🧠 Cliente de IA cerrado",
+ 'processing_message': "🔍 Procesando mensaje de {sender}",
+ 'sending_response': "📤 Enviando respuesta a {target}",
+ 'response_sent': "✅ Respuesta enviada con éxito",
+ 'send_failed': "❌ Falló el envío del mensaje: {error}",
+ 'lock_acquired': "🔒 Bloqueo de procesamiento adquirido",
+ 'lock_released': "🔓 Bloqueo de procesamiento liberado",
+
+ 'packet_missing_decoded': "⚠️ El paquete carece del campo 'decoded'",
+ 'node_name': "👤 Nodo {node_id} nombre: {long_name}",
+ 'node_info_type_warning': "⚠️ La información del nodo {node_id} no es de tipo diccionario",
+ 'broadcast_message_received': "📢 Mensaje grupal de {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 Mensaje privado de {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} dBm",
+ 'snr_debug': "🔊 SNR: {snr} dB",
+ 'position_received': "📍 Recibida ubicación de {from_id}",
+ 'detailed_position': "Ubicación detallada: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ Falta el campo 'from'",
+ 'position_data_empty': "⚠️ Datos de posición vacíos",
+ 'missing_lat_lon': "❌ Faltan latitud y longitud",
+ 'mention_detected': "🎯 Mención detectada, responderá al mensaje grupal",
+ 'question_detected': "❓ Pregunta detectada, responderá al mensaje grupal",
+ 'keyword_detected': "🔍 Palabra clave '{keyword}' detectada, responderá al mensaje grupal",
+ 'ignore_broadcast': "⏭️ Ignorando mensaje grupal (condiciones de respuesta no cumplidas)",
+ 'ai_broadcast_response': "🤖 Respuesta IA a mensaje grupal: {response}",
+ 'ai_private_response': "🤖 Respuesta IA a mensaje privado: {response}",
+ 'ai_processing_failed': "❌ Falló el procesamiento por IA: {error_msg}",
+ 'message_processing_error': "❌ Excepción al procesar mensaje: {error}",
+ 'broadcast_settings_updated': "🔄 Configuración de mensajes grupales: {status}",
+ 'keywords_updated': "🔄 Palabras clave actualizadas: {keywords}",
+ 'processing_failed': "❌ Falló el procesamiento: {error_msg}",
+ 'processing_exception': "❌ Excepción durante el procesamiento, inténtalo más tarde",
+ 'unknown': "Desconocido",
+ 'unknown_type': "Tipo desconocido",
+ 'unknown_error': "Error desconocido",
+ 'enabled': "Habilitado",
+ 'disabled': "Deshabilitado",
+ 'other_message': "Otro mensaje"
+ },
+ 'fr': {
+ 'config_not_found': "⚠️ Fichier de configuration config.json introuvable",
+ 'creating_example_config': "📝 Création du fichier de configuration exemple...",
+ 'plz_edit_and_restart': "ℹ️ Veuillez modifier config.json et redémarrer le programme",
+ 'bot_running_error': "💥 Erreur d'exécution du bot : {err}",
+ 'recieced_sig_closing': "🛑 Signal {sig} reçu, fermeture...",
+ 'platform_not_found': "❌ Configuration introuvable pour la plateforme « {platform} » ou la plateforme par défaut « {default_platform} »",
+ 'back_to_ollama': "🫥 Retour au client Ollama intégré",
+
+ 'bot_initializing': "🥳 Initialisation du bot Mesh AI...",
+ 'ai_client_initialized': "✅ Client IA initialisé",
+ 'available_models': "✅ Modèles IA disponibles : {model_names}",
+ 'no_models_warning': "⚠️ Aucun modèle disponible trouvé, vérifiez le service",
+ 'model_list_failed': "⚠️ Échec de la récupération de la liste des modèles : {error}",
+ 'meshtastic_connected': "✅ Connexion à Meshtastic réussie, ID du nœud : {node_id}",
+ 'node_info_error': "❌ Impossible d’obtenir les informations du nœud Meshtastic",
+ 'meshtastic_connect_failed': "❌ Échec de la connexion à Meshtastic : {error}",
+ 'connection_established': "🔗 Connexion à l’appareil Mesh établie",
+ 'event_loop_not_running': "⚠️ La boucle d’événements n’est pas active, impossible de traiter les messages",
+ 'message_queued': "📩 Message mis en file d’attente, provenant de : {sender}",
+ 'queue_failed': "❌ Échec de la mise en file d’attente du message : {error}",
+ 'queue_processing_error': "❌ Exception lors du traitement de la file d’attente : {error}",
+ 'bot_started': "🚀 Bot Mesh AI démarré, appuyez sur Ctrl+C pour quitter...",
+ 'interrupt_received': "🛑 Signal d’interruption reçu, fermeture en cours...",
+ 'bot_shutting_down': "🔧 Arrêt du bot Mesh AI...",
+ 'meshtastic_closed': "🔌 Connexion à Meshtastic fermée",
+ 'ai_client_closed': "🧠 Client IA fermé",
+ 'processing_message': "🔍 Traitement du message de {sender}",
+ 'sending_response': "📤 Envoi de la réponse à {target}",
+ 'response_sent': "✅ Réponse envoyée avec succès",
+ 'send_failed': "❌ Échec de l’envoi du message : {error}",
+ 'lock_acquired': "🔒 Verrou de traitement acquis",
+ 'lock_released': "🔓 Verrou de traitement libéré",
+
+ 'packet_missing_decoded': "⚠️ Paquet sans champ « decoded »",
+ 'node_name': "👤 Nœud {node_id} nom : {long_name}",
+ 'node_info_type_warning': "⚠️ Les informations du nœud {node_id} ne sont pas de type dictionnaire",
+ 'broadcast_message_received': "📢 Message diffusé de {from_id}{name_info} : {short_text}",
+ 'private_message_received': "📩 Message privé de {from_id}{name_info} : {short_text}",
+ 'rssi_debug': "📶 RSSI : {rssi} dBm",
+ 'snr_debug': "🔊 SNR : {snr} dB",
+ 'position_received': "📍 Position reçue de {from_id}",
+ 'detailed_position': "Position détaillée : {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ Champ « from » manquant",
+ 'position_data_empty': "⚠️ Données de position vides",
+ 'missing_lat_lon': "❌ Latitude et longitude manquantes",
+ 'mention_detected': "🎯 Mention détectée, réponse au message diffusé",
+ 'question_detected': "❓ Question détectée, réponse au message diffusé",
+ 'keyword_detected': "🔍 Mot-clé « {keyword} » détecté, réponse au message diffusé",
+ 'ignore_broadcast': "⏭️ Message diffusé ignoré (conditions non remplies)",
+ 'ai_broadcast_response': "🤖 Réponse IA au message diffusé : {response}",
+ 'ai_private_response': "🤖 Réponse IA au message privé : {response}",
+ 'ai_processing_failed': "❌ Échec du traitement par IA : {error_msg}",
+ 'message_processing_error': "❌ Exception lors du traitement du message : {error}",
+ 'broadcast_settings_updated': "🔄 Paramètres des messages diffusés : {status}",
+ 'keywords_updated': "🔄 Mots-clés mis à jour : {keywords}",
+ 'processing_failed': "❌ Échec du traitement : {error_msg}",
+ 'processing_exception': "❌ Exception pendant le traitement, veuillez réessayer plus tard",
+ 'unknown': "Inconnu",
+ 'unknown_type': "Type inconnu",
+ 'unknown_error': "Erreur inconnue",
+ 'enabled': "Activé",
+ 'disabled': "Désactivé",
+ 'other_message': "Un autre message"
+ },
+ 'de': {
+ 'config_not_found': "⚠️ Konfigurationsdatei config.json nicht gefunden",
+ 'creating_example_config': "📝 Erstelle Beispiel-Konfigurationsdatei...",
+ 'plz_edit_and_restart': "ℹ️ Bitte config.json bearbeiten und Programm neu starten",
+ 'bot_running_error': "💥 Bot-Laufzeitfehler: {err}",
+ 'recieced_sig_closing': "🛑 Signal {sig} empfangen, wird geschlossen...",
+ 'platform_not_found': "❌ Keine Konfiguration für Plattform '{platform}' oder Standardplattform '{default_platform}' gefunden",
+ 'back_to_ollama': "🫥 Zurück zum integrierten Ollama-Client",
+
+ 'bot_initializing': "🥳 Initialisiere Mesh AI Bot...",
+ 'ai_client_initialized': "✅ KI-Client initialisiert",
+ 'available_models': "✅ Verfügbare KI-Modelle: {model_names}",
+ 'no_models_warning': "⚠️ Keine verfügbaren Modelle gefunden, bitte Dienst prüfen",
+ 'model_list_failed': "⚠️ Abruf der Modellliste fehlgeschlagen: {error}",
+ 'meshtastic_connected': "✅ Meshtastic-Verbindung erfolgreich, Knoten-ID: {node_id}",
+ 'node_info_error': "❌ Knoteninformationen von Meshtastic konnten nicht abgerufen werden",
+ 'meshtastic_connect_failed': "❌ Meshtastic-Verbindung fehlgeschlagen: {error}",
+ 'connection_established': "🔗 Mesh-Geräteverbindung hergestellt",
+ 'event_loop_not_running': "⚠️ Ereignisschleife läuft nicht, Nachrichten können nicht verarbeitet werden",
+ 'message_queued': "📩 Nachricht in Warteschlange, von: {sender}",
+ 'queue_failed': "❌ Nachricht konnte nicht in Warteschlange gestellt werden: {error}",
+ 'queue_processing_error': "❌ Fehler bei der Verarbeitung der Nachrichtenwarteschlange: {error}",
+ 'bot_started': "🚀 Mesh AI Bot gestartet, Drücke Strg+C zum Beenden...",
+ 'interrupt_received': "🛑 Unterbrechungssignal empfangen, wird beendet...",
+ 'bot_shutting_down': "🔧 Mesh AI Bot wird heruntergefahren...",
+ 'meshtastic_closed': "🔌 Meshtastic-Verbindung geschlossen",
+ 'ai_client_closed': "🧠 KI-Client geschlossen",
+ 'processing_message': "🔍 Verarbeite Nachricht von {sender}",
+ 'sending_response': "📤 Sende Antwort an {target}",
+ 'response_sent': "✅ Antwort erfolgreich gesendet",
+ 'send_failed': "❌ Nachricht konnte nicht gesendet werden: {error}",
+ 'lock_acquired': "🔒 Verarbeitungssperre erworben",
+ 'lock_released': "🔓 Verarbeitungssperre freigegeben",
+
+ 'packet_missing_decoded': "⚠️ Paket enthält kein 'decoded'-Feld",
+ 'node_name': "👤 Knoten {node_id} Name: {long_name}",
+ 'node_info_type_warning': "⚠️ Knoteninformationen für {node_id} sind kein Wörterbuch",
+ 'broadcast_message_received': "📢 Broadcast-Nachricht von {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 Private Nachricht von {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} dBm",
+ 'snr_debug': "🔊 SNR: {snr} dB",
+ 'position_received': "📍 Standortdaten von {from_id} empfangen",
+ 'detailed_position': "Detaillierter Standort: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ Feld 'from' fehlt",
+ 'position_data_empty': "⚠️ Standortdaten leer",
+ 'missing_lat_lon': "❌ Breiten- und Längengrad fehlen",
+ 'mention_detected': "🎯 Erwähnung erkannt, antworte auf Broadcast",
+ 'question_detected': "❓ Frage erkannt, antworte auf Broadcast",
+ 'keyword_detected': "🔍 Stichwort '{keyword}' erkannt, antworte auf Broadcast",
+ 'ignore_broadcast': "⏭️ Broadcast ignoriert (Antwortbedingungen nicht erfüllt)",
+ 'ai_broadcast_response': "🤖 KI-Antwort auf Broadcast: {response}",
+ 'ai_private_response': "🤖 KI-Antwort auf private Nachricht: {response}",
+ 'ai_processing_failed': "❌ KI-Verarbeitung fehlgeschlagen: {error_msg}",
+ 'message_processing_error': "❌ Fehler bei der Nachrichtenverarbeitung: {error}",
+ 'broadcast_settings_updated': "🔄 Broadcast-Einstellungen: {status}",
+ 'keywords_updated': "🔄 Auslösende Stichwörter aktualisiert: {keywords}",
+ 'processing_failed': "❌ Verarbeitung fehlgeschlagen: {error_msg}",
+ 'processing_exception': "❌ Verarbeitungsfehler – bitte später erneut versuchen",
+ 'unknown': "Unbekannt",
+ 'unknown_type': "Unbekannter Typ",
+ 'unknown_error': "Unbekannter Fehler",
+ 'enabled': "Aktiviert",
+ 'disabled': "Deaktiviert",
+ 'other_message': "Eine andere Nachricht"
+ },
+ 'ja': {
+ 'config_not_found': "⚠️ 設定ファイル config.json が見つかりません",
+ 'creating_example_config': "📝 サンプル設定ファイルを作成中...",
+ 'plz_edit_and_restart': "ℹ️ config.json を編集してプログラムを再起動してください",
+ 'bot_running_error': "💥 ボット実行エラー: {err}",
+ 'recieced_sig_closing': "🛑 信号 {sig} を受信、終了中...",
+ 'platform_not_found': "❌ プラットフォーム '{platform}' またはデフォルトプラットフォーム '{default_platform}' の設定が見つかりません",
+ 'back_to_ollama': "🫥 組み込みの Ollama クライアントに戻ります",
+
+ 'bot_initializing': "🥳 Mesh AI ボットを初期化中...",
+ 'ai_client_initialized': "✅ AIクライアントが初期化されました",
+ 'available_models': "✅ 利用可能なAIモデル: {model_names}",
+ 'no_models_warning': "⚠️ 利用可能なモデルが見つかりません。サービスを確認してください",
+ 'model_list_failed': "⚠️ モデル一覧の取得に失敗しました: {error}",
+ 'meshtastic_connected': "✅ Meshtastic 接続成功、ノードID: {node_id}",
+ 'node_info_error': "❌ Meshtastic ノード情報の取得に失敗しました",
+ 'meshtastic_connect_failed': "❌ Meshtastic 接続失敗: {error}",
+ 'connection_established': "🔗 Mesh デバイスとの接続が確立されました",
+ 'event_loop_not_running': "⚠️ イベントループが実行されておらず、メッセージを処理できません",
+ 'message_queued': "📩 メッセージをキューに追加しました。送信元: {sender}",
+ 'queue_failed': "❌ メッセージのキューイングに失敗しました: {error}",
+ 'queue_processing_error': "❌ メッセージキュー処理中に例外が発生しました: {error}",
+ 'bot_started': "🚀 Mesh AI ボットが起動しました、Ctrl+Cで終了...",
+ 'interrupt_received': "🛑 割り込みシグナルを受信、シャットダウン中...",
+ 'bot_shutting_down': "🔧 Mesh AI ボットをシャットダウン中...",
+ 'meshtastic_closed': "🔌 Meshtastic 接続を閉じました",
+ 'ai_client_closed': "🧠 AIクライアントを閉じました",
+ 'processing_message': "🔍 {sender} からのメッセージを処理中",
+ 'sending_response': "📤 {target} へ返信を送信中",
+ 'response_sent': "✅ 返信を送信しました",
+ 'send_failed': "❌ メッセージ送信に失敗しました: {error}",
+ 'lock_acquired': "🔒 処理ロックを取得しました",
+ 'lock_released': "🔓 処理ロックを解放しました",
+
+ 'packet_missing_decoded': "⚠️ パケットに 'decoded' フィールドがありません",
+ 'node_name': "👤 ノード {node_id} 名前: {long_name}",
+ 'node_info_type_warning': "⚠️ ノード {node_id} の情報が辞書型ではありません",
+ 'broadcast_message_received': "📢 全体向けメッセージ 受信元 {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 個別メッセージ 受信元 {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} dBm",
+ 'snr_debug': "🔊 SNR: {snr} dB",
+ 'position_received': "📍 {from_id} の位置情報を受信しました",
+ 'detailed_position': "詳細な位置情報: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ 'from' フィールドがありません",
+ 'position_data_empty': "⚠️ 位置情報データが空です",
+ 'missing_lat_lon': "❌ 緯度・経度が不足しています",
+ 'mention_detected': "🎯 メンションを検出、全体向けメッセージに返信します",
+ 'question_detected': "❓ 質問を検出、全体向けメッセージに返信します",
+ 'keyword_detected': "🔍 キーワード '{keyword}' を検出、全体向けメッセージに返信します",
+ 'ignore_broadcast': "⏭️ 全体向けメッセージを無視(返信条件未達)",
+ 'ai_broadcast_response': "🤖 AIが全体向けメッセージに返信: {response}",
+ 'ai_private_response': "🤖 AIが個別メッセージに返信: {response}",
+ 'ai_processing_failed': "❌ AI処理に失敗しました: {error_msg}",
+ 'message_processing_error': "❌ メッセージ処理中にエラーが発生しました: {error}",
+ 'broadcast_settings_updated': "🔄 全体向けメッセージ処理: {status}",
+ 'keywords_updated': "🔄 全体向けトリガーキーワードを更新: {keywords}",
+ 'processing_failed': "❌ 処理に失敗しました: {error_msg}",
+ 'processing_exception': "❌ 処理中に例外が発生しました。しばらくしてから再試行してください",
+ 'unknown': "不明",
+ 'unknown_type': "不明なタイプ",
+ 'unknown_error': "不明なエラー",
+ 'enabled': "有効",
+ 'disabled': "無効",
+ 'other_message': "その他のメッセージ"
+ },
+ 'ko': {
+ 'config_not_found': "⚠️ 설정 파일 config.json을 찾을 수 없습니다",
+ 'creating_example_config': "📝 예제 설정 파일 생성 중...",
+ 'plz_edit_and_restart': "ℹ️ config.json을 편집하고 프로그램을 재시작하세요",
+ 'bot_running_error': "💥 봇 실행 오류: {err}",
+ 'recieced_sig_closing': "🛑 {sig} 신호 수신, 종료 중...",
+ 'platform_not_found': "❌ 플랫폼 '{platform}' 또는 기본 플랫폼 '{default_platform}'에 대한 설정을 찾을 수 없습니다",
+ 'back_to_ollama': "🫥 내장된 Ollama 클라이언트로 되돌아갑니다",
+
+ 'bot_initializing': "🥳 Mesh AI 봇 초기화 중...",
+ 'ai_client_initialized': "✅ AI 클라이언트가 초기화되었습니다",
+ 'available_models': "✅ 사용 가능한 AI 모델: {model_names}",
+ 'no_models_warning': "⚠️ 사용 가능한 모델을 찾을 수 없습니다. 서비스를 확인하세요",
+ 'model_list_failed': "⚠️ 모델 목록 가져오기 실패: {error}",
+ 'meshtastic_connected': "✅ Meshtastic 연결 성공, 노드 ID: {node_id}",
+ 'node_info_error': "❌ Meshtastic 노드 정보를 가져올 수 없습니다",
+ 'meshtastic_connect_failed': "❌ Meshtastic 연결 실패: {error}",
+ 'connection_established': "🔗 Mesh 장치 연결이 설정되었습니다",
+ 'event_loop_not_running': "⚠️ 이벤트 루프가 실행되지 않아 메시지를 처리할 수 없습니다",
+ 'message_queued': "📩 메시지가 대기열에 추가됨, 발신자: {sender}",
+ 'queue_failed': "❌ 메시지 대기열 추가 실패: {error}",
+ 'queue_processing_error': "❌ 메시지 대기열 처리 중 예외 발생: {error}",
+ 'bot_started': "🚀 Mesh AI 봇이 시작되었습니다, Ctrl+C로 종료...",
+ 'interrupt_received': "🛑 인터럽트 신호 수신, 종료 중...",
+ 'bot_shutting_down': "🔧 Mesh AI 봇을 종료하는 중...",
+ 'meshtastic_closed': "🔌 Meshtastic 연결이 닫혔습니다",
+ 'ai_client_closed': "🧠 AI 클라이언트가 닫혔습니다",
+ 'processing_message': "🔍 {sender}의 메시지 처리 중",
+ 'sending_response': "📤 {target}에게 응답 전송 중",
+ 'response_sent': "✅ 응답 전송 성공",
+ 'send_failed': "❌ 메시지 전송 실패: {error}",
+ 'lock_acquired': "🔒 처리 잠금 획득",
+ 'lock_released': "🔓 처리 잠금 해제",
+
+ 'packet_missing_decoded': "⚠️ 패킷에 'decoded' 필드가 없습니다",
+ 'node_name': "👤 노드 {node_id} 이름: {long_name}",
+ 'node_info_type_warning': "⚠️ 노드 {node_id} 정보가 딕셔너리 유형이 아닙니다",
+ 'broadcast_message_received': "📢 전체 메시지 수신, 발신자 {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 개인 메시지 수신, 발신자 {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} dBm",
+ 'snr_debug': "🔊 SNR: {snr} dB",
+ 'position_received': "📍 {from_id}의 위치 정보 수신",
+ 'detailed_position': "상세 위치: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ 'from' 필드 누락",
+ 'position_data_empty': "⚠️ 위치 데이터가 비어 있음",
+ 'missing_lat_lon': "❌ 위도/경도 누락",
+ 'mention_detected': "🎯 멘션 감지됨, 전체 메시지에 응답합니다",
+ 'question_detected': "❓ 질문 감지됨, 전체 메시지에 응답합니다",
+ 'keyword_detected': "🔍 키워드 '{keyword}' 감지됨, 전체 메시지에 응답합니다",
+ 'ignore_broadcast': "⏭️ 전체 메시지 무시됨(응답 조건 미충족)",
+ 'ai_broadcast_response': "🤖 AI 전체 메시지 응답: {response}",
+ 'ai_private_response': "🤖 AI 개인 메시지 응답: {response}",
+ 'ai_processing_failed': "❌ AI 처리 실패: {error_msg}",
+ 'message_processing_error': "❌ 메시지 처리 중 오류 발생: {error}",
+ 'broadcast_settings_updated': "🔄 전체 메시지 처리: {status}",
+ 'keywords_updated': "🔄 전체 메시지 트리거 키워드 업데이트됨: {keywords}",
+ 'processing_failed': "❌ 처리 실패: {error_msg}",
+ 'processing_exception': "❌ 처리 중 예외 발생, 잠시 후 다시 시도하세요",
+ 'unknown': "알 수 없음",
+ 'unknown_type': "알 수 없는 유형",
+ 'unknown_error': "알 수 없는 오류",
+ 'enabled': "활성화",
+ 'disabled': "비활성화",
+ 'other_message': "다른 메시지"
+ },
+ 'ru': {
+ 'config_not_found': "⚠️ Файл конфигурации config.json не найден",
+ 'creating_example_config': "📝 Создание примерного файла конфигурации...",
+ 'plz_edit_and_restart': "ℹ️ Отредактируйте config.json и перезапустите программу",
+ 'bot_running_error': "💥 Ошибка выполнения бота: {err}",
+ 'recieced_sig_closing': "🛑 Получен сигнал {sig}, закрытие...",
+ 'platform_not_found': "❌ Не найдена конфигурация для платформы '{platform}' или платформы по умолчанию '{default_platform}'",
+ 'back_to_ollama': "🫥 Возврат к встроенному клиенту Ollama",
+
+ 'bot_initializing': "🥳 Инициализация бота Mesh AI...",
+ 'ai_client_initialized': "✅ Клиент ИИ инициализирован",
+ 'available_models': "✅ Доступные модели ИИ: {model_names}",
+ 'no_models_warning': "⚠️ Доступные модели не найдены, проверьте службу",
+ 'model_list_failed': "⚠️ Не удалось получить список моделей: {error}",
+ 'meshtastic_connected': "✅ Подключение к Meshtastic успешно, ID узла: {node_id}",
+ 'node_info_error': "❌ Не удалось получить информацию об узле Meshtastic",
+ 'meshtastic_connect_failed': "❌ Не удалось подключиться к Meshtastic: {error}",
+ 'connection_established': "🔗 Подключение к устройству Mesh установлено",
+ 'event_loop_not_running': "⚠️ Цикл событий не запущен, невозможно обрабатывать сообщения",
+ 'message_queued': "📩 Сообщение добавлено в очередь, от: {sender}",
+ 'queue_failed': "❌ Не удалось добавить сообщение в очередь: {error}",
+ 'queue_processing_error': "❌ Ошибка при обработке очереди сообщений: {error}",
+ 'bot_started': "🚀 Бот Mesh AI запущен, нажмите Ctrl+C для выхода...",
+ 'interrupt_received': "🛑 Получен сигнал прерывания, завершение работы...",
+ 'bot_shutting_down': "🔧 Завершение работы бота Mesh AI...",
+ 'meshtastic_closed': "🔌 Подключение к Meshtastic закрыто",
+ 'ai_client_closed': "🧠 Клиент ИИ закрыт",
+ 'processing_message': "🔍 Обработка сообщения от {sender}",
+ 'sending_response': "📤 Отправка ответа на {target}",
+ 'response_sent': "✅ Ответ успешно отправлен",
+ 'send_failed': "❌ Не удалось отправить сообщение: {error}",
+ 'lock_acquired': "🔒 Получена блокировка обработки",
+ 'lock_released': "🔓 Блокировка обработки снята",
+
+ 'packet_missing_decoded': "⚠️ В пакете отсутствует поле 'decoded'",
+ 'node_name': "👤 Узел {node_id} имя: {long_name}",
+ 'node_info_type_warning': "⚠️ Информация об узле {node_id} не является словарём",
+ 'broadcast_message_received': "📢 Трансляция от {from_id}{name_info}: {short_text}",
+ 'private_message_received': "📩 Личное сообщение от {from_id}{name_info}: {short_text}",
+ 'rssi_debug': "📶 RSSI: {rssi} дБм",
+ 'snr_debug': "🔊 SNR: {snr} дБ",
+ 'position_received': "📍 Получено местоположение от {from_id}",
+ 'detailed_position': "Подробное местоположение: {lat:.6f}, {lon:.6f}",
+ 'missing_from_field': "❌ Отсутствует поле 'from'",
+ 'position_data_empty': "⚠️ Данные о местоположении пусты",
+ 'missing_lat_lon': "❌ Отсутствуют широта и долгота",
+ 'mention_detected': "🎯 Обнаружено упоминание, будет дан ответ на трансляцию",
+ 'question_detected': "❓ Обнаружен вопрос, будет дан ответ на трансляцию",
+ 'keyword_detected': "🔍 Обнаружено ключевое слово '{keyword}', будет дан ответ на трансляцию",
+ 'ignore_broadcast': "⏭️ Трансляция проигнорирована (условия ответа не выполнены)",
+ 'ai_broadcast_response': "🤖 Ответ ИИ на трансляцию: {response}",
+ 'ai_private_response': "🤖 Ответ ИИ на личное сообщение: {response}",
+ 'ai_processing_failed': "❌ Сбой обработки ИИ: {error_msg}",
+ 'message_processing_error': "❌ Ошибка обработки сообщения: {error}",
+ 'broadcast_settings_updated': "🔄 Настройки трансляции: {status}",
+ 'keywords_updated': "🔄 Обновлены ключевые слова для трансляции: {keywords}",
+ 'processing_failed': "❌ Сбой обработки: {error_msg}",
+ 'processing_exception': "❌ Исключение при обработке, повторите попытку позже",
+ 'unknown': "Неизвестно",
+ 'unknown_type': "Неизвестный тип",
+ 'unknown_error': "Неизвестная ошибка",
+ 'enabled': "Включено",
+ 'disabled': "Отключено",
+ 'other_message': "Другое сообщение"
+ }
+}
\ No newline at end of file
diff --git a/meshbot/utils/ai_client_factory.py b/meshbot/utils/ai_client_factory.py
index ef8e705..a8eee13 100644
--- a/meshbot/utils/ai_client_factory.py
+++ b/meshbot/utils/ai_client_factory.py
@@ -3,6 +3,7 @@
import logging
from meshbot.config.config_loader import get_ai_client_config, get_platform
+from meshbot.utils.localize import i18n
logger = logging.getLogger(__name__)
@@ -23,9 +24,9 @@ def create_ai_client(platform: str = ""):
# 获取配置,优先使用传入的 platform,否则使用默认 PLATFORM
config = ai_client_config.get(platform) or ai_client_config.get(default_platform)
if not config:
- logger.error(f"未找到平台 '{platform}' 或默认平台 '{default_platform}' 的配置")
+ logger.error(i18n.gettext('platform_not_found', platform = platform, default_platform = default_platform))
# 回退到内置 Ollama 配置
- logger.info("回退到内置 Ollama 客户端")
+ logger.info(i18n.gettext('back_to_ollama'))
from api.ollama_api import AsyncOllamaChatClient
return AsyncOllamaChatClient(default_model="qwen2.5:7b")
@@ -38,16 +39,16 @@ def create_ai_client(platform: str = ""):
kwargs = config["kwargs"].copy()
# 创建实例
- logger.info(f"🤖 创建 {platform} AI 客户端")
+ logger.info(i18n.gettext('ai_client_created', platform = platform))
return client_class(**kwargs)
except (ImportError, AttributeError, KeyError) as e:
logger.error(
- f"无法创建 AI 客户端 ({platform}): {type(e).__name__} - {e},回退到 Ollama"
+ i18n.gettext('ai_client_creation_failed', platform = platform, error_type = type(e).__name__, error_msg = e)
)
try:
from api.ollama_api import AsyncOllamaChatClient
return AsyncOllamaChatClient(default_model="qwen2.5:7b")
except ImportError:
- logger.critical("回退失败:无法导入 AsyncOllamaChatClient")
- raise RuntimeError("AI 客户端初始化失败,且无法回退到 Ollama")
\ No newline at end of file
+ logger.critical(i18n.gettext('fallback_failed'))
+ raise RuntimeError(i18n.gettext('ai_client_init_failed'))
\ No newline at end of file
diff --git a/meshbot/utils/localize.py b/meshbot/utils/localize.py
new file mode 100644
index 0000000..a473be2
--- /dev/null
+++ b/meshbot/utils/localize.py
@@ -0,0 +1,27 @@
+from meshbot.config.config_loader import _config_manager
+from meshbot.localizations.localization import MESSAGES
+
+class I18N:
+ def __init__(self):
+ self.language = ""
+ self.messages = MESSAGES.get(self.language, MESSAGES['zh_CN'])
+
+ def gettext(self, key: str, **kwargs) -> str:
+ """获取本地化消息,支持格式化参数"""
+ if self.language:
+ pass
+ else:
+ self.language = _config_manager.language
+
+ message_template = self.messages.get(key, key)
+
+ # 如果有参数,进行格式化
+ if kwargs:
+ try:
+ return message_template.format(**kwargs)
+ except KeyError as e:
+ return f"[Format error in '{key}': missing {e}]"
+
+ return message_template
+
+i18n = I18N()