diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..c28aad0 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,18 @@ +.git +.github +.codex-tasks +llmdoc +venv +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.env +.env.* +proxy/data +proxy/__pycache__ +mysearch/__pycache__ +openclaw/__pycache__ +tests/__pycache__ +docs/images diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml new file mode 100644 index 0000000..320e0bd --- /dev/null +++ b/.github/workflows/docker-publish.yml @@ -0,0 +1,200 @@ +name: Build and Publish Docker Images + +on: + push: + branches: + - main + tags: + - "v*" + pull_request: + branches: + - main + workflow_dispatch: + +concurrency: + group: docker-publish-${{ github.ref }} + cancel-in-progress: true + +jobs: + verify: + name: Verify proxy runtime and tests + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: pip + cache-dependency-path: | + mysearch/requirements.txt + proxy/requirements.txt + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install -r mysearch/requirements.txt -r proxy/requirements.txt + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + + - name: Run Python test suite + run: python -m unittest discover -s tests + + - name: Run Python syntax checks + run: | + python -m py_compile \ + proxy/server.py \ + mysearch/config.py \ + mysearch/clients.py \ + mysearch/keyring.py \ + mysearch/social_gateway.py \ + openclaw/runtime/mysearch/config.py \ + openclaw/runtime/mysearch/clients.py \ + openclaw/runtime/mysearch/keyring.py + + - name: Run frontend syntax check + run: node --check proxy/static/js/console.js + + docker: + name: Build and publish ${{ matrix.target_label }} image + runs-on: ubuntu-latest + needs: verify + strategy: + fail-fast: false + matrix: + include: + - 
target: proxy + target_label: proxy + context: ./proxy + dockerfile: ./proxy/Dockerfile + default_image_name: mysearch-proxy + - target: mysearch + target_label: mysearch MCP + context: ./mysearch + dockerfile: ./mysearch/Dockerfile + default_image_name: mysearch-mcp + - target: stack + target_label: all-in-one stack + context: . + dockerfile: ./Dockerfile.stack + default_image_name: mysearch-stack + env: + DOCKERHUB_USERNAME_VAR: ${{ vars.DOCKERHUB_USERNAME }} + DOCKERHUB_USERNAME_SECRET: ${{ secrets.DOCKERHUB_USERNAME }} + IMAGE_NAME_PROXY_VAR: ${{ vars.DOCKERHUB_IMAGE_NAME_PROXY }} + IMAGE_NAME_PROXY_SECRET: ${{ secrets.DOCKERHUB_IMAGE_NAME_PROXY }} + IMAGE_NAME_MYSEARCH_VAR: ${{ vars.DOCKERHUB_IMAGE_NAME_MYSEARCH }} + IMAGE_NAME_MYSEARCH_SECRET: ${{ secrets.DOCKERHUB_IMAGE_NAME_MYSEARCH }} + IMAGE_NAME_STACK_VAR: ${{ vars.DOCKERHUB_IMAGE_NAME_STACK }} + IMAGE_NAME_STACK_SECRET: ${{ secrets.DOCKERHUB_IMAGE_NAME_STACK }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Resolve Docker Hub image repository + id: image + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + TARGET: ${{ matrix.target }} + DEFAULT_IMAGE_NAME: ${{ matrix.default_image_name }} + run: | + trim_single_line() { + printf '%s' "$1" | tr -d '\r\n' | sed 's/^[[:space:]]*//; s/[[:space:]]*$//' + } + + DOCKERHUB_USERNAME="$(trim_single_line "${DOCKERHUB_USERNAME_VAR:-${DOCKERHUB_USERNAME_SECRET:-}}")" + case "${TARGET}" in + proxy) + IMAGE_NAME_RAW="${IMAGE_NAME_PROXY_VAR:-${IMAGE_NAME_PROXY_SECRET:-${DEFAULT_IMAGE_NAME}}}" + ;; + mysearch) + IMAGE_NAME_RAW="${IMAGE_NAME_MYSEARCH_VAR:-${IMAGE_NAME_MYSEARCH_SECRET:-${DEFAULT_IMAGE_NAME}}}" + ;; + stack) + IMAGE_NAME_RAW="${IMAGE_NAME_STACK_VAR:-${IMAGE_NAME_STACK_SECRET:-${DEFAULT_IMAGE_NAME}}}" + ;; + *) + echo "Unsupported target: ${TARGET}" + exit 1 + ;; + esac + + IMAGE_NAME_CLEAN="$(trim_single_line "${IMAGE_NAME_RAW}" | tr '[:upper:]' '[:lower:]')" + + test -n 
"${DOCKERHUB_USERNAME}" || { + echo "Missing Docker Hub username. Set Actions Variable DOCKERHUB_USERNAME or Secret DOCKERHUB_USERNAME." + exit 1 + } + + test -n "${IMAGE_NAME_CLEAN}" || { + echo "Missing Docker image name. Set Actions Variable DOCKERHUB_IMAGE_NAME." + exit 1 + } + + case "${DOCKERHUB_USERNAME}" in + */*|*:*|*" "*) + echo "Invalid Docker Hub username." + exit 1 + ;; + esac + + case "${IMAGE_NAME_CLEAN}" in + */*|*:*|*" "*) + echo "Invalid Docker image name." + exit 1 + ;; + esac + + if [ "${GITHUB_EVENT_NAME}" != "pull_request" ]; then + test -n "${DOCKERHUB_TOKEN}" || { + echo "Missing Docker Hub token. Set Actions Secret DOCKERHUB_TOKEN." + exit 1 + } + fi + + IMAGE_REPOSITORY="${DOCKERHUB_USERNAME}/${IMAGE_NAME_CLEAN}" + echo "dockerhub_username=${DOCKERHUB_USERNAME}" >> "$GITHUB_OUTPUT" + echo "image_repository=${IMAGE_REPOSITORY}" >> "$GITHUB_OUTPUT" + echo "Resolved Docker image repository: ${IMAGE_REPOSITORY}" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + with: + username: ${{ steps.image.outputs.dockerhub_username }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ steps.image.outputs.image_repository }} + tags: | + type=raw,value=latest,enable={{is_default_branch}} + type=ref,event=branch + type=ref,event=tag + type=sha,format=short + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.context }} + file: ${{ matrix.dockerfile }} + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.gitignore b/.gitignore index 
c7433fe..1d49f7e 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,9 @@ openclaw/.venv/ # 本地测试输出 htmlcov/ .coverage + +# 本地 AI 辅助产物 +.ace-tool/ +llmdoc/ +.codex-tasks/ +extract.py diff --git a/Dockerfile.stack b/Dockerfile.stack new file mode 100644 index 0000000..ee080d5 --- /dev/null +++ b/Dockerfile.stack @@ -0,0 +1,23 @@ +FROM python:3.11-slim + +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + PYTHONPATH=/app \ + MYSEARCH_PROXY_BASE_URL=http://127.0.0.1:9874 \ + MYSEARCH_PROXY_DB_PATH=/data/proxy.db + +WORKDIR /app + +COPY proxy/requirements.txt /tmp/proxy-requirements.txt +COPY mysearch/requirements.txt /tmp/mysearch-requirements.txt +RUN pip install --no-cache-dir -r /tmp/proxy-requirements.txt -r /tmp/mysearch-requirements.txt + +COPY proxy /app/proxy +COPY mysearch /app/mysearch +COPY docker /app/docker + +RUN chmod +x /app/docker/combined-entrypoint.sh /app/mysearch/docker-entrypoint.sh + +EXPOSE 9874 8000 + +CMD ["/app/docker/combined-entrypoint.sh"] diff --git a/README.md b/README.md index 116c618..2d0fd11 100644 --- a/README.md +++ b/README.md @@ -197,26 +197,40 @@ python3 skill/scripts/check_mysearch.py --health-only python3 skill/scripts/check_mysearch.py --web-query "OpenAI latest announcements" ``` -### 路线 B:先部署 Proxy,再让所有客户端复用 +### 路线 B:最简单的单容器部署 ```bash -mkdir -p mysearch-proxy-data - docker run -d \ - --name mysearch-proxy \ + --name mysearch-stack \ --restart unless-stopped \ -p 9874:9874 \ + -p 8000:8000 \ -e ADMIN_PASSWORD=change-me \ - -v $(pwd)/mysearch-proxy-data:/app/data \ - skernelx/mysearch-proxy:latest + -e MYSEARCH_PROXY_BOOTSTRAP_TOKEN=change-me-bootstrap-token \ + -v $(pwd)/mysearch-proxy-data:/data \ + skernelx/mysearch-stack:latest +``` + +部署完成后: + +- `proxy` 控制台:`http://localhost:9874` +- `mysearch` MCP:`http://localhost:8000/mcp` + +单容器镜像里,`proxy` 默认对外监听 `9874`,`mysearch` 默认对外监听 `8000/mcp`;`mysearch` 自己仍然通过容器内 `127.0.0.1:9874` 访问 Proxy。 + +这条链路里不再需要手动先创建 `mysp-` token。容器启动时会通过受限 bootstrap 接口自动创建或复用一个 `mysearch` 代理 
token,再交给同容器里的 `mysearch` 运行时使用。 + +### 路线 C:一套 compose 部署 `proxy + mysearch` + +```bash +cd /path/to/MySearch-Proxy +docker compose up -d ``` -部署后: +部署完成后: -1. 登录控制台 -2. 添加 Tavily / Firecrawl / Exa / Social 上游配置 -3. 创建 MySearch 通用 token -4. 把这个 token 填给 `mysearch/.env` 或 OpenClaw skill env +- `proxy` 控制台:`http://localhost:9874` +- `mysearch` MCP:`http://localhost:8000/mcp` ## 目录说明 diff --git a/README_EN.md b/README_EN.md index 85698d2..6c00a91 100644 --- a/README_EN.md +++ b/README_EN.md @@ -474,7 +474,7 @@ docker run -d \ --restart unless-stopped \ -p 9874:9874 \ -e ADMIN_PASSWORD=your-admin-password \ - -v $(pwd)/mysearch-proxy-data:/app/data \ + -v $(pwd)/mysearch-proxy-data:/data \ skernelx/mysearch-proxy:latest ``` diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..56276a8 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,48 @@ +services: + proxy: + build: + context: ./proxy + dockerfile: Dockerfile + ports: + - "${MYSEARCH_PROXY_PORT:-9874}:9874" + environment: + ADMIN_PASSWORD: ${ADMIN_PASSWORD:-change-me} + MYSEARCH_PROXY_DB_PATH: ${MYSEARCH_PROXY_DB_PATH:-/data/proxy.db} + MYSEARCH_PROXY_BOOTSTRAP_TOKEN: ${MYSEARCH_PROXY_BOOTSTRAP_TOKEN:-change-me-bootstrap-token} + STATS_CACHE_TTL_SECONDS: ${STATS_CACHE_TTL_SECONDS:-8} + DASHBOARD_AUTO_SYNC_ON_STATS: ${DASHBOARD_AUTO_SYNC_ON_STATS:-0} + DASHBOARD_BACKGROUND_SYNC_ON_STATS: ${DASHBOARD_BACKGROUND_SYNC_ON_STATS:-1} + DASHBOARD_BACKGROUND_SYNC_MIN_INTERVAL_SECONDS: ${DASHBOARD_BACKGROUND_SYNC_MIN_INTERVAL_SECONDS:-45} + volumes: + - mysearch-proxy-data:/data + restart: unless-stopped + + mysearch: + build: + context: ./mysearch + dockerfile: Dockerfile + depends_on: + - proxy + ports: + - "${MYSEARCH_MCP_PORT:-8000}:8000" + environment: + MYSEARCH_NAME: ${MYSEARCH_NAME:-MySearch} + MYSEARCH_TIMEOUT_SECONDS: ${MYSEARCH_TIMEOUT_SECONDS:-45} + MYSEARCH_PROXY_BASE_URL: ${MYSEARCH_PROXY_BASE_URL:-http://proxy:9874} + MYSEARCH_PROXY_API_KEY: ${MYSEARCH_PROXY_API_KEY:-} + 
MYSEARCH_PROXY_BOOTSTRAP_TOKEN: ${MYSEARCH_PROXY_BOOTSTRAP_TOKEN:-change-me-bootstrap-token} + MYSEARCH_PROXY_BOOTSTRAP_NAME: ${MYSEARCH_PROXY_BOOTSTRAP_NAME:-docker-mysearch} + MYSEARCH_MCP_HOST: 0.0.0.0 + MYSEARCH_MCP_PORT: 8000 + MYSEARCH_MCP_STREAMABLE_HTTP_PATH: ${MYSEARCH_MCP_STREAMABLE_HTTP_PATH:-/mcp} + MYSEARCH_MCP_SSE_PATH: ${MYSEARCH_MCP_SSE_PATH:-/sse} + MYSEARCH_MCP_STATELESS_HTTP: ${MYSEARCH_MCP_STATELESS_HTTP:-false} + MYSEARCH_MAX_PARALLEL_WORKERS: ${MYSEARCH_MAX_PARALLEL_WORKERS:-4} + MYSEARCH_SEARCH_CACHE_TTL_SECONDS: ${MYSEARCH_SEARCH_CACHE_TTL_SECONDS:-30} + MYSEARCH_EXTRACT_CACHE_TTL_SECONDS: ${MYSEARCH_EXTRACT_CACHE_TTL_SECONDS:-300} + entrypoint: ["/app/mysearch/docker-entrypoint.sh"] + command: ["python", "-m", "mysearch", "--transport", "streamable-http", "--host", "0.0.0.0", "--port", "8000"] + restart: unless-stopped + +volumes: + mysearch-proxy-data: diff --git a/docker/combined-entrypoint.sh b/docker/combined-entrypoint.sh new file mode 100644 index 0000000..107c2f5 --- /dev/null +++ b/docker/combined-entrypoint.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -euo pipefail + +export PYTHONPATH="${PYTHONPATH:-/app}:/app/proxy" +export MYSEARCH_PROXY_BASE_URL="${MYSEARCH_PROXY_BASE_URL:-http://127.0.0.1:9874}" +export MYSEARCH_PROXY_HOST="${MYSEARCH_PROXY_HOST:-0.0.0.0}" + +cleanup() { + local exit_code=$? + if [[ -n "${MCP_PID:-}" ]]; then + kill "${MCP_PID}" 2>/dev/null || true + fi + if [[ -n "${PROXY_PID:-}" ]]; then + kill "${PROXY_PID}" 2>/dev/null || true + fi + wait 2>/dev/null || true + exit "${exit_code}" +} + +trap cleanup EXIT INT TERM + +python -m uvicorn proxy.server:app --host "${MYSEARCH_PROXY_HOST}" --port 9874 & +PROXY_PID=$! 
+ +if [[ -z "${MYSEARCH_PROXY_API_KEY:-}" && -n "${MYSEARCH_PROXY_BOOTSTRAP_TOKEN:-}" ]]; then + export MYSEARCH_PROXY_API_KEY="$( + python /app/mysearch/scripts/bootstrap_proxy_token.py + )" +fi + +python -m mysearch --transport streamable-http --host 0.0.0.0 --port "${MYSEARCH_MCP_PORT:-8000}" & +MCP_PID=$! + +wait -n "${PROXY_PID}" "${MCP_PID}" diff --git a/install.sh b/install.sh index f57a965..9a1eb4b 100755 --- a/install.sh +++ b/install.sh @@ -24,6 +24,7 @@ ENV_KEYS=( MYSEARCH_MCP_SSE_PATH MYSEARCH_MCP_STREAMABLE_HTTP_PATH MYSEARCH_MCP_STATELESS_HTTP + MYSEARCH_TAVILY_MODE MYSEARCH_TAVILY_BASE_URL MYSEARCH_TAVILY_SEARCH_PATH MYSEARCH_TAVILY_EXTRACT_PATH @@ -35,6 +36,15 @@ ENV_KEYS=( MYSEARCH_TAVILY_API_KEYS MYSEARCH_TAVILY_KEYS_FILE MYSEARCH_TAVILY_ACCOUNTS_FILE + MYSEARCH_TAVILY_GATEWAY_BASE_URL + MYSEARCH_TAVILY_GATEWAY_SEARCH_PATH + MYSEARCH_TAVILY_GATEWAY_EXTRACT_PATH + MYSEARCH_TAVILY_GATEWAY_AUTH_MODE + MYSEARCH_TAVILY_GATEWAY_AUTH_HEADER + MYSEARCH_TAVILY_GATEWAY_AUTH_SCHEME + MYSEARCH_TAVILY_GATEWAY_AUTH_FIELD + MYSEARCH_TAVILY_GATEWAY_TOKEN + MYSEARCH_TAVILY_GATEWAY_TOKENS MYSEARCH_FIRECRAWL_BASE_URL MYSEARCH_FIRECRAWL_SEARCH_PATH MYSEARCH_FIRECRAWL_SCRAPE_PATH diff --git a/mysearch/.dockerignore b/mysearch/.dockerignore new file mode 100644 index 0000000..612f126 --- /dev/null +++ b/mysearch/.dockerignore @@ -0,0 +1,14 @@ +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python +.env +.env.* +.venv/ +venv/ +accounts.txt +*.db +README.md +README_EN.md +Dockerfile diff --git a/mysearch/.env.example b/mysearch/.env.example index a27524d..57fb3cd 100644 --- a/mysearch/.env.example +++ b/mysearch/.env.example @@ -23,6 +23,9 @@ MYSEARCH_MCP_STREAMABLE_HTTP_PATH=/mcp MYSEARCH_MCP_STATELESS_HTTP=false # Tavily +MYSEARCH_TAVILY_MODE=official + +# official 模式:自己维护 Tavily 官方 key 池 MYSEARCH_TAVILY_BASE_URL=https://api.tavily.com MYSEARCH_TAVILY_SEARCH_PATH=/search MYSEARCH_TAVILY_EXTRACT_PATH=/extract @@ -34,6 +37,17 @@ MYSEARCH_TAVILY_API_KEY= MYSEARCH_TAVILY_API_KEYS= 
MYSEARCH_TAVILY_KEYS_FILE=accounts.txt +# gateway 模式:例如 tavily-hikari;建议把 BASE_URL 直接写到 /api/tavily +MYSEARCH_TAVILY_GATEWAY_BASE_URL= +MYSEARCH_TAVILY_GATEWAY_SEARCH_PATH=/search +MYSEARCH_TAVILY_GATEWAY_EXTRACT_PATH=/extract +MYSEARCH_TAVILY_GATEWAY_AUTH_MODE=bearer +MYSEARCH_TAVILY_GATEWAY_AUTH_HEADER=Authorization +MYSEARCH_TAVILY_GATEWAY_AUTH_SCHEME=Bearer +MYSEARCH_TAVILY_GATEWAY_AUTH_FIELD=api_key +MYSEARCH_TAVILY_GATEWAY_TOKEN= +MYSEARCH_TAVILY_GATEWAY_TOKENS= + # Firecrawl MYSEARCH_FIRECRAWL_BASE_URL=https://api.firecrawl.dev MYSEARCH_FIRECRAWL_SEARCH_PATH=/v2/search @@ -79,6 +93,11 @@ MYSEARCH_XAI_MODEL=grok-4.20-beta-latest-non-reasoning # MYSEARCH_XAI_SEARCH_MODE=compatible # MYSEARCH_XAI_API_KEY=your-social-gateway-token +# Tavily gateway 示例: +# MYSEARCH_TAVILY_MODE=gateway +# MYSEARCH_TAVILY_GATEWAY_BASE_URL=http://127.0.0.1:8787/api/tavily +# MYSEARCH_TAVILY_GATEWAY_TOKEN=th-xxxx-xxxxxxxxxxxx + # MySearch 内置 social gateway(可选) SOCIAL_GATEWAY_UPSTREAM_BASE_URL=https://api.x.ai/v1 SOCIAL_GATEWAY_UPSTREAM_RESPONSES_PATH=/responses diff --git a/mysearch/Dockerfile b/mysearch/Dockerfile new file mode 100644 index 0000000..213a1f8 --- /dev/null +++ b/mysearch/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.11-slim + +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + PYTHONPATH=/app + +WORKDIR /app + +COPY requirements.txt /tmp/mysearch-requirements.txt +RUN pip install --no-cache-dir -r /tmp/mysearch-requirements.txt + +COPY . /app/mysearch +RUN chmod +x /app/mysearch/docker-entrypoint.sh + +EXPOSE 8000 + +ENTRYPOINT ["/app/mysearch/docker-entrypoint.sh"] +CMD ["python", "-m", "mysearch", "--transport", "streamable-http", "--host", "0.0.0.0", "--port", "8000"] diff --git a/mysearch/README.md b/mysearch/README.md index cea1268..111c341 100644 --- a/mysearch/README.md +++ b/mysearch/README.md @@ -67,6 +67,13 @@ MYSEARCH_PROXY_API_KEY=mysp-... 
如果你还没有 Proxy,也可以直接连 provider。 +现在 Tavily 也支持显式两种接法: + +- `MYSEARCH_TAVILY_MODE=official` + - 自己导入和轮询 Tavily 官方 key +- `MYSEARCH_TAVILY_MODE=gateway` + - 用上游 gateway token 访问兼容网关,例如 `tavily-hikari` + ## 直连 provider 的最小配置 最小直连通常至少需要: @@ -76,6 +83,23 @@ MYSEARCH_TAVILY_API_KEY=tvly-... MYSEARCH_FIRECRAWL_API_KEY=fc-... ``` +如果你要让 Tavily 走上游 gateway: + +```env +MYSEARCH_TAVILY_MODE=gateway +MYSEARCH_TAVILY_GATEWAY_BASE_URL=http://127.0.0.1:8787/api/tavily +MYSEARCH_TAVILY_GATEWAY_TOKEN=th-xxxx-xxxxxxxxxxxx +MYSEARCH_FIRECRAWL_API_KEY=fc-... +``` + +如果你明确不走上游 gateway,就保持: + +```env +MYSEARCH_TAVILY_MODE=official +MYSEARCH_TAVILY_API_KEYS=tvly-a,tvly-b +MYSEARCH_TAVILY_KEYS_FILE=accounts.txt +``` + 如果你也要接 Exa: ```env @@ -124,6 +148,84 @@ python3 -m venv venv - 如果本机有 `codex` 或 `claude` 命令,就自动注册 `mysearch` MCP - 如果宿主已有 `mysearch` config,会直接复用其中的 `MYSEARCH_*` +## 作为 Docker MCP 服务运行 + +如果你已经把仓库根目录的一套 compose 跑起来: + +```bash +cd /path/to/MySearch-Proxy +docker compose up -d +``` + +这时 `mysearch` 会通过 `MYSEARCH_PROXY_BOOTSTRAP_TOKEN` 自动从 `proxy` 申请或复用自己的 `mysp-` token,不再要求你手动先创建 MySearch 通用 token 才能拉起远程 MCP。 + +默认远程 MCP 地址: + +- `streamableHTTP` + - `http://127.0.0.1:8000/mcp` +- `SSE` + - `http://127.0.0.1:8000/sse` + +如果你部署的是单容器 `mysearch-stack`,容器会同时对外提供 `9874` 控制台和 `8000/mcp`;`mysearch` 自己仍然通过容器内 `127.0.0.1:9874` 回连 Proxy。 + +部署完成后,如果你要让 `Codex` 直接使用这个远程 MCP,最小 `~/.codex/config.toml` 配置是: + +```toml +[mcp_servers.mysearch] +type = "http" +url = "http://127.0.0.1:8000/mcp" +``` + +如果你部署在远程主机: + +```toml +[mcp_servers.mysearch] +type = "http" +url = "https://mysearch.example.com/mcp" +``` + +如果你的远程入口额外套了 Bearer 鉴权,可以继续写成: + +```toml +[mcp_servers.mysearch] +type = "http" +url = "https://mysearch.example.com/mcp" +headers = { Authorization = "Bearer YOUR_MCP_TOKEN" } +``` + +加完配置后重启 `Codex`,再验收: + +```bash +codex mcp get mysearch +python3 skill/scripts/check_mysearch.py --health-only +``` + +如果你只想单独构建 `mysearch` 镜像,也可以: + +```bash +docker build -t mysearch-mcp ./mysearch 
+docker run --rm -p 8000:8000 \ + -e MYSEARCH_PROXY_BASE_URL=http://<your-proxy-host>:9874 \ + -e MYSEARCH_PROXY_API_KEY=mysp-... \ + mysearch-mcp +``` + +如果你更看重“部署最简单”，还可以直接跑单容器镜像： + +```bash +docker run -d \ + --name mysearch-stack \ + --restart unless-stopped \ + -p 9874:9874 \ + -p 8000:8000 \ + -e ADMIN_PASSWORD=change-me \ + -e MYSEARCH_PROXY_BOOTSTRAP_TOKEN=change-me-bootstrap-token \ + -v $(pwd)/mysearch-proxy-data:/data \ + skernelx/mysearch-stack:latest +``` + +这个镜像会在同一容器里同时启动 `proxy` 和 `mysearch`，并通过内部 bootstrap 接口自动创建或复用 `mysearch` 专用 token。 + ## 推荐验收 ### 1. 看 MCP 是否注册成功 @@ -207,14 +309,24 @@ MySearch 不是单一 provider 的壳。 - 优先 Tavily - `docs / github / pdf` - 优先 Firecrawl -- 补充网页发现 - - 可回退 Exa +- `pricing / changelog / 官方文档` + - 仍按 Firecrawl / 官方结果优先处理，不为凑数默认混入第三方页面 +- 补充网页发现 / 长尾资料 + - Exa 只做补位，不做默认主搜 - `social` - 走 xAI 或 compatible `/social/search` - `extract_url` - Firecrawl 优先，Tavily 回退 - `research` - - 搜索 + 抓取 + 可选 social 补充 + - 一轮小 research：搜索发现 + 正文抓取 + 可选 social 补充 + +补充约束： + +- `web` 与 `news` 使用不同排序口径： + - `web` 更看官方性、页面相关性 + - `news` 更看时效、媒体质量与事件一致性 +- `official / 官方 / 官网`、`docs / pricing / changelog` 一类查询会进入更严格的官方结果模式；如果官方域结果不足，会明确说明，而不是默认拿第三方结果补齐 +- Exa 只在 Tavily / Firecrawl 结果不足、长尾语义查询或显式 fallback 场景下介入 ## Intent 和 Strategy @@ -239,13 +351,13 @@ 适合记忆的简单规则： - 想快一点： - - `fast` + - `fast`：单 provider，最小候选池 - 想稳一点： - - `balanced` + - `balanced`：主 provider 为主，按模式补少量候选 - 想多做交叉验证： - - `verify` + - `verify`：扩大候选池并交叉验证，必要时启用 Exa 补位 - 想做小研究： - - `deep` + - `deep`：更偏 `research` 的较大候选池与更多正文抓取 ## 关键环境变量 diff --git a/mysearch/clients.py b/mysearch/clients.py index a42c5b1..4189631 100644 --- a/mysearch/clients.py +++ b/mysearch/clients.py @@ -6,20 +6,27 @@ import hashlib import json import re +import sys import threading import time from concurrent.futures import Future, ThreadPoolExecutor -from dataclasses import dataclass +from dataclasses import dataclass as _dataclass from datetime import date, datetime, time as dt_time, timezone from typing
import Any, Callable, Literal from urllib.error import HTTPError, URLError -from urllib.parse import urlparse +from urllib.parse import urlparse, urlunparse from urllib.request import Request, urlopen from mysearch.config import MySearchConfig, ProviderConfig from mysearch.keyring import MySearchKeyRing +def dataclass(*args, **kwargs): + if sys.version_info < (3, 10): + kwargs.pop("slots", None) + return _dataclass(*args, **kwargs) + + SearchMode = Literal["auto", "web", "news", "social", "docs", "research", "github", "pdf"] SearchIntent = Literal[ "auto", @@ -99,6 +106,80 @@ class RouteDecision: tavily_topic: str = "general" firecrawl_categories: list[str] | None = None sources: list[str] | None = None + fallback_chain: list[str] | None = None + result_profile: Literal["off", "web", "news", "resource"] = "off" + allow_exa_rescue: bool = False + + +@dataclass(slots=True) +class SearchRoutePolicy: + key: str + provider: str + fallback_chain: tuple[str, ...] = () + tavily_topic: str = "general" + firecrawl_categories: tuple[str, ...] 
= () + result_profile: Literal["off", "web", "news", "resource"] = "off" + allow_exa_rescue: bool = False + + +_MODE_PROVIDER_POLICY: dict[str, SearchRoutePolicy] = { + "web": SearchRoutePolicy( + key="web", + provider="tavily", + fallback_chain=("exa", "firecrawl"), + result_profile="web", + allow_exa_rescue=True, + ), + "news": SearchRoutePolicy( + key="news", + provider="tavily", + fallback_chain=("exa",), + tavily_topic="news", + result_profile="news", + allow_exa_rescue=True, + ), + "docs": SearchRoutePolicy( + key="docs", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + firecrawl_categories=("research",), + result_profile="resource", + ), + "github": SearchRoutePolicy( + key="github", + provider="firecrawl", + fallback_chain=("exa", "tavily"), + firecrawl_categories=("github",), + result_profile="resource", + ), + "pdf": SearchRoutePolicy( + key="pdf", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + firecrawl_categories=("pdf",), + result_profile="resource", + ), + "content": SearchRoutePolicy( + key="content", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + result_profile="resource", + ), + "resource": SearchRoutePolicy( + key="resource", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + firecrawl_categories=("research",), + result_profile="resource", + ), + "research": SearchRoutePolicy( + key="research", + provider="tavily", + fallback_chain=("exa", "firecrawl"), + result_profile="web", + allow_exa_rescue=True, + ), +} class MySearchClient: @@ -376,6 +457,8 @@ def _annotate_search_debug( include_content: bool, include_answer: bool, cache_hit: bool, + requested_max_results: int | None = None, + candidate_max_results: int | None = None, ) -> dict[str, Any]: annotated = copy.deepcopy(result) annotated["route_debug"] = { @@ -388,6 +471,17 @@ def _annotate_search_debug( "include_answer": include_answer, "cache_hit": cache_hit, } + if requested_max_results is not None: + 
annotated["route_debug"]["requested_max_results"] = requested_max_results + if candidate_max_results is not None: + annotated["route_debug"]["candidate_max_results"] = candidate_max_results + evidence = annotated.get("evidence") or {} + if evidence.get("official_mode"): + annotated["route_debug"]["official_mode"] = evidence.get("official_mode") + if "official_filter_applied" in evidence: + annotated["route_debug"]["official_filter_applied"] = bool( + evidence.get("official_filter_applied") + ) return annotated def search( @@ -451,9 +545,18 @@ def search( provider=provider, sources=normalized_sources, include_content=include_content, + include_domains=include_domains, allowed_x_handles=allowed_x_handles, excluded_x_handles=excluded_x_handles, ) + candidate_max_results = self._candidate_result_budget( + requested_max_results=max_results, + strategy=resolved_strategy, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, + route_provider=decision.provider, + ) cacheable = self._should_cache_search( decision=decision, normalized_sources=normalized_sources, @@ -490,6 +593,8 @@ def search( include_content=include_content, include_answer=effective_include_answer, cache_hit=True, + requested_max_results=max_results, + candidate_max_results=candidate_max_results, ) if decision.provider == "hybrid": @@ -560,6 +665,13 @@ def search( "web": web_result, "social": social_result, } + hybrid_result = self._augment_evidence_summary( + hybrid_result, + query=query, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, + ) hybrid_result = self._annotate_search_debug( hybrid_result, provider=provider, @@ -570,6 +682,8 @@ def search( include_content=include_content, include_answer=effective_include_answer, cache_hit=False, + requested_max_results=max_results, + candidate_max_results=candidate_max_results, ) return hybrid_result @@ -578,6 +692,9 @@ def search( decision=decision, sources=normalized_sources, strategy=resolved_strategy, + mode=mode, 
+ intent=resolved_intent, + include_domains=include_domains, ): result = self._search_web_blended( query=query, @@ -585,17 +702,17 @@ def search( intent=resolved_intent, strategy=resolved_strategy, decision=decision, - max_results=max_results, + max_results=candidate_max_results, include_content=include_content, include_answer=effective_include_answer, include_domains=include_domains, exclude_domains=exclude_domains, ) - elif decision.provider in ("tavily", "firecrawl", "exa"): + elif decision.provider in {"tavily", "firecrawl", "exa"}: result, fallback_info = self._search_with_fallback( primary_provider=decision.provider, query=query, - max_results=max_results, + max_results=candidate_max_results, mode=mode, intent=resolved_intent, decision=decision, @@ -623,6 +740,64 @@ def search( else: raise MySearchError(f"Unsupported route decision: {decision.provider}") + if self._should_attempt_exa_rescue( + query=query, + mode=mode, + intent=resolved_intent, + decision=decision, + result=result, + max_results=max_results, + include_domains=include_domains, + ): + result = self._apply_exa_rescue( + query=query, + primary_result=result, + max_results=candidate_max_results, + include_domains=include_domains, + exclude_domains=exclude_domains, + include_content=include_content, + ) + + if self._should_rerank_resource_results(mode=mode, intent=resolved_intent): + reranked_results = self._rerank_resource_results( + query=query, + mode=mode, + results=list(result.get("results") or []), + include_domains=include_domains, + ) + result["results"] = reranked_results + result["citations"] = self._align_citations_with_results( + results=reranked_results, + citations=list(result.get("citations") or []), + ) + elif self._should_rerank_general_results(result_profile=decision.result_profile): + reranked_results = self._rerank_general_results( + query=query, + result_profile=decision.result_profile, + results=list(result.get("results") or []), + include_domains=include_domains, + ) + 
result["results"] = reranked_results + result["citations"] = self._align_citations_with_results( + results=reranked_results, + citations=list(result.get("citations") or []), + ) + result = self._apply_official_resource_policy( + query=query, + mode=mode, + intent=resolved_intent, + result=result, + include_domains=include_domains, + ) + result = self._trim_search_payload(result, max_results=max_results) + result = self._augment_evidence_summary( + result, + query=query, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, + ) + route_reason = decision.reason if result.get("provider") == "hybrid" and resolved_strategy in {"balanced", "verify", "deep"}: route_reason = f"{route_reason};strategy={resolved_strategy} 已启用 Tavily + Firecrawl 交叉检索" @@ -668,6 +843,8 @@ def search( include_content=include_content, include_answer=effective_include_answer, cache_hit=False, + requested_max_results=max_results, + candidate_max_results=candidate_max_results, ) def extract_url( @@ -816,17 +993,39 @@ def research( query = query.strip() if not query: raise MySearchError("query must not be empty") - - web_mode = "news" if mode == "news" else ("docs" if mode in {"docs", "github", "pdf"} else "web") + resolved_intent = self._resolve_intent( + query=query, + mode=mode, + intent=intent, + sources=["web"], + ) + resolved_strategy = self._resolve_strategy( + mode=mode, + intent=resolved_intent, + strategy=strategy, + sources=["web"], + include_content=False, + ) + research_plan = self._resolve_research_plan( + query=query, + mode=mode, + intent=resolved_intent, + strategy=resolved_strategy, + web_max_results=web_max_results, + social_max_results=social_max_results, + scrape_top_n=scrape_top_n, + include_social=include_social, + include_domains=include_domains, + ) research_tasks: dict[str, Callable[[], Any]] = { "web": lambda: self.search( query=query, - mode=web_mode, - intent=intent, - strategy=strategy, + mode=research_plan["web_mode"], + intent=resolved_intent, + 
strategy=resolved_strategy, provider="auto", sources=["web"], - max_results=web_max_results, + max_results=research_plan["web_max_results"], include_content=False, include_answer=True, include_domains=include_domains, @@ -840,7 +1039,7 @@ def research( intent="status", provider="auto", sources=["x"], - max_results=social_max_results, + max_results=research_plan["social_max_results"], allowed_x_handles=allowed_x_handles, excluded_x_handles=excluded_x_handles, from_date=from_date, @@ -864,7 +1063,7 @@ def research( if not url or url in urls: continue urls.append(url) - if len(urls) >= scrape_top_n: + if len(urls) >= research_plan["scrape_top_n"]: break pages: list[dict[str, Any]] = [] @@ -907,32 +1106,434 @@ def research( web_search.get("citations") or [], (social.get("citations") or []) if social else [], ) + evidence = self._augment_research_evidence( + query=query, + mode=mode, + intent=web_search.get("intent", intent if intent != "auto" else "factual"), + requested_page_count=len(urls), + pages=pages, + citations=citations, + web_search=web_search, + social=social, + social_error=social_error, + providers_consulted=providers_consulted, + research_plan=research_plan, + ) return { "provider": "hybrid", "query": query, - "intent": web_search.get("intent", intent if intent != "auto" else "factual"), - "strategy": web_search.get("strategy", strategy if strategy != "auto" else "fast"), + "intent": web_search.get("intent", resolved_intent), + "strategy": web_search.get("strategy", resolved_strategy), "web_search": web_search, "pages": pages, "social_search": social, "social_error": social_error, "citations": citations, - "evidence": { - "providers_consulted": providers_consulted, - "web_result_count": len(candidate_results), - "page_count": len([page for page in pages if not page.get("error")]), - "citation_count": len(citations), - "verification": "cross-provider" - if web_provider == "hybrid" or len(providers_consulted) > 1 - else "single-provider", - }, + "evidence": 
evidence, "notes": [ "默认用 Tavily 做发现,Firecrawl 做正文抓取,X 搜索走 xAI Responses API", "如果某个 provider 没配 key,会保留错误并尽量返回其余部分", ], } + def _resolve_research_plan( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + strategy: SearchStrategy, + web_max_results: int, + social_max_results: int, + scrape_top_n: int, + include_social: bool, + include_domains: list[str] | None, + ) -> dict[str, Any]: + web_mode = "news" if mode == "news" else ("docs" if mode in {"docs", "github", "pdf"} else "web") + planned_web_max = web_max_results + planned_social_max = social_max_results if include_social else 0 + planned_scrape_top_n = scrape_top_n + + if mode in {"docs", "github", "pdf"} or self._should_use_strict_resource_policy( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ): + planned_web_max = max(planned_web_max, 4) + planned_scrape_top_n = max(1, min(planned_scrape_top_n, 2)) + elif mode == "news" or intent in {"news", "status"}: + planned_web_max = min(max(planned_web_max, 6), 8) + planned_scrape_top_n = min(max(planned_scrape_top_n, 4), 5) + if include_social: + planned_social_max = min(max(planned_social_max, 4), 6) + elif intent in {"comparison", "exploratory"} or strategy in {"verify", "deep"}: + planned_web_max = min(max(planned_web_max, 6), 10) + planned_scrape_top_n = min(max(planned_scrape_top_n, 4), 5) + if include_social: + planned_social_max = min(max(planned_social_max, 3), 5) + + return { + "web_mode": web_mode, + "web_max_results": planned_web_max, + "social_max_results": planned_social_max, + "scrape_top_n": planned_scrape_top_n, + } + + def _candidate_result_budget( + self, + *, + requested_max_results: int, + strategy: SearchStrategy, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + route_provider: str, + ) -> int: + if route_provider == "xai": + return requested_max_results + + budget = requested_max_results + strategy_floor = { + "fast": requested_max_results, 
+ "balanced": min(max(requested_max_results * 2, requested_max_results + 2), 10), + "verify": min(max(requested_max_results * 3, requested_max_results + 4), 15), + "deep": min(max(requested_max_results * 4, requested_max_results + 6), 20), + } + budget = max(budget, strategy_floor.get(strategy, requested_max_results)) + + if include_domains or self._should_rerank_resource_results(mode=mode, intent=intent): + budget = max(budget, min(max(requested_max_results * 2, requested_max_results + 3), 12)) + + return max(requested_max_results, budget) + + def _trim_search_payload(self, result: dict[str, Any], *, max_results: int) -> dict[str, Any]: + trimmed = dict(result) + results = list(trimmed.get("results") or [])[:max_results] + trimmed["results"] = results + trimmed["citations"] = self._align_citations_with_results( + results=results, + citations=list(trimmed.get("citations") or []), + ) + return trimmed + + def _augment_evidence_summary( + self, + result: dict[str, Any], + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + ) -> dict[str, Any]: + enriched = dict(result) + evidence = dict(enriched.get("evidence") or {}) + results = list(enriched.get("results") or []) + citations = list(enriched.get("citations") or []) + official_mode = self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) + providers_consulted = [ + item + for item in ( + evidence.get("providers_consulted") + or [enriched.get("provider", "")] + ) + if item + ] + evidence.setdefault("providers_consulted", providers_consulted) + evidence.setdefault( + "verification", + "cross-provider" if len(set(providers_consulted)) > 1 else "single-provider", + ) + evidence.setdefault("citation_count", len(citations)) + evidence.setdefault("official_mode", official_mode) + evidence.setdefault("official_filter_applied", False) + + source_domains = self._collect_source_domains(results=results, 
citations=citations) + official_source_count = self._count_official_resource_results( + query=query, + mode=mode, + intent=intent, + results=results, + include_domains=include_domains, + ) + conflicts = self._detect_evidence_conflicts( + mode=mode, + intent=intent, + results=results, + include_domains=include_domains, + source_domains=source_domains, + official_source_count=official_source_count, + providers_consulted=providers_consulted, + official_mode=str(evidence.get("official_mode") or official_mode), + ) + evidence["source_diversity"] = len(source_domains) + evidence["source_domains"] = source_domains[:5] + evidence["official_source_count"] = official_source_count + evidence["third_party_source_count"] = max(len(results) - official_source_count, 0) + evidence["confidence"] = self._estimate_search_confidence( + mode=mode, + intent=intent, + result_count=len(results), + source_domain_count=len(source_domains), + official_source_count=official_source_count, + verification=str(evidence.get("verification") or "single-provider"), + conflicts=conflicts, + official_mode=str(evidence.get("official_mode") or official_mode), + ) + evidence["conflicts"] = conflicts + enriched["evidence"] = evidence + return enriched + + def _resolve_official_result_mode( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + ) -> str: + if self._should_use_strict_resource_policy( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ): + return "strict" + if self._should_rerank_resource_results(mode=mode, intent=intent): + return "standard" + return "off" + + def _should_use_strict_resource_policy( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + ) -> bool: + query_lower = query.lower() + if include_domains: + return True + if mode in {"docs", "github", "pdf"}: + return True + if self._looks_like_official_query(query): + return 
True + if self._looks_like_pricing_query(query_lower): + return True + if self._looks_like_changelog_query(query_lower): + return True + if intent in {"resource", "tutorial"} and self._looks_like_docs_query(query_lower): + return True + return False + + def _looks_like_official_query(self, query: str) -> bool: + query_lower = query.lower() + if re.search(r"\bofficial\b", query_lower): + return True + official_markers = ( + "官网", + "官方", + "原文", + "定价官方", + "官方定价", + "官方价格", + "官方文档", + ) + return any(marker in query for marker in official_markers) + + def _looks_like_pricing_query(self, query_lower: str) -> bool: + keywords = [ + "price", + "pricing", + "plans", + "subscription", + "费用", + "套餐", + "定价", + "价格", + "售价", + ] + return any(keyword in query_lower for keyword in keywords) + + def _looks_like_changelog_query(self, query_lower: str) -> bool: + keywords = [ + "changelog", + "release notes", + "what's new", + "whats new", + "更新日志", + "发布说明", + "变更日志", + "版本更新", + ] + return any(keyword in query_lower for keyword in keywords) + + def _apply_official_resource_policy( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + result: dict[str, Any], + include_domains: list[str] | None, + ) -> dict[str, Any]: + enriched = dict(result) + results = list(enriched.get("results") or []) + citations = list(enriched.get("citations") or []) + official_mode = self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) + evidence = dict(enriched.get("evidence") or {}) + evidence.setdefault("official_mode", official_mode) + evidence.setdefault("official_filter_applied", False) + evidence.setdefault("official_candidate_count", 0) + if official_mode == "off" or not results: + enriched["evidence"] = evidence + return enriched + + official_candidates = self._collect_official_result_candidates( + query=query, + mode=mode, + results=results, + include_domains=include_domains, + 
strict_official=official_mode == "strict", + ) + evidence["official_candidate_count"] = len(official_candidates) + if official_mode == "strict" and official_candidates: + evidence["official_filter_applied"] = len(official_candidates) < len(results) + enriched["results"] = official_candidates + enriched["citations"] = self._align_citations_with_results( + results=official_candidates, + citations=citations, + ) + enriched["evidence"] = evidence + return enriched + + def _collect_official_result_candidates( + self, + *, + query: str, + mode: SearchMode, + results: list[dict[str, Any]], + include_domains: list[str] | None, + strict_official: bool, + ) -> list[dict[str, Any]]: + query_tokens = self._query_brand_tokens(query) + candidates: list[dict[str, Any]] = [] + for item in results: + if self._result_matches_official_policy( + item=item, + mode=mode, + query_tokens=query_tokens, + include_domains=include_domains, + strict_official=strict_official, + ): + candidates.append(dict(item)) + return candidates + + def _augment_research_evidence( + self, + *, + query: str, + mode: SearchMode, + intent: str, + requested_page_count: int, + pages: list[dict[str, Any]], + citations: list[dict[str, Any]], + web_search: dict[str, Any], + social: dict[str, Any] | None, + social_error: str, + providers_consulted: list[str], + research_plan: dict[str, Any], + ) -> dict[str, Any]: + successful_pages = [page for page in pages if not page.get("error")] + page_error_count = max(len(pages) - len(successful_pages), 0) + page_success_rate = ( + round(len(successful_pages) / requested_page_count, 2) + if requested_page_count > 0 + else 0.0 + ) + web_evidence = dict(web_search.get("evidence") or {}) + source_domains = self._collect_source_domains( + results=successful_pages, + citations=citations, + ) + conflicts = list(web_evidence.get("conflicts") or []) + if requested_page_count and not successful_pages: + conflicts.append("page-extraction-unavailable") + elif requested_page_count and 
page_error_count > 0: + conflicts.append("page-extraction-partial") + if social_error: + conflicts.append("social-search-unavailable") + + official_mode = str( + web_evidence.get("official_mode") + or self._resolve_official_result_mode( + query=query, + mode=mode, + intent=str(intent) if isinstance(intent, str) else "factual", + include_domains=None, + ) + ) + confidence = self._estimate_research_confidence( + search_confidence=str(web_evidence.get("confidence") or "low"), + page_success_count=len(successful_pages), + requested_page_count=requested_page_count, + social_present=social is not None, + social_error=bool(social_error), + conflicts=conflicts, + ) + return { + "providers_consulted": providers_consulted, + "web_result_count": len(web_search.get("results") or []), + "page_count": len(successful_pages), + "page_error_count": page_error_count, + "page_success_rate": page_success_rate, + "citation_count": len(citations), + "verification": "cross-provider" + if web_search.get("provider") == "hybrid" or len(providers_consulted) > 1 + else "single-provider", + "source_diversity": len(source_domains), + "source_domains": source_domains[:5], + "official_source_count": int(web_evidence.get("official_source_count") or 0), + "official_mode": official_mode, + "search_confidence": str(web_evidence.get("confidence") or "low"), + "confidence": confidence, + "conflicts": conflicts, + "research_plan": research_plan, + } + + def _estimate_research_confidence( + self, + *, + search_confidence: str, + page_success_count: int, + requested_page_count: int, + social_present: bool, + social_error: bool, + conflicts: list[str], + ) -> str: + if "strict-official-unmet" in conflicts or "page-extraction-unavailable" in conflicts: + return "low" + if search_confidence == "high" and page_success_count > 0 and not social_error: + return "high" + if search_confidence in {"high", "medium"} and ( + page_success_count > 0 or requested_page_count <= 0 or not social_present + ): + return 
"medium" + if search_confidence == "high": + return "medium" + return "low" if conflicts else "medium" + def _should_request_search_answer( self, *, @@ -966,35 +1567,59 @@ def _route_search( provider: ProviderName, sources: list[str] | None, include_content: bool, + include_domains: list[str] | None, allowed_x_handles: list[str] | None, excluded_x_handles: list[str] | None, ) -> RouteDecision: normalized_sources = sorted(set(sources or ["web"])) - query_lower = query.lower() + policy = self._route_policy_for_request( + query=query, + mode=mode, + intent=intent, + include_content=include_content, + ) if provider != "auto": if provider == "tavily": return RouteDecision( provider="tavily", reason="显式指定 Tavily", - tavily_topic="news" if mode == "news" or intent in {"news", "status"} else "general", + tavily_topic=policy.tavily_topic, + fallback_chain=self._explicit_provider_fallback_chain( + provider=provider, + policy=policy, + ), + result_profile=policy.result_profile, + allow_exa_rescue=policy.allow_exa_rescue and policy.provider == "tavily", ) if provider == "firecrawl": return RouteDecision( provider="firecrawl", reason="显式指定 Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), + firecrawl_categories=list(policy.firecrawl_categories) + or self._firecrawl_categories(mode, intent), + fallback_chain=self._explicit_provider_fallback_chain( + provider=provider, + policy=policy, + ), + result_profile=policy.result_profile, ) if provider == "exa": return RouteDecision( provider="exa", reason="显式指定 Exa", + fallback_chain=self._explicit_provider_fallback_chain( + provider=provider, + policy=policy, + ), + result_profile=policy.result_profile, ) if provider == "xai": return RouteDecision( provider="xai", reason="显式指定 xAI/X 搜索", sources=normalized_sources, + result_profile="off", ) if normalized_sources == ["web", "x"] or ( @@ -1007,6 +1632,7 @@ def _route_search( provider="xai", reason="社交舆情 / X 搜索更适合走 xAI", sources=["x"], + result_profile="off", ) 
if allowed_x_handles or excluded_x_handles: @@ -1014,139 +1640,47 @@ def _route_search( provider="xai", reason="检测到 X handle 过滤条件", sources=["x"], + result_profile="off", ) + if policy.key in {"docs", "resource"} and include_domains and self._domains_prefer_firecrawl_discovery(include_domains): + reason = "检测到受限 / 社区域名,优先用 Firecrawl 做站内发现" + elif policy.key in {"docs", "github", "pdf"}: + reason = "文档 / GitHub / PDF 默认走 Firecrawl,页面发现与正文抓取保持一致" + elif policy.key == "content": + reason = "请求里需要正文内容,优先走 Firecrawl" + elif policy.key == "news": + reason = "状态 / 新闻类查询默认走 Tavily" + elif policy.key == "resource": + reason = "resource / docs 查询默认走 Firecrawl" + elif policy.key == "research": + reason = "research 发现阶段默认走 Tavily" + else: + reason = "普通网页检索默认走 Tavily" + return self._decision_from_policy(policy=policy, reason=reason) - if mode in {"docs", "github", "pdf"}: - if include_content: - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,文档正文查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="文档正文查询优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - if self._provider_can_serve(self.config.tavily): - return RouteDecision( - provider="tavily", - reason="文档类查询先用 Tavily 做官方页面发现,正文再交给 Firecrawl", - tavily_topic="general", - ) - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,文档类查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="文档 / GitHub / PDF 内容优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - - if include_content: - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,正文查询回退到 Exa", - ) - 
return RouteDecision( - provider="firecrawl", - reason="请求里需要正文内容,优先用 Firecrawl search + scrape", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - - if intent in {"news", "status"} or mode == "news" or self._looks_like_news_query(query_lower): - if not self._provider_can_serve(self.config.tavily) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Tavily 未配置,新闻 / 状态类查询回退到 Exa", - ) - return RouteDecision( - provider="tavily", - reason="状态 / 新闻类查询默认走 Tavily", - tavily_topic="news", - ) - - if intent == "resource" or self._looks_like_docs_query(query_lower): - if include_content: - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,resource 正文查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="resource / docs 正文查询优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories("docs", intent), - ) - if self._provider_can_serve(self.config.tavily): - return RouteDecision( - provider="tavily", - reason="resource / docs 查询先用 Tavily 做页面发现,正文再交给 Firecrawl", - tavily_topic="general", - ) - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa + def _domains_prefer_firecrawl_discovery(self, include_domains: list[str] | None) -> bool: + if not include_domains: + return False + firecrawl_preferred_domains = { + "dev.to", + "juejin.cn", + "linux.do", + "medium.com", + "mp.weixin.qq.com", + "notion.site", + "notion.so", + "substack.com", + "weixin.qq.com", + "zhihu.com", + } + for domain in include_domains: + cleaned_domain = self._clean_hostname(domain) + if any( + self._domain_matches(cleaned_domain, preferred) + for preferred in firecrawl_preferred_domains ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,resource / docs 类查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - 
reason="resource / docs 类查询优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories("docs", intent), - ) - - if mode == "research": - if self._provider_can_serve(self.config.tavily): - return RouteDecision( - provider="tavily", - reason="research 模式先用 Tavily 做发现,再按策略决定是否扩展验证", - tavily_topic="general", - ) - if self._provider_can_serve(self.config.exa): - return RouteDecision( - provider="exa", - reason="Tavily 未配置,research 发现阶段回退到 Exa", - ) - if self._provider_can_serve(self.config.firecrawl): - return RouteDecision( - provider="firecrawl", - reason="Tavily / Exa 未配置,research 发现阶段回退到 Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - return RouteDecision( - provider="tavily", - reason="research 模式默认走 Tavily(无可用替代)", - tavily_topic="general", - ) - - if not self._provider_can_serve(self.config.tavily) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Tavily 未配置,普通网页检索回退到 Exa", - ) - - return RouteDecision( - provider="tavily", - reason="普通网页检索默认走 Tavily", - tavily_topic="general", - ) + return True + return False def _resolve_intent( self, @@ -1204,6 +1738,81 @@ def _resolve_strategy( return "balanced" return "fast" + def _route_policy_for_request( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_content: bool, + ) -> SearchRoutePolicy: + query_lower = query.lower() + if mode == "research": + return _MODE_PROVIDER_POLICY["research"] + if include_content: + return _MODE_PROVIDER_POLICY["content"] + if mode in {"docs", "github", "pdf"}: + return _MODE_PROVIDER_POLICY[mode] + if intent in {"resource", "tutorial"} or self._looks_like_docs_query(query_lower): + return _MODE_PROVIDER_POLICY["resource"] + if intent in {"news", "status"} or mode == "news" or self._looks_like_news_query(query_lower): + return _MODE_PROVIDER_POLICY["news"] + return _MODE_PROVIDER_POLICY["web"] + + def _decision_from_policy( + self, + *, + policy: 
SearchRoutePolicy, + reason: str, + sources: list[str] | None = None, + ) -> RouteDecision: + provider, fallback_chain = self._resolve_available_policy_chain(policy=policy) + return RouteDecision( + provider=provider, + reason=reason, + tavily_topic=policy.tavily_topic, + firecrawl_categories=list(policy.firecrawl_categories) or None, + sources=sources, + fallback_chain=fallback_chain, + result_profile=policy.result_profile, + allow_exa_rescue=policy.allow_exa_rescue, + ) + + def _resolve_available_policy_chain( + self, + *, + policy: SearchRoutePolicy, + ) -> tuple[ProviderName, list[str] | None]: + ordered: list[ProviderName] = [policy.provider, *policy.fallback_chain] + available: list[ProviderName] = [] + for provider_name in ordered: + config = self._provider_config_for_name(provider_name) + if self._provider_can_serve(config): + available.append(provider_name) + if not available: + return policy.provider, list(policy.fallback_chain) or None + return available[0], list(available[1:]) or None + + def _provider_config_for_name(self, provider_name: ProviderName) -> ProviderConfig: + if provider_name == "tavily": + return self.config.tavily + if provider_name == "firecrawl": + return self.config.firecrawl + if provider_name == "exa": + return self.config.exa + return self.config.xai + + def _explicit_provider_fallback_chain( + self, + *, + provider: ProviderName, + policy: SearchRoutePolicy, + ) -> list[str] | None: + if provider == "xai": + return None + chain = [item for item in policy.fallback_chain if item != provider] + return list(chain) or None + def _should_blend_web_providers( self, *, @@ -1211,6 +1820,9 @@ def _should_blend_web_providers( decision: RouteDecision, sources: list[str], strategy: SearchStrategy, + mode: SearchMode = "auto", + intent: ResolvedSearchIntent = "factual", + include_domains: list[str] | None = None, ) -> bool: if requested_provider != "auto": return False @@ -1220,20 +1832,18 @@ def _should_blend_web_providers( return False if "x" 
in sources: return False + if mode == "news" or intent in {"news", "status"}: + return False + if include_domains: + return False + if mode in {"docs", "github", "pdf"}: + return False + if intent in {"resource", "tutorial"}: + return False return self._provider_can_serve(self.config.tavily) and self._provider_can_serve( self.config.firecrawl ) - # ------------------------------------------------------------------ - # Provider-level fallback: if the chosen provider fails, try others - # ------------------------------------------------------------------ - - _SEARCH_FALLBACK_CHAIN: dict[str, list[str]] = { - "tavily": ["exa", "firecrawl"], - "firecrawl": ["exa", "tavily"], - "exa": ["firecrawl", "tavily"], - } - def _search_with_fallback( self, *, @@ -1248,8 +1858,7 @@ def _search_with_fallback( include_domains: list[str] | None, exclude_domains: list[str] | None, ) -> tuple[dict[str, Any], dict[str, Any] | None]: - """Try the primary provider; on failure, walk the fallback chain.""" - chain = [primary_provider] + self._SEARCH_FALLBACK_CHAIN.get(primary_provider, []) + chain = [primary_provider, *(decision.fallback_chain or [])] last_error: Exception | None = None for provider_name in chain: try: @@ -1279,9 +1888,7 @@ def _search_with_fallback( except Exception as exc: last_error = MySearchError(f"{provider_name}: {exc}") continue - raise MySearchError( - f"All providers failed for query '{query[:80]}': {last_error}" - ) + raise MySearchError(f"All providers failed for query '{query[:80]}': {last_error}") def _dispatch_single_provider( self, @@ -1297,7 +1904,6 @@ def _dispatch_single_provider( include_domains: list[str] | None, exclude_domains: list[str] | None, ) -> dict[str, Any]: - """Call exactly one search provider by name.""" if provider_name == "tavily": return self._search_tavily( query=query, @@ -1327,6 +1933,246 @@ def _dispatch_single_provider( ) raise MySearchError(f"Unknown provider: {provider_name}") + def _should_attempt_exa_rescue( + self, + *, + 
query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + decision: RouteDecision, + result: dict[str, Any], + max_results: int, + include_domains: list[str] | None, + ) -> bool: + if not decision.allow_exa_rescue: + return False + if not self._provider_can_serve(self.config.exa): + return False + if result.get("provider") in {"exa", "xai"}: + return False + if result.get("fallback"): + return False + if include_domains or self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) == "strict": + return False + results = list(result.get("results") or []) + if len(results) >= min(max_results, 3): + return False + query_terms = re.findall(r"[a-z0-9\u4e00-\u9fff]+", query.lower()) + long_tail_signal = len(query_terms) >= 6 or len(query) >= 48 + return mode == "news" or intent in {"comparison", "exploratory", "tutorial"} or long_tail_signal + + def _apply_exa_rescue( + self, + *, + query: str, + primary_result: dict[str, Any], + max_results: int, + include_domains: list[str] | None, + exclude_domains: list[str] | None, + include_content: bool, + ) -> dict[str, Any]: + exa_result = self._search_exa( + query=query, + max_results=max_results, + include_domains=include_domains, + exclude_domains=exclude_domains, + include_content=include_content, + ) + if not exa_result.get("results"): + return primary_result + + merged = self._merge_search_payloads( + primary_result=primary_result, + secondary_result=exa_result, + max_results=max_results, + ) + return { + "provider": "hybrid", + "route_selected": f"{primary_result.get('provider', 'unknown')}+exa", + "query": query, + "answer": primary_result.get("answer") or exa_result.get("answer", ""), + "results": merged["results"], + "citations": merged["citations"], + "evidence": { + "providers_consulted": [ + item + for item in [primary_result.get("provider"), exa_result.get("provider")] + if item + ], + "matched_results": merged["matched_results"], + 
"citation_count": len(merged["citations"]), + "verification": "fallback", + }, + "primary_search": primary_result, + "secondary_search": exa_result, + "secondary_error": "", + "fallback": { + "from": primary_result.get("provider", "unknown"), + "to": "exa", + "reason": "primary provider returned sparse results; Exa rescue engaged", + }, + } + + def _should_rerank_general_results( + self, + *, + result_profile: str, + ) -> bool: + return result_profile in {"web", "news"} + + def _rerank_general_results( + self, + *, + query: str, + result_profile: Literal["web", "news"], + results: list[dict[str, Any]], + include_domains: list[str] | None, + ) -> list[dict[str, Any]]: + if len(results) < 2: + return results + ranked = sorted( + enumerate(results), + key=lambda pair: ( + self._general_result_rank( + query=query, + result_profile=result_profile, + item=pair[1], + include_domains=include_domains, + ), + -pair[0], + ), + reverse=True, + ) + return [dict(pair[1]) for pair in ranked] + + def _general_result_rank( + self, + *, + query: str, + result_profile: Literal["web", "news"], + item: dict[str, Any], + include_domains: list[str] | None, + ) -> tuple[int, int, int, int, int, int, int, int]: + if result_profile == "news": + return self._news_result_rank(item=item, include_domains=include_domains) + return self._web_result_rank( + query=query, + item=item, + include_domains=include_domains, + ) + + def _news_result_rank( + self, + *, + item: dict[str, Any], + include_domains: list[str] | None, + ) -> tuple[int, int, int, int, int, int, int, int]: + hostname = self._result_hostname(item) + include_match = int( + bool(include_domains) + and any(self._domain_matches(hostname, domain) for domain in include_domains or []) + ) + mainstream = int(self._is_mainstream_news_domain(hostname)) + article_shape = int(self._looks_like_news_article_result(item)) + has_timestamp = int(self._result_published_timestamp(item) is not None) + timestamp_score = 
int(self._result_published_timestamp(item) or 0) + content_score, snippet_score, title_score = self._result_quality_score(item) + return ( + include_match, + mainstream, + article_shape, + has_timestamp, + timestamp_score, + content_score, + snippet_score, + title_score, + ) + + def _web_result_rank( + self, + *, + query: str, + item: dict[str, Any], + include_domains: list[str] | None, + ) -> tuple[int, int, int, int, int, int, int, int]: + hostname = self._result_hostname(item) + registered_domain = self._registered_domain(hostname) + title_text = (item.get("title") or "").lower() + query_tokens = self._query_brand_tokens(query) + include_match = int( + bool(include_domains) + and any(self._domain_matches(hostname, domain) for domain in include_domains or []) + ) + registered_domain_label_match = int( + self._registered_domain_label_matches( + registered_domain=registered_domain, + query_tokens=query_tokens, + ) + ) + host_brand_match = int(any(token in hostname for token in query_tokens)) + title_brand_match = int(any(token in title_text for token in query_tokens)) + non_aggregator = int(not self._is_obvious_web_aggregator(registered_domain)) + matched_provider_count = len(item.get("matched_providers") or []) + content_score, snippet_score, title_score = self._result_quality_score(item) + return ( + include_match, + registered_domain_label_match, + host_brand_match, + title_brand_match, + non_aggregator, + matched_provider_count, + content_score, + max(snippet_score, title_score), + ) + + def _result_published_timestamp(self, item: dict[str, Any]) -> float | None: + for field in ("published_date", "publishedDate", "created_at"): + parsed = self._parse_result_timestamp(item.get(field)) + if parsed is not None: + return parsed.timestamp() + return None + + def _is_mainstream_news_domain(self, hostname: str) -> bool: + registered_domain = self._registered_domain(hostname) + mainstream_domains = { + "apnews.com", + "bbc.com", + "bloomberg.com", + "cnn.com", + 
"ft.com", + "latimes.com", + "nytimes.com", + "reuters.com", + "theguardian.com", + "theverge.com", + "washingtonpost.com", + "wsj.com", + "xinhuanet.com", + } + return registered_domain in mainstream_domains + + def _looks_like_news_article_result(self, item: dict[str, Any]) -> bool: + path = urlparse(item.get("url", "")).path.lower() + return any( + marker in path + for marker in ("/news/", "/story/", "/stories/", "/article/", "/articles/", "/202") + ) + + def _is_obvious_web_aggregator(self, registered_domain: str) -> bool: + return registered_domain in { + "linkedin.com", + "medium.com", + "quora.com", + "reddit.com", + "researchgate.net", + "stackoverflow.com", + } + def _search_web_blended( self, *, @@ -1383,12 +2229,10 @@ def _search_web_blended( } blended_results, blended_errors = self._execute_parallel(tasks, max_workers=2) - primary_failed = "primary" in blended_errors secondary_failed = "secondary" in blended_errors if primary_failed and not secondary_failed: - # Primary down but secondary succeeded — use secondary as sole result primary_result = blended_results["secondary"] primary_result["fallback"] = { "from": decision.provider, @@ -1398,7 +2242,6 @@ def _search_web_blended( secondary_result = None secondary_error = "" elif primary_failed and secondary_failed: - # Both failed — raise with context from both primary_err = str(blended_errors["primary"])[:150] secondary_err = str(blended_errors["secondary"])[:150] raise MySearchError( @@ -1458,6 +2301,55 @@ def _search_tavily( include_content: bool, include_domains: list[str] | None, exclude_domains: list[str] | None, + ) -> dict[str, Any]: + include_domains = [item.strip() for item in (include_domains or []) if item and item.strip()] + exclude_domains = [item.strip() for item in (exclude_domains or []) if item and item.strip()] + + response = self._search_tavily_once( + query=query, + max_results=max_results, + topic=topic, + include_answer=include_answer, + include_content=include_content, + 
include_domains=include_domains, + exclude_domains=exclude_domains, + ) + if response.get("results") or not include_domains: + return response + + retry_response = self._search_tavily_domain_retry( + query=query, + max_results=max_results, + topic=topic, + include_content=include_content, + include_domains=include_domains, + exclude_domains=exclude_domains, + ) + if retry_response is not None: + return retry_response + + fallback_response = self._search_tavily_domain_fallback( + query=query, + max_results=max_results, + include_content=include_content, + include_domains=include_domains, + exclude_domains=exclude_domains, + ) + if fallback_response is not None: + return fallback_response + + return response + + def _search_tavily_once( + self, + *, + query: str, + max_results: int, + topic: str, + include_answer: bool, + include_content: bool, + include_domains: list[str] | None, + exclude_domains: list[str] | None, ) -> dict[str, Any]: provider = self.config.tavily key = self._get_key_or_raise(provider) @@ -1474,37 +2366,208 @@ def _search_tavily( if exclude_domains: payload["exclude_domains"] = exclude_domains - response = self._request_json( - provider=provider, - method="POST", - path=provider.path("search"), - payload=payload, - key=key.key, + response = self._request_json( + provider=provider, + method="POST", + path=provider.path("search"), + payload=payload, + key=key.key, + ) + results = [ + { + "provider": "tavily", + "source": "web", + "title": item.get("title", ""), + "url": item.get("url", ""), + "snippet": item.get("content", ""), + "content": item.get("raw_content", "") if include_content else "", + "score": item.get("score"), + "published_date": item.get("published_date") + or item.get("publishedDate") + or item.get("published_at") + or item.get("publishedAt") + or "", + } + for item in response.get("results", []) + ] + filtered_results = self._filter_results_by_domains( + results, + include_domains=include_domains, + exclude_domains=exclude_domains, 
+ ) + return { + "provider": "tavily", + "transport": key.source, + "query": response.get("query", query), + "answer": response.get("answer", ""), + "request_id": response.get("request_id", ""), + "response_time": response.get("response_time"), + "results": filtered_results, + "citations": [ + {"title": item.get("title", ""), "url": item.get("url", "")} + for item in filtered_results + if item.get("url") + ], + } + + def _search_tavily_domain_retry( + self, + *, + query: str, + max_results: int, + topic: str, + include_content: bool, + include_domains: list[str], + exclude_domains: list[str] | None, + ) -> dict[str, Any] | None: + per_domain_results = [] + retried_domains: list[str] = [] + for domain in include_domains: + domain_result = self._search_tavily_once( + query=self._build_firecrawl_domain_query( + query=query, + include_domain=domain, + exclude_domains=exclude_domains, + ), + max_results=max_results, + topic=topic, + include_answer=False, + include_content=include_content, + include_domains=None, + exclude_domains=exclude_domains, + ) + filtered_results = self._filter_results_by_domains( + domain_result.get("results", []), + include_domains=[domain], + exclude_domains=exclude_domains, + ) + if not filtered_results: + continue + domain_result = dict(domain_result) + domain_result["results"] = filtered_results + domain_result["citations"] = self._align_citations_with_results( + results=filtered_results, + citations=list(domain_result.get("citations") or []), + ) + per_domain_results.append(domain_result) + retried_domains.append(domain) + + if not per_domain_results: + return None + + merged_results = self._merge_ranked_results( + [result.get("results", []) for result in per_domain_results], + max_results=max_results, + ) + citations = self._align_citations_with_results( + results=merged_results, + citations=self._dedupe_citations( + *[result.get("citations", []) for result in per_domain_results] + ), + ) + return { + "provider": "tavily", + "transport": 
per_domain_results[0].get("transport", "env"), + "query": query, + "answer": "", + "request_id": "", + "response_time": None, + "results": merged_results, + "citations": citations, + "route_debug": { + "domain_filter_mode": "site_query_retry", + "retried_include_domains": retried_domains, + }, + } + + def _search_tavily_domain_fallback( + self, + *, + query: str, + max_results: int, + include_content: bool, + include_domains: list[str], + exclude_domains: list[str] | None, + ) -> dict[str, Any] | None: + if not self._provider_can_serve(self.config.firecrawl): + return None + + categories = ( + self._firecrawl_categories("docs", "resource") + if self._looks_like_docs_query(query.lower()) or self._looks_like_tutorial_query(query.lower()) + else [] + ) + per_domain_results = [] + citations = [] + seen_urls: set[str] = set() + for domain in include_domains: + domain_result = self._search_firecrawl_once( + query=self._build_firecrawl_domain_query( + query=query, + include_domain=domain, + exclude_domains=exclude_domains, + ), + max_results=max_results, + categories=categories, + include_content=include_content, + ) + if not domain_result.get("results"): + retry_result = self._search_firecrawl_domain_retry( + query=query, + max_results=max_results, + categories=categories, + include_content=include_content, + include_domain=domain, + exclude_domains=exclude_domains, + ) + if retry_result is not None: + domain_result = retry_result + per_domain_results.append(domain_result) + for item in domain_result.get("results", []): + url = item.get("url", "") + if not url or url in seen_urls: + continue + seen_urls.add(url) + citations.append({"title": item.get("title", ""), "url": url}) + + merged_results = self._merge_ranked_results( + [result.get("results", []) for result in per_domain_results], + max_results=max_results, ) + if not merged_results: + return None + return { - "provider": "tavily", - "transport": key.source, - "query": response.get("query", query), - "answer": 
response.get("answer", ""), - "request_id": response.get("request_id", ""), - "response_time": response.get("response_time"), - "results": [ - { - "provider": "tavily", - "source": "web", - "title": item.get("title", ""), - "url": item.get("url", ""), - "snippet": item.get("content", ""), - "content": item.get("raw_content", "") if include_content else "", - "score": item.get("score"), - } - for item in response.get("results", []) - ], - "citations": [ - {"title": item.get("title", ""), "url": item.get("url", "")} - for item in response.get("results", []) - if item.get("url") - ], + "provider": "hybrid", + "route_selected": "tavily+firecrawl", + "query": query, + "answer": "", + "results": merged_results, + "citations": citations[:max_results], + "primary_search": { + "provider": "tavily", + "query": query, + "results": [], + "citations": [], + }, + "secondary_search": { + "provider": "firecrawl", + "query": query, + "results": merged_results, + "citations": citations[:max_results], + }, + "secondary_error": "", + "evidence": { + "providers_consulted": ["tavily", "firecrawl"], + "matched_results": 0, + "citation_count": len(citations[:max_results]), + "verification": "fallback", + }, + "fallback": { + "from": "tavily", + "to": "firecrawl", + "reason": "tavily returned 0 results for domain-filtered search", + }, } def _search_firecrawl( @@ -1734,6 +2797,10 @@ def _search_firecrawl_once( "url": item.get("url", ""), "snippet": item.get("description", "") or item.get("markdown", ""), "content": item.get("markdown", "") if include_content else "", + "published_date": item.get("publishedDate") + or item.get("published_date") + or item.get("published_at") + or "", } ) @@ -2213,6 +3280,7 @@ def _build_xai_responses_payload( to_date: str | None, include_x_images: bool, include_x_videos: bool, + model: str | None = None, ) -> dict[str, Any]: tools: list[dict[str, Any]] = [] if "web" in sources: @@ -2244,7 +3312,7 @@ def _build_xai_responses_payload( augmented_query = 
f"{query}\n\nReturn up to {max_results} relevant results with concise sourcing." return { - "model": self.config.xai_model, + "model": (model or self.config.xai_model).strip(), "input": [ { "role": "user", @@ -2528,6 +3596,7 @@ def _rerank_resource_results( return results query_tokens = self._query_brand_tokens(query) + strict_official = bool(include_domains) or self._looks_like_official_query(query) ranked = sorted( enumerate(results), key=lambda pair: ( @@ -2536,6 +3605,7 @@ def _rerank_resource_results( item=pair[1], query_tokens=query_tokens, include_domains=include_domains, + strict_official=strict_official, ), -pair[0], ), @@ -2550,43 +3620,44 @@ def _resource_result_rank( item: dict[str, Any], query_tokens: list[str], include_domains: list[str] | None, - ) -> tuple[int, int, int, int, int, int, int, int, int, int, int]: - url = item.get("url", "") - hostname = self._result_hostname(item) - registered_domain = self._registered_domain(hostname) - title_text = (item.get("title") or "").lower() - include_match = int( - bool(include_domains) - and any(self._domain_matches(hostname, domain) for domain in include_domains or []) - ) - host_brand_match = int( - any(token in hostname or token in registered_domain for token in query_tokens) - ) - title_brand_match = int(any(token in title_text for token in query_tokens)) - docs_shape_match = int( - self._looks_like_resource_result( - url=url, - hostname=hostname, - title_text=title_text, - mode=mode, - ) + strict_official: bool, + ) -> tuple[int, int, int, int, int, int, int, int, int, int, int, int, int]: + flags = self._resource_result_flags( + mode=mode, + item=item, + query_tokens=query_tokens, + include_domains=include_domains, ) + include_match = int(flags["include_match"]) + host_brand_match = int(flags["host_brand_match"]) + registered_domain_label_match = int(flags["registered_domain_label_match"]) + title_brand_match = int(flags["title_brand_match"]) + docs_shape_match = int(flags["docs_shape_match"]) 
github_bonus = int( mode == "github" - and hostname in {"github.com", "raw.githubusercontent.com"} + and flags["hostname"] in {"github.com", "raw.githubusercontent.com"} ) - pdf_bonus = int(mode == "pdf" and self._looks_like_pdf_url(url)) - non_third_party = int( - not self._is_obvious_third_party_resource( - hostname=hostname, - registered_domain=registered_domain, + pdf_bonus = int(mode == "pdf" and self._looks_like_pdf_url(item.get("url", ""))) + non_third_party = int(flags["non_third_party"]) + official_resource_match = int( + self._is_probably_official_resource_result( mode=mode, + hostname=str(flags["hostname"]), + include_match=bool(include_match), + registered_domain_label_match=bool(registered_domain_label_match), + host_brand_match=bool(host_brand_match), + title_brand_match=bool(title_brand_match), + docs_shape_match=bool(docs_shape_match), + non_third_party=bool(non_third_party), + official_query=strict_official, ) ) matched_provider_count = len(item.get("matched_providers") or []) content_score, snippet_score, title_score = self._result_quality_score(item) return ( include_match, + official_resource_match, + registered_domain_label_match, github_bonus, pdf_bonus, host_brand_match, @@ -2599,6 +3670,38 @@ def _resource_result_rank( title_score, ) + def _is_probably_official_resource_result( + self, + *, + mode: SearchMode, + hostname: str, + include_match: bool, + registered_domain_label_match: bool, + host_brand_match: bool, + title_brand_match: bool, + docs_shape_match: bool, + non_third_party: bool, + official_query: bool, + ) -> bool: + if include_match: + return True + if mode in {"github", "pdf"}: + return True + if not non_third_party: + return False + if official_query and registered_domain_label_match: + return True + if not docs_shape_match: + return False + official_host_surface = any( + part in {"api", "developer", "developers", "docs", "help", "platform", "reference", "support"} + for part in hostname.split(".") + if part + ) + return 
registered_domain_label_match or (host_brand_match and official_host_surface) or ( + title_brand_match and official_host_surface + ) + def _align_citations_with_results( self, *, @@ -2712,6 +3815,87 @@ def _domain_matches(self, hostname: str, domain: str) -> bool: cleaned_host == cleaned_domain or cleaned_host.endswith(f".{cleaned_domain}") ) + def _registered_domain_label_matches(self, *, registered_domain: str, query_tokens: list[str]) -> bool: + labels = [item for item in self._clean_hostname(registered_domain).split(".") if item] + return any( + label == token or label.startswith(f"{token}-") or label.startswith(f"{token}_") + for token in query_tokens + for label in labels + ) + + def _resource_result_flags( + self, + *, + mode: SearchMode, + item: dict[str, Any], + query_tokens: list[str], + include_domains: list[str] | None, + ) -> dict[str, Any]: + url = item.get("url", "") + hostname = self._result_hostname(item) + registered_domain = self._registered_domain(hostname) + title_text = (item.get("title") or "").lower() + include_match = bool( + include_domains + and any(self._domain_matches(hostname, domain) for domain in include_domains or []) + ) + host_brand_match = any( + token in hostname or token in registered_domain for token in query_tokens + ) + registered_domain_label_match = self._registered_domain_label_matches( + registered_domain=registered_domain, + query_tokens=query_tokens, + ) + title_brand_match = any(token in title_text for token in query_tokens) + docs_shape_match = self._looks_like_resource_result( + url=url, + hostname=hostname, + title_text=title_text, + mode=mode, + ) + non_third_party = not self._is_obvious_third_party_resource( + hostname=hostname, + registered_domain=registered_domain, + mode=mode, + ) + return { + "hostname": hostname, + "registered_domain": registered_domain, + "include_match": include_match, + "host_brand_match": host_brand_match, + "registered_domain_label_match": registered_domain_label_match, + 
"title_brand_match": title_brand_match, + "docs_shape_match": docs_shape_match, + "non_third_party": non_third_party, + } + + def _result_matches_official_policy( + self, + *, + item: dict[str, Any], + mode: SearchMode, + query_tokens: list[str], + include_domains: list[str] | None, + strict_official: bool, + ) -> bool: + flags = self._resource_result_flags( + mode=mode, + item=item, + query_tokens=query_tokens, + include_domains=include_domains, + ) + return self._is_probably_official_resource_result( + mode=mode, + hostname=str(flags["hostname"]), + include_match=bool(flags["include_match"]), + registered_domain_label_match=bool(flags["registered_domain_label_match"]), + host_brand_match=bool(flags["host_brand_match"]), + title_brand_match=bool(flags["title_brand_match"]), + docs_shape_match=bool(flags["docs_shape_match"]), + non_third_party=bool(flags["non_third_party"]), + official_query=strict_official, + ) + def _query_brand_tokens(self, query: str) -> list[str]: stopwords = { "a", @@ -2849,6 +4033,115 @@ def _is_obvious_third_party_resource( } return registered_domain in third_party_domains + def _collect_source_domains( + self, + *, + results: list[dict[str, Any]], + citations: list[dict[str, Any]], + ) -> list[str]: + domains: list[str] = [] + seen: set[str] = set() + for item in [*results, *citations]: + if not isinstance(item, dict): + continue + hostname = self._result_hostname(item) + registered_domain = self._registered_domain(hostname) + if not registered_domain or registered_domain in seen: + continue + seen.add(registered_domain) + domains.append(registered_domain) + return domains + + def _count_official_resource_results( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + results: list[dict[str, Any]], + include_domains: list[str] | None, + ) -> int: + official_mode = self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) + if official_mode == "off" and 
not self._should_rerank_resource_results(mode=mode, intent=intent): + return 0 + query_tokens = self._query_brand_tokens(query) + strict_official = official_mode == "strict" + official_count = 0 + for item in results: + if self._result_matches_official_policy( + item=item, + mode=mode, + query_tokens=query_tokens, + include_domains=include_domains, + strict_official=strict_official, + ): + official_count += 1 + return official_count + + def _detect_evidence_conflicts( + self, + *, + mode: SearchMode, + intent: ResolvedSearchIntent, + results: list[dict[str, Any]], + include_domains: list[str] | None, + source_domains: list[str], + official_source_count: int, + providers_consulted: list[str], + official_mode: str, + ) -> list[str]: + conflicts: list[str] = [] + if len(source_domains) <= 1 and len(results) > 1: + conflicts.append("low-source-diversity") + if len(set(providers_consulted)) <= 1 and len(source_domains) <= 1 and results: + conflicts.append("single-provider-single-domain") + if self._should_rerank_resource_results(mode=mode, intent=intent): + if results and official_source_count <= 0: + conflicts.append("official-source-not-confirmed") + elif results and official_source_count < len(results): + conflicts.append("mixed-official-and-third-party") + if include_domains and not results: + conflicts.append("domain-filter-returned-empty") + if official_mode == "strict" and results and official_source_count <= 0: + conflicts.append("strict-official-unmet") + return conflicts + + def _estimate_search_confidence( + self, + *, + mode: SearchMode, + intent: ResolvedSearchIntent, + result_count: int, + source_domain_count: int, + official_source_count: int, + verification: str, + conflicts: list[str], + official_mode: str, + ) -> str: + if result_count <= 0: + return "low" + if official_mode == "strict" and official_source_count <= 0: + return "low" + if self._should_rerank_resource_results(mode=mode, intent=intent): + if official_source_count > 0 and 
"official-source-not-confirmed" not in conflicts: + if ( + verification == "cross-provider" + or (source_domain_count >= 2 and "mixed-official-and-third-party" not in conflicts) + ): + return "high" + return "medium" + return "medium" if source_domain_count >= 2 else "low" + if verification == "cross-provider" and source_domain_count >= 2: + return "high" + if source_domain_count >= 2: + return "medium" + return "low" if conflicts else "medium" + def _describe_provider( self, provider: ProviderConfig, @@ -2858,6 +4151,7 @@ def _describe_provider( return { "base_url": provider.base_url, "alternate_base_urls": provider.alternate_base_urls, + "provider_mode": provider.provider_mode, "auth_mode": provider.auth_mode, "paths": provider.default_paths, "search_mode": provider.search_mode, @@ -2872,6 +4166,15 @@ def _describe_provider( def _get_key_or_raise(self, provider: ProviderConfig): record = self.keyring.get_next(provider.name) if record is None: + if provider.name == "tavily": + raise MySearchError( + "Tavily is not configured. Use " + "MYSEARCH_TAVILY_MODE=gateway with MYSEARCH_TAVILY_GATEWAY_TOKEN " + "to consume an upstream gateway, or keep " + "MYSEARCH_TAVILY_MODE=official and import your own Tavily keys " + "with MYSEARCH_TAVILY_API_KEY / MYSEARCH_TAVILY_API_KEYS / " + "MYSEARCH_TAVILY_KEYS_FILE." 
+ ) if provider.name == "xai": raise MySearchError( "xAI / Social search is not configured; MySearch can still use " @@ -2893,13 +4196,13 @@ def _request_json( provider: ProviderConfig, method: str, path: str, - payload: dict[str, Any], + payload: dict[str, Any] | None, key: str, base_url: str | None = None, timeout_seconds: int | None = None, ) -> dict[str, Any]: headers: dict[str, str] = {} - body = dict(payload) + body = dict(payload or {}) if provider.auth_mode == "bearer": token = key if not provider.auth_scheme else f"{provider.auth_scheme} {key}" @@ -2912,7 +4215,9 @@ def _request_json( url = f"{(base_url or provider.base_url)}{path}" headers.setdefault("Content-Type", "application/json") headers.setdefault("User-Agent", "MySearch/0.2") - request_body = json.dumps(body).encode("utf-8") + request_body = None + if method.upper() != "GET": + request_body = json.dumps(body).encode("utf-8") request = Request( url, data=request_body, @@ -2959,6 +4264,106 @@ def _request_json( ) return data + def _request_text( + self, + *, + url: str, + timeout_seconds: int | None = None, + ) -> tuple[int, str]: + request = Request( + url, + headers={ + "User-Agent": "MySearch/0.2", + "Accept": "text/html,application/json;q=0.9,*/*;q=0.8", + }, + method="GET", + ) + try: + with urlopen(request, timeout=timeout_seconds or self.config.timeout_seconds) as response: + return response.status, response.read().decode("utf-8", errors="replace") + except HTTPError as exc: + return exc.code, exc.read().decode("utf-8", errors="replace") + except (URLError, OSError) as exc: + raise MySearchError(str(exc)) from exc + + def _xai_probe_model(self) -> str: + return "grok-4.1-fast" + + def _derive_root_health_base_url(self, provider: ProviderConfig) -> str: + candidate = ( + provider.base_url_for("social_search") + or provider.base_url_for("social_health") + or provider.base_url + ) + parsed = urlparse(str(candidate or "").strip()) + if not parsed.scheme or not parsed.netloc: + return 
str(candidate or "").strip().rstrip("/") + return urlunparse((parsed.scheme, parsed.netloc, "", "", "", "")).rstrip("/") + + def _probe_xai_official_status_page(self, timeout_seconds: int) -> None: + status_url = "https://status.x.ai/" + status_code, response_text = self._request_text( + url=status_url, + timeout_seconds=timeout_seconds, + ) + if status_code >= 400: + raise MySearchHTTPError( + provider="xai", + status_code=status_code, + detail=f"status.x.ai returned HTTP {status_code}", + url=status_url, + ) + + lowered = " ".join(response_text.lower().split()) + if "all systems operational" in lowered: + return + + matches = re.findall( + r"api(?:\s*\([^)]*\))?[^a-z]{0,40}(available|operational|degraded|outage|unavailable|disrupted)", + lowered, + ) + if matches: + negative = {"degraded", "outage", "unavailable", "disrupted"} + if any(item in negative for item in matches): + raise MySearchError( + "status.x.ai reports xAI API is not fully available" + ) + return + + if "api" in lowered and "available" in lowered: + return + + raise MySearchError("unable to determine xAI API status from status.x.ai") + + def _probe_xai_official_via_responses( + self, + provider: ProviderConfig, + key: str, + timeout_seconds: int, + ) -> None: + fallback_timeout_seconds = min(self.config.timeout_seconds, 20) + self._request_json( + provider=provider, + method="POST", + path=provider.path("responses"), + payload=self._build_xai_responses_payload( + query="openai", + sources=["x"], + max_results=1, + include_domains=None, + exclude_domains=None, + allowed_x_handles=None, + excluded_x_handles=None, + from_date=None, + to_date=None, + include_x_images=False, + include_x_videos=False, + model=self._xai_probe_model(), + ), + key=key, + timeout_seconds=max(timeout_seconds, fallback_timeout_seconds), + ) + def _probe_provider_status( self, provider: ProviderConfig, @@ -3014,6 +4419,46 @@ def _probe_provider_status( } return result + def _probe_xai_compatible_gateway(self, provider: 
ProviderConfig, key: str, timeout_seconds: int) -> None: + health_path = "/health" + health_base_url = self._derive_root_health_base_url(provider) + try: + payload = self._request_json( + provider=provider, + method="GET", + path=health_path, + payload=None, + key=key, + base_url=health_base_url, + timeout_seconds=timeout_seconds, + ) + if isinstance(payload, dict) and payload.get("ok") is False: + detail = ( + payload.get("error") + or payload.get("detail") + or "social/X gateway health probe reported unavailable" + ) + raise MySearchError(str(detail)) + return + except (MySearchHTTPError, MySearchError): + pass + + fallback_timeout_seconds = min(self.config.timeout_seconds, 20) + self._request_json( + provider=provider, + method="POST", + path=provider.path("social_search"), + payload={ + "query": "openai", + "source": "x", + "max_results": 1, + "model": self._xai_probe_model(), + }, + key=key, + base_url=provider.base_url_for("social_search"), + timeout_seconds=fallback_timeout_seconds, + ) + def _probe_provider_request(self, provider: ProviderConfig, key: str) -> None: timeout_seconds = min(self.config.timeout_seconds, 10) if provider.name == "tavily": @@ -3061,40 +4506,24 @@ def _probe_provider_request(self, provider: ProviderConfig, key: str) -> None: return if provider.name == "xai": if provider.search_mode == "compatible": - self._request_json( + self._probe_xai_compatible_gateway(provider, key, timeout_seconds) + return + try: + self._probe_xai_official_status_page(timeout_seconds=timeout_seconds) + except MySearchError as exc: + if "not fully available" in str(exc): + raise + self._probe_xai_official_via_responses( + provider=provider, + key=key, + timeout_seconds=timeout_seconds, + ) + except MySearchHTTPError as exc: + self._probe_xai_official_via_responses( provider=provider, - method="POST", - path=provider.path("social_search"), - payload={ - "query": "openai", - "source": "x", - "max_results": 1, - }, key=key, - 
base_url=provider.base_url_for("social_search"), timeout_seconds=timeout_seconds, ) - return - self._request_json( - provider=provider, - method="POST", - path=provider.path("responses"), - payload=self._build_xai_responses_payload( - query="openai", - sources=["x"], - max_results=1, - include_domains=None, - exclude_domains=None, - allowed_x_handles=None, - excluded_x_handles=None, - from_date=None, - to_date=None, - include_x_images=False, - include_x_videos=False, - ), - key=key, - timeout_seconds=timeout_seconds, - ) return def _summarize_route_error(self, error_text: str) -> str: @@ -3298,13 +4727,11 @@ def _looks_like_docs_query(self, query_lower: str) -> bool: "documentation", "api reference", "changelog", - "pricing", "readme", "github", "manual", "文档", "接口", - "价格", "更新日志", ] return any(keyword in query_lower for keyword in keywords) diff --git a/mysearch/config.py b/mysearch/config.py index 9115dd8..acead3f 100644 --- a/mysearch/config.py +++ b/mysearch/config.py @@ -3,7 +3,8 @@ from __future__ import annotations import os -from dataclasses import dataclass, field +import sys +from dataclasses import dataclass as _dataclass, field from pathlib import Path from typing import Literal @@ -13,9 +14,16 @@ tomllib = None # type: ignore[assignment] +def dataclass(*args, **kwargs): + if sys.version_info < (3, 10): + kwargs.pop("slots", None) + return _dataclass(*args, **kwargs) + + MODULE_DIR = Path(__file__).resolve().parent ROOT_DIR = MODULE_DIR.parent AuthMode = Literal["bearer", "body"] +TavilyMode = Literal["official", "gateway"] XAISearchMode = Literal["official", "compatible"] MCPTransport = Literal["stdio", "sse", "streamable-http"] @@ -196,6 +204,46 @@ def _provider_path( return _normalize_path(default) +def _get_tavily_mode(proxy_base_url: str) -> TavilyMode: + explicit = _get_str("MYSEARCH_TAVILY_MODE") + if explicit: + return explicit # type: ignore[return-value] + if _get_str( + "MYSEARCH_TAVILY_GATEWAY_BASE_URL", + "MYSEARCH_TAVILY_GATEWAY_TOKEN", + 
"MYSEARCH_TAVILY_GATEWAY_API_KEY", + ) or _get_list("MYSEARCH_TAVILY_GATEWAY_TOKENS", "MYSEARCH_TAVILY_GATEWAY_API_KEYS"): + return "gateway" + return "gateway" if proxy_base_url else "official" + + +def _tavily_gateway_base_url(proxy_base_url: str, default: str) -> str: + explicit = _get_str("MYSEARCH_TAVILY_GATEWAY_BASE_URL") + if explicit: + return _normalize_base_url(explicit) + if proxy_base_url: + return _normalize_base_url(proxy_base_url) + return _normalize_base_url(default) + + +def _tavily_gateway_path( + *, + explicit_name: str, + explicit_gateway_base_url: str, + proxy_base_url: str, + proxy_default: str, + default: str, +) -> str: + explicit = _get_str(explicit_name) + if explicit: + return _normalize_path(explicit) + if explicit_gateway_base_url: + return _normalize_path(default) + if proxy_base_url: + return _normalize_path(proxy_default) + return _normalize_path(default) + + _bootstrap_runtime_env() @@ -209,6 +257,7 @@ class ProviderConfig: auth_field: str default_paths: dict[str, str] alternate_base_urls: dict[str, str] = field(default_factory=dict) + provider_mode: str = "" search_mode: XAISearchMode = "official" api_keys: list[str] = field(default_factory=list) keys_file: Path | None = None @@ -243,6 +292,12 @@ class MySearchConfig: def from_env(cls) -> "MySearchConfig": proxy_base_url = _get_str("MYSEARCH_PROXY_BASE_URL") proxy_api_key = _get_str("MYSEARCH_PROXY_API_KEY") + tavily_mode = _get_tavily_mode(proxy_base_url) + tavily_gateway_base_url = _get_str("MYSEARCH_TAVILY_GATEWAY_BASE_URL") + tavily_gateway_token = _get_str( + "MYSEARCH_TAVILY_GATEWAY_TOKEN", + "MYSEARCH_TAVILY_GATEWAY_API_KEY", + ) return cls( server_name=_get_str("MYSEARCH_NAME", "MYSEARCH_SERVER_NAME", default="MySearch"), timeout_seconds=_get_int("MYSEARCH_TIMEOUT_SECONDS", 45), @@ -263,44 +318,104 @@ def from_env(cls) -> "MySearchConfig": mcp_stateless_http=_get_bool("MYSEARCH_MCP_STATELESS_HTTP", False), tavily=ProviderConfig( name="tavily", - base_url=_provider_base_url( 
- explicit_names=("MYSEARCH_TAVILY_BASE_URL",), - proxy_base_url=proxy_base_url, - default="https://api.tavily.com", + base_url=( + _tavily_gateway_base_url( + proxy_base_url=proxy_base_url, + default="https://api.tavily.com", + ) + if tavily_mode == "gateway" + else _provider_base_url( + explicit_names=("MYSEARCH_TAVILY_BASE_URL",), + proxy_base_url="", + default="https://api.tavily.com", + ) ), - auth_mode=_get_str( - "MYSEARCH_TAVILY_AUTH_MODE", - default="bearer" if proxy_base_url else "body", + auth_mode=( + _get_str( + "MYSEARCH_TAVILY_GATEWAY_AUTH_MODE", + default="bearer", + ) + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_MODE", default="body") ), # type: ignore[arg-type] - auth_header=_get_str("MYSEARCH_TAVILY_AUTH_HEADER", default="Authorization"), - auth_scheme=_get_str("MYSEARCH_TAVILY_AUTH_SCHEME", default="Bearer"), - auth_field=_get_str("MYSEARCH_TAVILY_AUTH_FIELD", default="api_key"), + auth_header=( + _get_str("MYSEARCH_TAVILY_GATEWAY_AUTH_HEADER", default="Authorization") + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_HEADER", default="Authorization") + ), + auth_scheme=( + _get_str("MYSEARCH_TAVILY_GATEWAY_AUTH_SCHEME", default="Bearer") + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_SCHEME", default="Bearer") + ), + auth_field=( + _get_str("MYSEARCH_TAVILY_GATEWAY_AUTH_FIELD", default="api_key") + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_FIELD", default="api_key") + ), default_paths={ - "search": _provider_path( - explicit_name="MYSEARCH_TAVILY_SEARCH_PATH", - proxy_base_url=proxy_base_url, - proxy_default="/api/search", - default="/search", + "search": ( + _tavily_gateway_path( + explicit_name="MYSEARCH_TAVILY_GATEWAY_SEARCH_PATH", + explicit_gateway_base_url=tavily_gateway_base_url, + proxy_base_url=proxy_base_url, + proxy_default="/api/search", + default="/search", + ) + if tavily_mode == "gateway" + else _provider_path( + 
explicit_name="MYSEARCH_TAVILY_SEARCH_PATH", + proxy_base_url="", + proxy_default="/api/search", + default="/search", + ) ), - "extract": _provider_path( - explicit_name="MYSEARCH_TAVILY_EXTRACT_PATH", - proxy_base_url=proxy_base_url, - proxy_default="/api/extract", - default="/extract", + "extract": ( + _tavily_gateway_path( + explicit_name="MYSEARCH_TAVILY_GATEWAY_EXTRACT_PATH", + explicit_gateway_base_url=tavily_gateway_base_url, + proxy_base_url=proxy_base_url, + proxy_default="/api/extract", + default="/extract", + ) + if tavily_mode == "gateway" + else _provider_path( + explicit_name="MYSEARCH_TAVILY_EXTRACT_PATH", + proxy_base_url="", + proxy_default="/api/extract", + default="/extract", + ) ), }, + provider_mode=tavily_mode, api_keys=[ - *_get_list("MYSEARCH_TAVILY_API_KEYS"), + *( + _get_list( + "MYSEARCH_TAVILY_GATEWAY_TOKENS", + "MYSEARCH_TAVILY_GATEWAY_API_KEYS", + ) + if tavily_mode == "gateway" + else _get_list("MYSEARCH_TAVILY_API_KEYS") + ), + *( + [tavily_gateway_token] + if tavily_mode == "gateway" and tavily_gateway_token + else ([proxy_api_key] if tavily_mode == "gateway" and proxy_api_key else []) + ), *( [_get_str("MYSEARCH_TAVILY_API_KEY")] - if _get_str("MYSEARCH_TAVILY_API_KEY") - else ([proxy_api_key] if proxy_api_key else []) + if tavily_mode != "gateway" and _get_str("MYSEARCH_TAVILY_API_KEY") + else [] ), ], - keys_file=_resolve_path( - "MYSEARCH_TAVILY_KEYS_FILE", - "MYSEARCH_TAVILY_ACCOUNTS_FILE", - default_name="accounts.txt", + keys_file=( + None + if tavily_mode == "gateway" + else _resolve_path( + "MYSEARCH_TAVILY_KEYS_FILE", + "MYSEARCH_TAVILY_ACCOUNTS_FILE", + default_name="accounts.txt", + ) ), ), firecrawl=ProviderConfig( @@ -400,11 +515,20 @@ def from_env(cls) -> "MySearchConfig": proxy_default="/social/search", default="/social/search", ), + "social_health": _provider_path( + explicit_name="MYSEARCH_XAI_SOCIAL_HEALTH_PATH", + proxy_base_url=proxy_base_url, + proxy_default="/social/health", + default="/social/health", + ), }, 
alternate_base_urls={ "social_search": _normalize_base_url( _get_str("MYSEARCH_XAI_SOCIAL_BASE_URL") or proxy_base_url - ) + ), + "social_health": _normalize_base_url( + _get_str("MYSEARCH_XAI_SOCIAL_BASE_URL") or proxy_base_url + ), }, search_mode=_get_str( "MYSEARCH_XAI_SEARCH_MODE", diff --git a/mysearch/docker-entrypoint.sh b/mysearch/docker-entrypoint.sh new file mode 100644 index 0000000..152597a --- /dev/null +++ b/mysearch/docker-entrypoint.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -euo pipefail + +if [[ -z "${MYSEARCH_PROXY_API_KEY:-}" && -n "${MYSEARCH_PROXY_BOOTSTRAP_TOKEN:-}" ]]; then + export MYSEARCH_PROXY_API_KEY="$( + python /app/mysearch/scripts/bootstrap_proxy_token.py + )" +fi + +exec "$@" diff --git a/mysearch/keyring.py b/mysearch/keyring.py index 7331ad7..90864d6 100644 --- a/mysearch/keyring.py +++ b/mysearch/keyring.py @@ -2,12 +2,19 @@ from __future__ import annotations -from dataclasses import dataclass +import sys +from dataclasses import dataclass as _dataclass from threading import Lock from mysearch.config import MySearchConfig, ProviderConfig +def dataclass(*args, **kwargs): + if sys.version_info < (3, 10): + kwargs.pop("slots", None) + return _dataclass(*args, **kwargs) + + @dataclass(frozen=True, slots=True) class KeyRecord: provider: str diff --git a/mysearch/scripts/bootstrap_proxy_token.py b/mysearch/scripts/bootstrap_proxy_token.py new file mode 100644 index 0000000..d95ae79 --- /dev/null +++ b/mysearch/scripts/bootstrap_proxy_token.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import json +import os +import sys +import time +import urllib.error +import urllib.request + + +def _normalize_base_url(value: str) -> str: + return value.rstrip("/") + + +def main() -> int: + base_url = _normalize_base_url(os.environ.get("MYSEARCH_PROXY_BASE_URL", "")) + bootstrap_token = os.environ.get("MYSEARCH_PROXY_BOOTSTRAP_TOKEN", "").strip() + token_name = os.environ.get("MYSEARCH_PROXY_BOOTSTRAP_NAME", "docker-mysearch").strip() 
or "docker-mysearch" + timeout_seconds = max(1.0, float(os.environ.get("MYSEARCH_PROXY_BOOTSTRAP_TIMEOUT_SECONDS", "60"))) + interval_seconds = max(0.2, float(os.environ.get("MYSEARCH_PROXY_BOOTSTRAP_INTERVAL_SECONDS", "1.5"))) + + if not base_url: + print("Missing MYSEARCH_PROXY_BASE_URL for proxy token bootstrap.", file=sys.stderr) + return 1 + if not bootstrap_token: + print("Missing MYSEARCH_PROXY_BOOTSTRAP_TOKEN for proxy token bootstrap.", file=sys.stderr) + return 1 + + target = f"{base_url}/api/internal/mysearch/token" + payload = json.dumps({"name": token_name}).encode("utf-8") + deadline = time.time() + timeout_seconds + last_error = "proxy token bootstrap did not start" + + while time.time() < deadline: + request = urllib.request.Request( + target, + data=payload, + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {bootstrap_token}", + }, + method="POST", + ) + try: + with urllib.request.urlopen(request, timeout=10) as response: + data = json.loads(response.read().decode("utf-8")) + token = (data.get("token") or "").strip() + if token: + print(token) + return 0 + last_error = "bootstrap endpoint returned empty token" + except urllib.error.HTTPError as exc: + detail = exc.read().decode("utf-8", errors="replace").strip() + last_error = f"HTTP {exc.code}: {detail or exc.reason}" + except Exception as exc: # noqa: BLE001 + last_error = str(exc) + time.sleep(interval_seconds) + + print(f"Failed to bootstrap MYSEARCH_PROXY_API_KEY: {last_error}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/mysearch/social_gateway.py b/mysearch/social_gateway.py index 4626d43..e43e929 100644 --- a/mysearch/social_gateway.py +++ b/mysearch/social_gateway.py @@ -66,7 +66,14 @@ def _derive_admin_base_url(upstream_base_url: str) -> str: http_client = httpx.AsyncClient(timeout=60) state_cache: dict[str, Any] = {"expires_at": 0.0, "value": None} -state_lock = asyncio.Lock() +state_lock: asyncio.Lock | 
None = None + + +def get_state_lock() -> asyncio.Lock: + global state_lock + if state_lock is None: + state_lock = asyncio.Lock() + return state_lock @asynccontextmanager @@ -152,12 +159,27 @@ def mask_secret(value: str) -> str: return f"{value[:6]}***{value[-4:]}" +def unwrap_social_tokens_payload(tokens_payload: Any) -> Any: + if isinstance(tokens_payload, dict): + for key_name in ("tokens", "data", "items", "result", "pools"): + candidate = tokens_payload.get(key_name) + if isinstance(candidate, dict): + return candidate + if isinstance(candidate, list): + return {"default": candidate} + return tokens_payload + if isinstance(tokens_payload, list): + return {"default": tokens_payload} + return {} + + def flatten_social_tokens(tokens_payload: Any) -> list[dict[str, Any]]: flat: list[dict[str, Any]] = [] - if not isinstance(tokens_payload, dict): + normalized = unwrap_social_tokens_payload(tokens_payload) + if not isinstance(normalized, dict): return flat - for pool_name, items in tokens_payload.items(): + for pool_name, items in normalized.items(): if not isinstance(items, list): continue for item in items: @@ -284,7 +306,7 @@ async def resolve_gateway_state(force: bool = False) -> dict[str, Any]: if not force and cached and state_cache.get("expires_at", 0) > now: return cached - async with state_lock: + async with get_state_lock(): now = time.time() cached = state_cache.get("value") if not force and cached and state_cache.get("expires_at", 0) > now: @@ -707,15 +729,22 @@ def has_social_fallback(primary_model: str, fallback_model: str) -> bool: return bool(primary and fallback and fallback != primary) +def effective_social_fallback_threshold(min_results: int, max_results: int) -> int: + configured = max(1, int(min_results or 1)) + requested = max(1, int(max_results or 1)) + return min(configured, requested) + + def should_retry_social_with_fallback( primary_model: str, fallback_model: str, response: dict[str, Any] | None, min_results: int, + max_results: int, ) -> 
tuple[bool, str]: if not has_social_fallback(primary_model, fallback_model): return False, "" - threshold = max(1, int(min_results or 1)) + threshold = effective_social_fallback_threshold(min_results, max_results) if count_social_results(response) >= threshold: return False, "" return True, "result_count_below_threshold" @@ -750,6 +779,7 @@ def build_social_route_metadata( fallback_model: str, fallback_reason: str, fallback_min_results: int, + requested_max_results: int, ) -> dict[str, Any]: primary_model = attempts[0]["model"] if attempts else "" selected_model = (selected_attempt or {}).get("model") or primary_model @@ -779,7 +809,10 @@ def build_social_route_metadata( "triggered": fallback_attempted, "used": bool(fallback_attempted and selected_model == fallback_target), "reason": fallback_reason or "", - "threshold": max(1, int(fallback_min_results or 1)), + "threshold": effective_social_fallback_threshold( + fallback_min_results, + requested_max_results, + ), "from": primary_model, "to": fallback_target, "selected_model": selected_model, @@ -795,6 +828,7 @@ def attach_social_route_metadata( fallback_model: str, fallback_reason: str, fallback_min_results: int, + requested_max_results: int, ) -> dict[str, Any]: payload = dict(response or {}) tool_usage = dict(payload.get("tool_usage") or {}) @@ -807,6 +841,7 @@ def attach_social_route_metadata( fallback_model=fallback_model, fallback_reason=fallback_reason, fallback_min_results=fallback_min_results, + requested_max_results=requested_max_results, ) return payload @@ -1019,11 +1054,12 @@ async def social_search(request: Request) -> dict[str, Any]: raise HTTPException(status_code=503, detail="Missing social upstream API key") _, max_results = build_upstream_payload(body) attempts = [] + primary_model = str(body.get("model") or state["model"]).strip() or state["model"] primary_attempt = await execute_social_search_attempt( query, body, state, - state["model"], + primary_model, max_results, ) 
attempts.append(primary_attempt) @@ -1035,10 +1071,11 @@ async def social_search(request: Request) -> dict[str, Any]: if primary_attempt.get("ok"): selected_attempt = primary_attempt should_retry, fallback_reason = should_retry_social_with_fallback( - state["model"], + primary_model, fallback_model, primary_attempt.get("response"), fallback_min_results, + max_results, ) if should_retry: fallback_attempt = await execute_social_search_attempt( @@ -1058,9 +1095,10 @@ async def social_search(request: Request) -> dict[str, Any]: fallback_model=fallback_model, fallback_reason=fallback_reason, fallback_min_results=fallback_min_results, + requested_max_results=max_results, ) - if has_social_fallback(state["model"], fallback_model): + if has_social_fallback(primary_model, fallback_model): fallback_reason = "upstream_error" fallback_attempt = await execute_social_search_attempt( query, @@ -1078,6 +1116,7 @@ async def social_search(request: Request) -> dict[str, Any]: fallback_model=fallback_model, fallback_reason=fallback_reason, fallback_min_results=fallback_min_results, + requested_max_results=max_results, ) detail = fallback_attempt.get("error") or primary_attempt.get("error") or "Social search failed" status_code = fallback_attempt.get("status_code") or primary_attempt.get("status_code") or 502 diff --git a/openclaw/.env.example b/openclaw/.env.example index 5a3af32..68f4b34 100644 --- a/openclaw/.env.example +++ b/openclaw/.env.example @@ -10,6 +10,9 @@ MYSEARCH_PROXY_BASE_URL= MYSEARCH_PROXY_API_KEY= # Tavily +MYSEARCH_TAVILY_MODE=official + +# official 模式:自己维护 Tavily 官方 key 池 MYSEARCH_TAVILY_BASE_URL=https://api.tavily.com MYSEARCH_TAVILY_SEARCH_PATH=/search MYSEARCH_TAVILY_EXTRACT_PATH=/extract @@ -21,6 +24,17 @@ MYSEARCH_TAVILY_API_KEY= MYSEARCH_TAVILY_API_KEYS= MYSEARCH_TAVILY_KEYS_FILE= +# gateway 模式:例如 tavily-hikari;建议把 BASE_URL 直接写到 /api/tavily +MYSEARCH_TAVILY_GATEWAY_BASE_URL= +MYSEARCH_TAVILY_GATEWAY_SEARCH_PATH=/search 
+MYSEARCH_TAVILY_GATEWAY_EXTRACT_PATH=/extract +MYSEARCH_TAVILY_GATEWAY_AUTH_MODE=bearer +MYSEARCH_TAVILY_GATEWAY_AUTH_HEADER=Authorization +MYSEARCH_TAVILY_GATEWAY_AUTH_SCHEME=Bearer +MYSEARCH_TAVILY_GATEWAY_AUTH_FIELD=api_key +MYSEARCH_TAVILY_GATEWAY_TOKEN= +MYSEARCH_TAVILY_GATEWAY_TOKENS= + # Firecrawl MYSEARCH_FIRECRAWL_BASE_URL=https://api.firecrawl.dev MYSEARCH_FIRECRAWL_SEARCH_PATH=/v2/search @@ -64,3 +78,8 @@ MYSEARCH_XAI_MODEL=grok-4.20-beta-latest-non-reasoning # MYSEARCH_XAI_SOCIAL_BASE_URL=https://your-social-gateway.example.com # MYSEARCH_XAI_SEARCH_MODE=compatible # MYSEARCH_XAI_API_KEY=your-gateway-token + +# Tavily gateway 示例: +# MYSEARCH_TAVILY_MODE=gateway +# MYSEARCH_TAVILY_GATEWAY_BASE_URL=http://127.0.0.1:8787/api/tavily +# MYSEARCH_TAVILY_GATEWAY_TOKEN=th-xxxx-xxxxxxxxxxxx diff --git a/openclaw/public.json b/openclaw/public.json index 14d5088..105151c 100644 --- a/openclaw/public.json +++ b/openclaw/public.json @@ -16,7 +16,10 @@ "MYSEARCH_PROXY_API_KEY" ], "optional_env": [ + "MYSEARCH_TAVILY_MODE", "MYSEARCH_TAVILY_API_KEY", + "MYSEARCH_TAVILY_GATEWAY_BASE_URL", + "MYSEARCH_TAVILY_GATEWAY_TOKEN", "MYSEARCH_FIRECRAWL_API_KEY", "MYSEARCH_EXA_API_KEY", "MYSEARCH_XAI_API_KEY" diff --git a/openclaw/runtime/mysearch/clients.py b/openclaw/runtime/mysearch/clients.py index a42c5b1..4189631 100644 --- a/openclaw/runtime/mysearch/clients.py +++ b/openclaw/runtime/mysearch/clients.py @@ -6,20 +6,27 @@ import hashlib import json import re +import sys import threading import time from concurrent.futures import Future, ThreadPoolExecutor -from dataclasses import dataclass +from dataclasses import dataclass as _dataclass from datetime import date, datetime, time as dt_time, timezone from typing import Any, Callable, Literal from urllib.error import HTTPError, URLError -from urllib.parse import urlparse +from urllib.parse import urlparse, urlunparse from urllib.request import Request, urlopen from mysearch.config import MySearchConfig, ProviderConfig from 
mysearch.keyring import MySearchKeyRing +def dataclass(*args, **kwargs): + if sys.version_info < (3, 10): + kwargs.pop("slots", None) + return _dataclass(*args, **kwargs) + + SearchMode = Literal["auto", "web", "news", "social", "docs", "research", "github", "pdf"] SearchIntent = Literal[ "auto", @@ -99,6 +106,80 @@ class RouteDecision: tavily_topic: str = "general" firecrawl_categories: list[str] | None = None sources: list[str] | None = None + fallback_chain: list[str] | None = None + result_profile: Literal["off", "web", "news", "resource"] = "off" + allow_exa_rescue: bool = False + + +@dataclass(slots=True) +class SearchRoutePolicy: + key: str + provider: str + fallback_chain: tuple[str, ...] = () + tavily_topic: str = "general" + firecrawl_categories: tuple[str, ...] = () + result_profile: Literal["off", "web", "news", "resource"] = "off" + allow_exa_rescue: bool = False + + +_MODE_PROVIDER_POLICY: dict[str, SearchRoutePolicy] = { + "web": SearchRoutePolicy( + key="web", + provider="tavily", + fallback_chain=("exa", "firecrawl"), + result_profile="web", + allow_exa_rescue=True, + ), + "news": SearchRoutePolicy( + key="news", + provider="tavily", + fallback_chain=("exa",), + tavily_topic="news", + result_profile="news", + allow_exa_rescue=True, + ), + "docs": SearchRoutePolicy( + key="docs", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + firecrawl_categories=("research",), + result_profile="resource", + ), + "github": SearchRoutePolicy( + key="github", + provider="firecrawl", + fallback_chain=("exa", "tavily"), + firecrawl_categories=("github",), + result_profile="resource", + ), + "pdf": SearchRoutePolicy( + key="pdf", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + firecrawl_categories=("pdf",), + result_profile="resource", + ), + "content": SearchRoutePolicy( + key="content", + provider="firecrawl", + fallback_chain=("tavily", "exa"), + result_profile="resource", + ), + "resource": SearchRoutePolicy( + key="resource", + 
provider="firecrawl", + fallback_chain=("tavily", "exa"), + firecrawl_categories=("research",), + result_profile="resource", + ), + "research": SearchRoutePolicy( + key="research", + provider="tavily", + fallback_chain=("exa", "firecrawl"), + result_profile="web", + allow_exa_rescue=True, + ), +} class MySearchClient: @@ -376,6 +457,8 @@ def _annotate_search_debug( include_content: bool, include_answer: bool, cache_hit: bool, + requested_max_results: int | None = None, + candidate_max_results: int | None = None, ) -> dict[str, Any]: annotated = copy.deepcopy(result) annotated["route_debug"] = { @@ -388,6 +471,17 @@ def _annotate_search_debug( "include_answer": include_answer, "cache_hit": cache_hit, } + if requested_max_results is not None: + annotated["route_debug"]["requested_max_results"] = requested_max_results + if candidate_max_results is not None: + annotated["route_debug"]["candidate_max_results"] = candidate_max_results + evidence = annotated.get("evidence") or {} + if evidence.get("official_mode"): + annotated["route_debug"]["official_mode"] = evidence.get("official_mode") + if "official_filter_applied" in evidence: + annotated["route_debug"]["official_filter_applied"] = bool( + evidence.get("official_filter_applied") + ) return annotated def search( @@ -451,9 +545,18 @@ def search( provider=provider, sources=normalized_sources, include_content=include_content, + include_domains=include_domains, allowed_x_handles=allowed_x_handles, excluded_x_handles=excluded_x_handles, ) + candidate_max_results = self._candidate_result_budget( + requested_max_results=max_results, + strategy=resolved_strategy, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, + route_provider=decision.provider, + ) cacheable = self._should_cache_search( decision=decision, normalized_sources=normalized_sources, @@ -490,6 +593,8 @@ def search( include_content=include_content, include_answer=effective_include_answer, cache_hit=True, + 
requested_max_results=max_results, + candidate_max_results=candidate_max_results, ) if decision.provider == "hybrid": @@ -560,6 +665,13 @@ def search( "web": web_result, "social": social_result, } + hybrid_result = self._augment_evidence_summary( + hybrid_result, + query=query, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, + ) hybrid_result = self._annotate_search_debug( hybrid_result, provider=provider, @@ -570,6 +682,8 @@ def search( include_content=include_content, include_answer=effective_include_answer, cache_hit=False, + requested_max_results=max_results, + candidate_max_results=candidate_max_results, ) return hybrid_result @@ -578,6 +692,9 @@ def search( decision=decision, sources=normalized_sources, strategy=resolved_strategy, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, ): result = self._search_web_blended( query=query, @@ -585,17 +702,17 @@ def search( intent=resolved_intent, strategy=resolved_strategy, decision=decision, - max_results=max_results, + max_results=candidate_max_results, include_content=include_content, include_answer=effective_include_answer, include_domains=include_domains, exclude_domains=exclude_domains, ) - elif decision.provider in ("tavily", "firecrawl", "exa"): + elif decision.provider in {"tavily", "firecrawl", "exa"}: result, fallback_info = self._search_with_fallback( primary_provider=decision.provider, query=query, - max_results=max_results, + max_results=candidate_max_results, mode=mode, intent=resolved_intent, decision=decision, @@ -623,6 +740,64 @@ def search( else: raise MySearchError(f"Unsupported route decision: {decision.provider}") + if self._should_attempt_exa_rescue( + query=query, + mode=mode, + intent=resolved_intent, + decision=decision, + result=result, + max_results=max_results, + include_domains=include_domains, + ): + result = self._apply_exa_rescue( + query=query, + primary_result=result, + max_results=candidate_max_results, + 
include_domains=include_domains, + exclude_domains=exclude_domains, + include_content=include_content, + ) + + if self._should_rerank_resource_results(mode=mode, intent=resolved_intent): + reranked_results = self._rerank_resource_results( + query=query, + mode=mode, + results=list(result.get("results") or []), + include_domains=include_domains, + ) + result["results"] = reranked_results + result["citations"] = self._align_citations_with_results( + results=reranked_results, + citations=list(result.get("citations") or []), + ) + elif self._should_rerank_general_results(result_profile=decision.result_profile): + reranked_results = self._rerank_general_results( + query=query, + result_profile=decision.result_profile, + results=list(result.get("results") or []), + include_domains=include_domains, + ) + result["results"] = reranked_results + result["citations"] = self._align_citations_with_results( + results=reranked_results, + citations=list(result.get("citations") or []), + ) + result = self._apply_official_resource_policy( + query=query, + mode=mode, + intent=resolved_intent, + result=result, + include_domains=include_domains, + ) + result = self._trim_search_payload(result, max_results=max_results) + result = self._augment_evidence_summary( + result, + query=query, + mode=mode, + intent=resolved_intent, + include_domains=include_domains, + ) + route_reason = decision.reason if result.get("provider") == "hybrid" and resolved_strategy in {"balanced", "verify", "deep"}: route_reason = f"{route_reason};strategy={resolved_strategy} 已启用 Tavily + Firecrawl 交叉检索" @@ -668,6 +843,8 @@ def search( include_content=include_content, include_answer=effective_include_answer, cache_hit=False, + requested_max_results=max_results, + candidate_max_results=candidate_max_results, ) def extract_url( @@ -816,17 +993,39 @@ def research( query = query.strip() if not query: raise MySearchError("query must not be empty") - - web_mode = "news" if mode == "news" else ("docs" if mode in {"docs", 
"github", "pdf"} else "web") + resolved_intent = self._resolve_intent( + query=query, + mode=mode, + intent=intent, + sources=["web"], + ) + resolved_strategy = self._resolve_strategy( + mode=mode, + intent=resolved_intent, + strategy=strategy, + sources=["web"], + include_content=False, + ) + research_plan = self._resolve_research_plan( + query=query, + mode=mode, + intent=resolved_intent, + strategy=resolved_strategy, + web_max_results=web_max_results, + social_max_results=social_max_results, + scrape_top_n=scrape_top_n, + include_social=include_social, + include_domains=include_domains, + ) research_tasks: dict[str, Callable[[], Any]] = { "web": lambda: self.search( query=query, - mode=web_mode, - intent=intent, - strategy=strategy, + mode=research_plan["web_mode"], + intent=resolved_intent, + strategy=resolved_strategy, provider="auto", sources=["web"], - max_results=web_max_results, + max_results=research_plan["web_max_results"], include_content=False, include_answer=True, include_domains=include_domains, @@ -840,7 +1039,7 @@ def research( intent="status", provider="auto", sources=["x"], - max_results=social_max_results, + max_results=research_plan["social_max_results"], allowed_x_handles=allowed_x_handles, excluded_x_handles=excluded_x_handles, from_date=from_date, @@ -864,7 +1063,7 @@ def research( if not url or url in urls: continue urls.append(url) - if len(urls) >= scrape_top_n: + if len(urls) >= research_plan["scrape_top_n"]: break pages: list[dict[str, Any]] = [] @@ -907,32 +1106,434 @@ def research( web_search.get("citations") or [], (social.get("citations") or []) if social else [], ) + evidence = self._augment_research_evidence( + query=query, + mode=mode, + intent=web_search.get("intent", intent if intent != "auto" else "factual"), + requested_page_count=len(urls), + pages=pages, + citations=citations, + web_search=web_search, + social=social, + social_error=social_error, + providers_consulted=providers_consulted, + research_plan=research_plan, + ) 
return { "provider": "hybrid", "query": query, - "intent": web_search.get("intent", intent if intent != "auto" else "factual"), - "strategy": web_search.get("strategy", strategy if strategy != "auto" else "fast"), + "intent": web_search.get("intent", resolved_intent), + "strategy": web_search.get("strategy", resolved_strategy), "web_search": web_search, "pages": pages, "social_search": social, "social_error": social_error, "citations": citations, - "evidence": { - "providers_consulted": providers_consulted, - "web_result_count": len(candidate_results), - "page_count": len([page for page in pages if not page.get("error")]), - "citation_count": len(citations), - "verification": "cross-provider" - if web_provider == "hybrid" or len(providers_consulted) > 1 - else "single-provider", - }, + "evidence": evidence, "notes": [ "默认用 Tavily 做发现,Firecrawl 做正文抓取,X 搜索走 xAI Responses API", "如果某个 provider 没配 key,会保留错误并尽量返回其余部分", ], } + def _resolve_research_plan( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + strategy: SearchStrategy, + web_max_results: int, + social_max_results: int, + scrape_top_n: int, + include_social: bool, + include_domains: list[str] | None, + ) -> dict[str, Any]: + web_mode = "news" if mode == "news" else ("docs" if mode in {"docs", "github", "pdf"} else "web") + planned_web_max = web_max_results + planned_social_max = social_max_results if include_social else 0 + planned_scrape_top_n = scrape_top_n + + if mode in {"docs", "github", "pdf"} or self._should_use_strict_resource_policy( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ): + planned_web_max = max(planned_web_max, 4) + planned_scrape_top_n = max(1, min(planned_scrape_top_n, 2)) + elif mode == "news" or intent in {"news", "status"}: + planned_web_max = min(max(planned_web_max, 6), 8) + planned_scrape_top_n = min(max(planned_scrape_top_n, 4), 5) + if include_social: + planned_social_max = min(max(planned_social_max, 4), 6) + elif 
intent in {"comparison", "exploratory"} or strategy in {"verify", "deep"}: + planned_web_max = min(max(planned_web_max, 6), 10) + planned_scrape_top_n = min(max(planned_scrape_top_n, 4), 5) + if include_social: + planned_social_max = min(max(planned_social_max, 3), 5) + + return { + "web_mode": web_mode, + "web_max_results": planned_web_max, + "social_max_results": planned_social_max, + "scrape_top_n": planned_scrape_top_n, + } + + def _candidate_result_budget( + self, + *, + requested_max_results: int, + strategy: SearchStrategy, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + route_provider: str, + ) -> int: + if route_provider == "xai": + return requested_max_results + + budget = requested_max_results + strategy_floor = { + "fast": requested_max_results, + "balanced": min(max(requested_max_results * 2, requested_max_results + 2), 10), + "verify": min(max(requested_max_results * 3, requested_max_results + 4), 15), + "deep": min(max(requested_max_results * 4, requested_max_results + 6), 20), + } + budget = max(budget, strategy_floor.get(strategy, requested_max_results)) + + if include_domains or self._should_rerank_resource_results(mode=mode, intent=intent): + budget = max(budget, min(max(requested_max_results * 2, requested_max_results + 3), 12)) + + return max(requested_max_results, budget) + + def _trim_search_payload(self, result: dict[str, Any], *, max_results: int) -> dict[str, Any]: + trimmed = dict(result) + results = list(trimmed.get("results") or [])[:max_results] + trimmed["results"] = results + trimmed["citations"] = self._align_citations_with_results( + results=results, + citations=list(trimmed.get("citations") or []), + ) + return trimmed + + def _augment_evidence_summary( + self, + result: dict[str, Any], + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + ) -> dict[str, Any]: + enriched = dict(result) + evidence = dict(enriched.get("evidence") or 
{}) + results = list(enriched.get("results") or []) + citations = list(enriched.get("citations") or []) + official_mode = self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) + providers_consulted = [ + item + for item in ( + evidence.get("providers_consulted") + or [enriched.get("provider", "")] + ) + if item + ] + evidence.setdefault("providers_consulted", providers_consulted) + evidence.setdefault( + "verification", + "cross-provider" if len(set(providers_consulted)) > 1 else "single-provider", + ) + evidence.setdefault("citation_count", len(citations)) + evidence.setdefault("official_mode", official_mode) + evidence.setdefault("official_filter_applied", False) + + source_domains = self._collect_source_domains(results=results, citations=citations) + official_source_count = self._count_official_resource_results( + query=query, + mode=mode, + intent=intent, + results=results, + include_domains=include_domains, + ) + conflicts = self._detect_evidence_conflicts( + mode=mode, + intent=intent, + results=results, + include_domains=include_domains, + source_domains=source_domains, + official_source_count=official_source_count, + providers_consulted=providers_consulted, + official_mode=str(evidence.get("official_mode") or official_mode), + ) + evidence["source_diversity"] = len(source_domains) + evidence["source_domains"] = source_domains[:5] + evidence["official_source_count"] = official_source_count + evidence["third_party_source_count"] = max(len(results) - official_source_count, 0) + evidence["confidence"] = self._estimate_search_confidence( + mode=mode, + intent=intent, + result_count=len(results), + source_domain_count=len(source_domains), + official_source_count=official_source_count, + verification=str(evidence.get("verification") or "single-provider"), + conflicts=conflicts, + official_mode=str(evidence.get("official_mode") or official_mode), + ) + evidence["conflicts"] = conflicts + 
enriched["evidence"] = evidence + return enriched + + def _resolve_official_result_mode( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + ) -> str: + if self._should_use_strict_resource_policy( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ): + return "strict" + if self._should_rerank_resource_results(mode=mode, intent=intent): + return "standard" + return "off" + + def _should_use_strict_resource_policy( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_domains: list[str] | None, + ) -> bool: + query_lower = query.lower() + if include_domains: + return True + if mode in {"docs", "github", "pdf"}: + return True + if self._looks_like_official_query(query): + return True + if self._looks_like_pricing_query(query_lower): + return True + if self._looks_like_changelog_query(query_lower): + return True + if intent in {"resource", "tutorial"} and self._looks_like_docs_query(query_lower): + return True + return False + + def _looks_like_official_query(self, query: str) -> bool: + query_lower = query.lower() + if re.search(r"\bofficial\b", query_lower): + return True + official_markers = ( + "官网", + "官方", + "原文", + "定价官方", + "官方定价", + "官方价格", + "官方文档", + ) + return any(marker in query for marker in official_markers) + + def _looks_like_pricing_query(self, query_lower: str) -> bool: + keywords = [ + "price", + "pricing", + "plans", + "subscription", + "费用", + "套餐", + "定价", + "价格", + "售价", + ] + return any(keyword in query_lower for keyword in keywords) + + def _looks_like_changelog_query(self, query_lower: str) -> bool: + keywords = [ + "changelog", + "release notes", + "what's new", + "whats new", + "更新日志", + "发布说明", + "变更日志", + "版本更新", + ] + return any(keyword in query_lower for keyword in keywords) + + def _apply_official_resource_policy( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + result: 
dict[str, Any], + include_domains: list[str] | None, + ) -> dict[str, Any]: + enriched = dict(result) + results = list(enriched.get("results") or []) + citations = list(enriched.get("citations") or []) + official_mode = self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) + evidence = dict(enriched.get("evidence") or {}) + evidence.setdefault("official_mode", official_mode) + evidence.setdefault("official_filter_applied", False) + evidence.setdefault("official_candidate_count", 0) + if official_mode == "off" or not results: + enriched["evidence"] = evidence + return enriched + + official_candidates = self._collect_official_result_candidates( + query=query, + mode=mode, + results=results, + include_domains=include_domains, + strict_official=official_mode == "strict", + ) + evidence["official_candidate_count"] = len(official_candidates) + if official_mode == "strict" and official_candidates: + evidence["official_filter_applied"] = len(official_candidates) < len(results) + enriched["results"] = official_candidates + enriched["citations"] = self._align_citations_with_results( + results=official_candidates, + citations=citations, + ) + enriched["evidence"] = evidence + return enriched + + def _collect_official_result_candidates( + self, + *, + query: str, + mode: SearchMode, + results: list[dict[str, Any]], + include_domains: list[str] | None, + strict_official: bool, + ) -> list[dict[str, Any]]: + query_tokens = self._query_brand_tokens(query) + candidates: list[dict[str, Any]] = [] + for item in results: + if self._result_matches_official_policy( + item=item, + mode=mode, + query_tokens=query_tokens, + include_domains=include_domains, + strict_official=strict_official, + ): + candidates.append(dict(item)) + return candidates + + def _augment_research_evidence( + self, + *, + query: str, + mode: SearchMode, + intent: str, + requested_page_count: int, + pages: list[dict[str, Any]], + citations: 
list[dict[str, Any]], + web_search: dict[str, Any], + social: dict[str, Any] | None, + social_error: str, + providers_consulted: list[str], + research_plan: dict[str, Any], + ) -> dict[str, Any]: + successful_pages = [page for page in pages if not page.get("error")] + page_error_count = max(len(pages) - len(successful_pages), 0) + page_success_rate = ( + round(len(successful_pages) / requested_page_count, 2) + if requested_page_count > 0 + else 0.0 + ) + web_evidence = dict(web_search.get("evidence") or {}) + source_domains = self._collect_source_domains( + results=successful_pages, + citations=citations, + ) + conflicts = list(web_evidence.get("conflicts") or []) + if requested_page_count and not successful_pages: + conflicts.append("page-extraction-unavailable") + elif requested_page_count and page_error_count > 0: + conflicts.append("page-extraction-partial") + if social_error: + conflicts.append("social-search-unavailable") + + official_mode = str( + web_evidence.get("official_mode") + or self._resolve_official_result_mode( + query=query, + mode=mode, + intent=str(intent) if isinstance(intent, str) else "factual", + include_domains=None, + ) + ) + confidence = self._estimate_research_confidence( + search_confidence=str(web_evidence.get("confidence") or "low"), + page_success_count=len(successful_pages), + requested_page_count=requested_page_count, + social_present=social is not None, + social_error=bool(social_error), + conflicts=conflicts, + ) + return { + "providers_consulted": providers_consulted, + "web_result_count": len(web_search.get("results") or []), + "page_count": len(successful_pages), + "page_error_count": page_error_count, + "page_success_rate": page_success_rate, + "citation_count": len(citations), + "verification": "cross-provider" + if web_search.get("provider") == "hybrid" or len(providers_consulted) > 1 + else "single-provider", + "source_diversity": len(source_domains), + "source_domains": source_domains[:5], + "official_source_count": 
int(web_evidence.get("official_source_count") or 0), + "official_mode": official_mode, + "search_confidence": str(web_evidence.get("confidence") or "low"), + "confidence": confidence, + "conflicts": conflicts, + "research_plan": research_plan, + } + + def _estimate_research_confidence( + self, + *, + search_confidence: str, + page_success_count: int, + requested_page_count: int, + social_present: bool, + social_error: bool, + conflicts: list[str], + ) -> str: + if "strict-official-unmet" in conflicts or "page-extraction-unavailable" in conflicts: + return "low" + if search_confidence == "high" and page_success_count > 0 and not social_error: + return "high" + if search_confidence in {"high", "medium"} and ( + page_success_count > 0 or requested_page_count <= 0 or not social_present + ): + return "medium" + if search_confidence == "high": + return "medium" + return "low" if conflicts else "medium" + def _should_request_search_answer( self, *, @@ -966,35 +1567,59 @@ def _route_search( provider: ProviderName, sources: list[str] | None, include_content: bool, + include_domains: list[str] | None, allowed_x_handles: list[str] | None, excluded_x_handles: list[str] | None, ) -> RouteDecision: normalized_sources = sorted(set(sources or ["web"])) - query_lower = query.lower() + policy = self._route_policy_for_request( + query=query, + mode=mode, + intent=intent, + include_content=include_content, + ) if provider != "auto": if provider == "tavily": return RouteDecision( provider="tavily", reason="显式指定 Tavily", - tavily_topic="news" if mode == "news" or intent in {"news", "status"} else "general", + tavily_topic=policy.tavily_topic, + fallback_chain=self._explicit_provider_fallback_chain( + provider=provider, + policy=policy, + ), + result_profile=policy.result_profile, + allow_exa_rescue=policy.allow_exa_rescue and policy.provider == "tavily", ) if provider == "firecrawl": return RouteDecision( provider="firecrawl", reason="显式指定 Firecrawl", - 
firecrawl_categories=self._firecrawl_categories(mode, intent), + firecrawl_categories=list(policy.firecrawl_categories) + or self._firecrawl_categories(mode, intent), + fallback_chain=self._explicit_provider_fallback_chain( + provider=provider, + policy=policy, + ), + result_profile=policy.result_profile, ) if provider == "exa": return RouteDecision( provider="exa", reason="显式指定 Exa", + fallback_chain=self._explicit_provider_fallback_chain( + provider=provider, + policy=policy, + ), + result_profile=policy.result_profile, ) if provider == "xai": return RouteDecision( provider="xai", reason="显式指定 xAI/X 搜索", sources=normalized_sources, + result_profile="off", ) if normalized_sources == ["web", "x"] or ( @@ -1007,6 +1632,7 @@ def _route_search( provider="xai", reason="社交舆情 / X 搜索更适合走 xAI", sources=["x"], + result_profile="off", ) if allowed_x_handles or excluded_x_handles: @@ -1014,139 +1640,47 @@ def _route_search( provider="xai", reason="检测到 X handle 过滤条件", sources=["x"], + result_profile="off", ) + if policy.key in {"docs", "resource"} and include_domains and self._domains_prefer_firecrawl_discovery(include_domains): + reason = "检测到受限 / 社区域名,优先用 Firecrawl 做站内发现" + elif policy.key in {"docs", "github", "pdf"}: + reason = "文档 / GitHub / PDF 默认走 Firecrawl,页面发现与正文抓取保持一致" + elif policy.key == "content": + reason = "请求里需要正文内容,优先走 Firecrawl" + elif policy.key == "news": + reason = "状态 / 新闻类查询默认走 Tavily" + elif policy.key == "resource": + reason = "resource / docs 查询默认走 Firecrawl" + elif policy.key == "research": + reason = "research 发现阶段默认走 Tavily" + else: + reason = "普通网页检索默认走 Tavily" + return self._decision_from_policy(policy=policy, reason=reason) - if mode in {"docs", "github", "pdf"}: - if include_content: - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,文档正文查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="文档正文查询优先走 
Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - if self._provider_can_serve(self.config.tavily): - return RouteDecision( - provider="tavily", - reason="文档类查询先用 Tavily 做官方页面发现,正文再交给 Firecrawl", - tavily_topic="general", - ) - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,文档类查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="文档 / GitHub / PDF 内容优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - - if include_content: - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,正文查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="请求里需要正文内容,优先用 Firecrawl search + scrape", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - - if intent in {"news", "status"} or mode == "news" or self._looks_like_news_query(query_lower): - if not self._provider_can_serve(self.config.tavily) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Tavily 未配置,新闻 / 状态类查询回退到 Exa", - ) - return RouteDecision( - provider="tavily", - reason="状态 / 新闻类查询默认走 Tavily", - tavily_topic="news", - ) - - if intent == "resource" or self._looks_like_docs_query(query_lower): - if include_content: - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,resource 正文查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="resource / docs 正文查询优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories("docs", intent), - ) - if self._provider_can_serve(self.config.tavily): - return RouteDecision( - provider="tavily", - reason="resource / docs 查询先用 Tavily 
做页面发现,正文再交给 Firecrawl", - tavily_topic="general", - ) - if not self._provider_can_serve(self.config.firecrawl) and self._provider_can_serve( - self.config.exa + def _domains_prefer_firecrawl_discovery(self, include_domains: list[str] | None) -> bool: + if not include_domains: + return False + firecrawl_preferred_domains = { + "dev.to", + "juejin.cn", + "linux.do", + "medium.com", + "mp.weixin.qq.com", + "notion.site", + "notion.so", + "substack.com", + "weixin.qq.com", + "zhihu.com", + } + for domain in include_domains: + cleaned_domain = self._clean_hostname(domain) + if any( + self._domain_matches(cleaned_domain, preferred) + for preferred in firecrawl_preferred_domains ): - return RouteDecision( - provider="exa", - reason="Firecrawl 未配置,resource / docs 类查询回退到 Exa", - ) - return RouteDecision( - provider="firecrawl", - reason="resource / docs 类查询优先走 Firecrawl", - firecrawl_categories=self._firecrawl_categories("docs", intent), - ) - - if mode == "research": - if self._provider_can_serve(self.config.tavily): - return RouteDecision( - provider="tavily", - reason="research 模式先用 Tavily 做发现,再按策略决定是否扩展验证", - tavily_topic="general", - ) - if self._provider_can_serve(self.config.exa): - return RouteDecision( - provider="exa", - reason="Tavily 未配置,research 发现阶段回退到 Exa", - ) - if self._provider_can_serve(self.config.firecrawl): - return RouteDecision( - provider="firecrawl", - reason="Tavily / Exa 未配置,research 发现阶段回退到 Firecrawl", - firecrawl_categories=self._firecrawl_categories(mode, intent), - ) - return RouteDecision( - provider="tavily", - reason="research 模式默认走 Tavily(无可用替代)", - tavily_topic="general", - ) - - if not self._provider_can_serve(self.config.tavily) and self._provider_can_serve( - self.config.exa - ): - return RouteDecision( - provider="exa", - reason="Tavily 未配置,普通网页检索回退到 Exa", - ) - - return RouteDecision( - provider="tavily", - reason="普通网页检索默认走 Tavily", - tavily_topic="general", - ) + return True + return False def _resolve_intent( self, @@ -1204,6 
+1738,81 @@ def _resolve_strategy( return "balanced" return "fast" + def _route_policy_for_request( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + include_content: bool, + ) -> SearchRoutePolicy: + query_lower = query.lower() + if mode == "research": + return _MODE_PROVIDER_POLICY["research"] + if include_content: + return _MODE_PROVIDER_POLICY["content"] + if mode in {"docs", "github", "pdf"}: + return _MODE_PROVIDER_POLICY[mode] + if intent in {"resource", "tutorial"} or self._looks_like_docs_query(query_lower): + return _MODE_PROVIDER_POLICY["resource"] + if intent in {"news", "status"} or mode == "news" or self._looks_like_news_query(query_lower): + return _MODE_PROVIDER_POLICY["news"] + return _MODE_PROVIDER_POLICY["web"] + + def _decision_from_policy( + self, + *, + policy: SearchRoutePolicy, + reason: str, + sources: list[str] | None = None, + ) -> RouteDecision: + provider, fallback_chain = self._resolve_available_policy_chain(policy=policy) + return RouteDecision( + provider=provider, + reason=reason, + tavily_topic=policy.tavily_topic, + firecrawl_categories=list(policy.firecrawl_categories) or None, + sources=sources, + fallback_chain=fallback_chain, + result_profile=policy.result_profile, + allow_exa_rescue=policy.allow_exa_rescue, + ) + + def _resolve_available_policy_chain( + self, + *, + policy: SearchRoutePolicy, + ) -> tuple[ProviderName, list[str] | None]: + ordered: list[ProviderName] = [policy.provider, *policy.fallback_chain] + available: list[ProviderName] = [] + for provider_name in ordered: + config = self._provider_config_for_name(provider_name) + if self._provider_can_serve(config): + available.append(provider_name) + if not available: + return policy.provider, list(policy.fallback_chain) or None + return available[0], list(available[1:]) or None + + def _provider_config_for_name(self, provider_name: ProviderName) -> ProviderConfig: + if provider_name == "tavily": + return self.config.tavily + if 
provider_name == "firecrawl": + return self.config.firecrawl + if provider_name == "exa": + return self.config.exa + return self.config.xai + + def _explicit_provider_fallback_chain( + self, + *, + provider: ProviderName, + policy: SearchRoutePolicy, + ) -> list[str] | None: + if provider == "xai": + return None + chain = [item for item in policy.fallback_chain if item != provider] + return list(chain) or None + def _should_blend_web_providers( self, *, @@ -1211,6 +1820,9 @@ def _should_blend_web_providers( decision: RouteDecision, sources: list[str], strategy: SearchStrategy, + mode: SearchMode = "auto", + intent: ResolvedSearchIntent = "factual", + include_domains: list[str] | None = None, ) -> bool: if requested_provider != "auto": return False @@ -1220,20 +1832,18 @@ def _should_blend_web_providers( return False if "x" in sources: return False + if mode == "news" or intent in {"news", "status"}: + return False + if include_domains: + return False + if mode in {"docs", "github", "pdf"}: + return False + if intent in {"resource", "tutorial"}: + return False return self._provider_can_serve(self.config.tavily) and self._provider_can_serve( self.config.firecrawl ) - # ------------------------------------------------------------------ - # Provider-level fallback: if the chosen provider fails, try others - # ------------------------------------------------------------------ - - _SEARCH_FALLBACK_CHAIN: dict[str, list[str]] = { - "tavily": ["exa", "firecrawl"], - "firecrawl": ["exa", "tavily"], - "exa": ["firecrawl", "tavily"], - } - def _search_with_fallback( self, *, @@ -1248,8 +1858,7 @@ def _search_with_fallback( include_domains: list[str] | None, exclude_domains: list[str] | None, ) -> tuple[dict[str, Any], dict[str, Any] | None]: - """Try the primary provider; on failure, walk the fallback chain.""" - chain = [primary_provider] + self._SEARCH_FALLBACK_CHAIN.get(primary_provider, []) + chain = [primary_provider, *(decision.fallback_chain or [])] last_error: 
Exception | None = None for provider_name in chain: try: @@ -1279,9 +1888,7 @@ def _search_with_fallback( except Exception as exc: last_error = MySearchError(f"{provider_name}: {exc}") continue - raise MySearchError( - f"All providers failed for query '{query[:80]}': {last_error}" - ) + raise MySearchError(f"All providers failed for query '{query[:80]}': {last_error}") def _dispatch_single_provider( self, @@ -1297,7 +1904,6 @@ def _dispatch_single_provider( include_domains: list[str] | None, exclude_domains: list[str] | None, ) -> dict[str, Any]: - """Call exactly one search provider by name.""" if provider_name == "tavily": return self._search_tavily( query=query, @@ -1327,6 +1933,246 @@ def _dispatch_single_provider( ) raise MySearchError(f"Unknown provider: {provider_name}") + def _should_attempt_exa_rescue( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + decision: RouteDecision, + result: dict[str, Any], + max_results: int, + include_domains: list[str] | None, + ) -> bool: + if not decision.allow_exa_rescue: + return False + if not self._provider_can_serve(self.config.exa): + return False + if result.get("provider") in {"exa", "xai"}: + return False + if result.get("fallback"): + return False + if include_domains or self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) == "strict": + return False + results = list(result.get("results") or []) + if len(results) >= min(max_results, 3): + return False + query_terms = re.findall(r"[a-z0-9\u4e00-\u9fff]+", query.lower()) + long_tail_signal = len(query_terms) >= 6 or len(query) >= 48 + return mode == "news" or intent in {"comparison", "exploratory", "tutorial"} or long_tail_signal + + def _apply_exa_rescue( + self, + *, + query: str, + primary_result: dict[str, Any], + max_results: int, + include_domains: list[str] | None, + exclude_domains: list[str] | None, + include_content: bool, + ) -> dict[str, Any]: + exa_result = 
self._search_exa( + query=query, + max_results=max_results, + include_domains=include_domains, + exclude_domains=exclude_domains, + include_content=include_content, + ) + if not exa_result.get("results"): + return primary_result + + merged = self._merge_search_payloads( + primary_result=primary_result, + secondary_result=exa_result, + max_results=max_results, + ) + return { + "provider": "hybrid", + "route_selected": f"{primary_result.get('provider', 'unknown')}+exa", + "query": query, + "answer": primary_result.get("answer") or exa_result.get("answer", ""), + "results": merged["results"], + "citations": merged["citations"], + "evidence": { + "providers_consulted": [ + item + for item in [primary_result.get("provider"), exa_result.get("provider")] + if item + ], + "matched_results": merged["matched_results"], + "citation_count": len(merged["citations"]), + "verification": "fallback", + }, + "primary_search": primary_result, + "secondary_search": exa_result, + "secondary_error": "", + "fallback": { + "from": primary_result.get("provider", "unknown"), + "to": "exa", + "reason": "primary provider returned sparse results; Exa rescue engaged", + }, + } + + def _should_rerank_general_results( + self, + *, + result_profile: str, + ) -> bool: + return result_profile in {"web", "news"} + + def _rerank_general_results( + self, + *, + query: str, + result_profile: Literal["web", "news"], + results: list[dict[str, Any]], + include_domains: list[str] | None, + ) -> list[dict[str, Any]]: + if len(results) < 2: + return results + ranked = sorted( + enumerate(results), + key=lambda pair: ( + self._general_result_rank( + query=query, + result_profile=result_profile, + item=pair[1], + include_domains=include_domains, + ), + -pair[0], + ), + reverse=True, + ) + return [dict(pair[1]) for pair in ranked] + + def _general_result_rank( + self, + *, + query: str, + result_profile: Literal["web", "news"], + item: dict[str, Any], + include_domains: list[str] | None, + ) -> tuple[int, int, 
int, int, int, int, int, int]: + if result_profile == "news": + return self._news_result_rank(item=item, include_domains=include_domains) + return self._web_result_rank( + query=query, + item=item, + include_domains=include_domains, + ) + + def _news_result_rank( + self, + *, + item: dict[str, Any], + include_domains: list[str] | None, + ) -> tuple[int, int, int, int, int, int, int, int]: + hostname = self._result_hostname(item) + include_match = int( + bool(include_domains) + and any(self._domain_matches(hostname, domain) for domain in include_domains or []) + ) + mainstream = int(self._is_mainstream_news_domain(hostname)) + article_shape = int(self._looks_like_news_article_result(item)) + has_timestamp = int(self._result_published_timestamp(item) is not None) + timestamp_score = int(self._result_published_timestamp(item) or 0) + content_score, snippet_score, title_score = self._result_quality_score(item) + return ( + include_match, + mainstream, + article_shape, + has_timestamp, + timestamp_score, + content_score, + snippet_score, + title_score, + ) + + def _web_result_rank( + self, + *, + query: str, + item: dict[str, Any], + include_domains: list[str] | None, + ) -> tuple[int, int, int, int, int, int, int, int]: + hostname = self._result_hostname(item) + registered_domain = self._registered_domain(hostname) + title_text = (item.get("title") or "").lower() + query_tokens = self._query_brand_tokens(query) + include_match = int( + bool(include_domains) + and any(self._domain_matches(hostname, domain) for domain in include_domains or []) + ) + registered_domain_label_match = int( + self._registered_domain_label_matches( + registered_domain=registered_domain, + query_tokens=query_tokens, + ) + ) + host_brand_match = int(any(token in hostname for token in query_tokens)) + title_brand_match = int(any(token in title_text for token in query_tokens)) + non_aggregator = int(not self._is_obvious_web_aggregator(registered_domain)) + matched_provider_count = 
len(item.get("matched_providers") or []) + content_score, snippet_score, title_score = self._result_quality_score(item) + return ( + include_match, + registered_domain_label_match, + host_brand_match, + title_brand_match, + non_aggregator, + matched_provider_count, + content_score, + max(snippet_score, title_score), + ) + + def _result_published_timestamp(self, item: dict[str, Any]) -> float | None: + for field in ("published_date", "publishedDate", "created_at"): + parsed = self._parse_result_timestamp(item.get(field)) + if parsed is not None: + return parsed.timestamp() + return None + + def _is_mainstream_news_domain(self, hostname: str) -> bool: + registered_domain = self._registered_domain(hostname) + mainstream_domains = { + "apnews.com", + "bbc.com", + "bloomberg.com", + "cnn.com", + "ft.com", + "latimes.com", + "nytimes.com", + "reuters.com", + "theguardian.com", + "theverge.com", + "washingtonpost.com", + "wsj.com", + "xinhuanet.com", + } + return registered_domain in mainstream_domains + + def _looks_like_news_article_result(self, item: dict[str, Any]) -> bool: + path = urlparse(item.get("url", "")).path.lower() + return any( + marker in path + for marker in ("/news/", "/story/", "/stories/", "/article/", "/articles/", "/202") + ) + + def _is_obvious_web_aggregator(self, registered_domain: str) -> bool: + return registered_domain in { + "linkedin.com", + "medium.com", + "quora.com", + "reddit.com", + "researchgate.net", + "stackoverflow.com", + } + def _search_web_blended( self, *, @@ -1383,12 +2229,10 @@ def _search_web_blended( } blended_results, blended_errors = self._execute_parallel(tasks, max_workers=2) - primary_failed = "primary" in blended_errors secondary_failed = "secondary" in blended_errors if primary_failed and not secondary_failed: - # Primary down but secondary succeeded — use secondary as sole result primary_result = blended_results["secondary"] primary_result["fallback"] = { "from": decision.provider, @@ -1398,7 +2242,6 @@ def 
_search_web_blended( secondary_result = None secondary_error = "" elif primary_failed and secondary_failed: - # Both failed — raise with context from both primary_err = str(blended_errors["primary"])[:150] secondary_err = str(blended_errors["secondary"])[:150] raise MySearchError( @@ -1458,6 +2301,55 @@ def _search_tavily( include_content: bool, include_domains: list[str] | None, exclude_domains: list[str] | None, + ) -> dict[str, Any]: + include_domains = [item.strip() for item in (include_domains or []) if item and item.strip()] + exclude_domains = [item.strip() for item in (exclude_domains or []) if item and item.strip()] + + response = self._search_tavily_once( + query=query, + max_results=max_results, + topic=topic, + include_answer=include_answer, + include_content=include_content, + include_domains=include_domains, + exclude_domains=exclude_domains, + ) + if response.get("results") or not include_domains: + return response + + retry_response = self._search_tavily_domain_retry( + query=query, + max_results=max_results, + topic=topic, + include_content=include_content, + include_domains=include_domains, + exclude_domains=exclude_domains, + ) + if retry_response is not None: + return retry_response + + fallback_response = self._search_tavily_domain_fallback( + query=query, + max_results=max_results, + include_content=include_content, + include_domains=include_domains, + exclude_domains=exclude_domains, + ) + if fallback_response is not None: + return fallback_response + + return response + + def _search_tavily_once( + self, + *, + query: str, + max_results: int, + topic: str, + include_answer: bool, + include_content: bool, + include_domains: list[str] | None, + exclude_domains: list[str] | None, ) -> dict[str, Any]: provider = self.config.tavily key = self._get_key_or_raise(provider) @@ -1474,37 +2366,208 @@ def _search_tavily( if exclude_domains: payload["exclude_domains"] = exclude_domains - response = self._request_json( - provider=provider, - 
method="POST", - path=provider.path("search"), - payload=payload, - key=key.key, + response = self._request_json( + provider=provider, + method="POST", + path=provider.path("search"), + payload=payload, + key=key.key, + ) + results = [ + { + "provider": "tavily", + "source": "web", + "title": item.get("title", ""), + "url": item.get("url", ""), + "snippet": item.get("content", ""), + "content": item.get("raw_content", "") if include_content else "", + "score": item.get("score"), + "published_date": item.get("published_date") + or item.get("publishedDate") + or item.get("published_at") + or item.get("publishedAt") + or "", + } + for item in response.get("results", []) + ] + filtered_results = self._filter_results_by_domains( + results, + include_domains=include_domains, + exclude_domains=exclude_domains, + ) + return { + "provider": "tavily", + "transport": key.source, + "query": response.get("query", query), + "answer": response.get("answer", ""), + "request_id": response.get("request_id", ""), + "response_time": response.get("response_time"), + "results": filtered_results, + "citations": [ + {"title": item.get("title", ""), "url": item.get("url", "")} + for item in filtered_results + if item.get("url") + ], + } + + def _search_tavily_domain_retry( + self, + *, + query: str, + max_results: int, + topic: str, + include_content: bool, + include_domains: list[str], + exclude_domains: list[str] | None, + ) -> dict[str, Any] | None: + per_domain_results = [] + retried_domains: list[str] = [] + for domain in include_domains: + domain_result = self._search_tavily_once( + query=self._build_firecrawl_domain_query( + query=query, + include_domain=domain, + exclude_domains=exclude_domains, + ), + max_results=max_results, + topic=topic, + include_answer=False, + include_content=include_content, + include_domains=None, + exclude_domains=exclude_domains, + ) + filtered_results = self._filter_results_by_domains( + domain_result.get("results", []), + include_domains=[domain], + 
exclude_domains=exclude_domains, + ) + if not filtered_results: + continue + domain_result = dict(domain_result) + domain_result["results"] = filtered_results + domain_result["citations"] = self._align_citations_with_results( + results=filtered_results, + citations=list(domain_result.get("citations") or []), + ) + per_domain_results.append(domain_result) + retried_domains.append(domain) + + if not per_domain_results: + return None + + merged_results = self._merge_ranked_results( + [result.get("results", []) for result in per_domain_results], + max_results=max_results, + ) + citations = self._align_citations_with_results( + results=merged_results, + citations=self._dedupe_citations( + *[result.get("citations", []) for result in per_domain_results] + ), + ) + return { + "provider": "tavily", + "transport": per_domain_results[0].get("transport", "env"), + "query": query, + "answer": "", + "request_id": "", + "response_time": None, + "results": merged_results, + "citations": citations, + "route_debug": { + "domain_filter_mode": "site_query_retry", + "retried_include_domains": retried_domains, + }, + } + + def _search_tavily_domain_fallback( + self, + *, + query: str, + max_results: int, + include_content: bool, + include_domains: list[str], + exclude_domains: list[str] | None, + ) -> dict[str, Any] | None: + if not self._provider_can_serve(self.config.firecrawl): + return None + + categories = ( + self._firecrawl_categories("docs", "resource") + if self._looks_like_docs_query(query.lower()) or self._looks_like_tutorial_query(query.lower()) + else [] + ) + per_domain_results = [] + citations = [] + seen_urls: set[str] = set() + for domain in include_domains: + domain_result = self._search_firecrawl_once( + query=self._build_firecrawl_domain_query( + query=query, + include_domain=domain, + exclude_domains=exclude_domains, + ), + max_results=max_results, + categories=categories, + include_content=include_content, + ) + if not domain_result.get("results"): + retry_result = 
self._search_firecrawl_domain_retry( + query=query, + max_results=max_results, + categories=categories, + include_content=include_content, + include_domain=domain, + exclude_domains=exclude_domains, + ) + if retry_result is not None: + domain_result = retry_result + per_domain_results.append(domain_result) + for item in domain_result.get("results", []): + url = item.get("url", "") + if not url or url in seen_urls: + continue + seen_urls.add(url) + citations.append({"title": item.get("title", ""), "url": url}) + + merged_results = self._merge_ranked_results( + [result.get("results", []) for result in per_domain_results], + max_results=max_results, ) + if not merged_results: + return None + return { - "provider": "tavily", - "transport": key.source, - "query": response.get("query", query), - "answer": response.get("answer", ""), - "request_id": response.get("request_id", ""), - "response_time": response.get("response_time"), - "results": [ - { - "provider": "tavily", - "source": "web", - "title": item.get("title", ""), - "url": item.get("url", ""), - "snippet": item.get("content", ""), - "content": item.get("raw_content", "") if include_content else "", - "score": item.get("score"), - } - for item in response.get("results", []) - ], - "citations": [ - {"title": item.get("title", ""), "url": item.get("url", "")} - for item in response.get("results", []) - if item.get("url") - ], + "provider": "hybrid", + "route_selected": "tavily+firecrawl", + "query": query, + "answer": "", + "results": merged_results, + "citations": citations[:max_results], + "primary_search": { + "provider": "tavily", + "query": query, + "results": [], + "citations": [], + }, + "secondary_search": { + "provider": "firecrawl", + "query": query, + "results": merged_results, + "citations": citations[:max_results], + }, + "secondary_error": "", + "evidence": { + "providers_consulted": ["tavily", "firecrawl"], + "matched_results": 0, + "citation_count": len(citations[:max_results]), + "verification": 
"fallback", + }, + "fallback": { + "from": "tavily", + "to": "firecrawl", + "reason": "tavily returned 0 results for domain-filtered search", + }, } def _search_firecrawl( @@ -1734,6 +2797,10 @@ def _search_firecrawl_once( "url": item.get("url", ""), "snippet": item.get("description", "") or item.get("markdown", ""), "content": item.get("markdown", "") if include_content else "", + "published_date": item.get("publishedDate") + or item.get("published_date") + or item.get("published_at") + or "", } ) @@ -2213,6 +3280,7 @@ def _build_xai_responses_payload( to_date: str | None, include_x_images: bool, include_x_videos: bool, + model: str | None = None, ) -> dict[str, Any]: tools: list[dict[str, Any]] = [] if "web" in sources: @@ -2244,7 +3312,7 @@ def _build_xai_responses_payload( augmented_query = f"{query}\n\nReturn up to {max_results} relevant results with concise sourcing." return { - "model": self.config.xai_model, + "model": (model or self.config.xai_model).strip(), "input": [ { "role": "user", @@ -2528,6 +3596,7 @@ def _rerank_resource_results( return results query_tokens = self._query_brand_tokens(query) + strict_official = bool(include_domains) or self._looks_like_official_query(query) ranked = sorted( enumerate(results), key=lambda pair: ( @@ -2536,6 +3605,7 @@ def _rerank_resource_results( item=pair[1], query_tokens=query_tokens, include_domains=include_domains, + strict_official=strict_official, ), -pair[0], ), @@ -2550,43 +3620,44 @@ def _resource_result_rank( item: dict[str, Any], query_tokens: list[str], include_domains: list[str] | None, - ) -> tuple[int, int, int, int, int, int, int, int, int, int, int]: - url = item.get("url", "") - hostname = self._result_hostname(item) - registered_domain = self._registered_domain(hostname) - title_text = (item.get("title") or "").lower() - include_match = int( - bool(include_domains) - and any(self._domain_matches(hostname, domain) for domain in include_domains or []) - ) - host_brand_match = int( - any(token in 
hostname or token in registered_domain for token in query_tokens) - ) - title_brand_match = int(any(token in title_text for token in query_tokens)) - docs_shape_match = int( - self._looks_like_resource_result( - url=url, - hostname=hostname, - title_text=title_text, - mode=mode, - ) + strict_official: bool, + ) -> tuple[int, int, int, int, int, int, int, int, int, int, int, int, int]: + flags = self._resource_result_flags( + mode=mode, + item=item, + query_tokens=query_tokens, + include_domains=include_domains, ) + include_match = int(flags["include_match"]) + host_brand_match = int(flags["host_brand_match"]) + registered_domain_label_match = int(flags["registered_domain_label_match"]) + title_brand_match = int(flags["title_brand_match"]) + docs_shape_match = int(flags["docs_shape_match"]) github_bonus = int( mode == "github" - and hostname in {"github.com", "raw.githubusercontent.com"} + and flags["hostname"] in {"github.com", "raw.githubusercontent.com"} ) - pdf_bonus = int(mode == "pdf" and self._looks_like_pdf_url(url)) - non_third_party = int( - not self._is_obvious_third_party_resource( - hostname=hostname, - registered_domain=registered_domain, + pdf_bonus = int(mode == "pdf" and self._looks_like_pdf_url(item.get("url", ""))) + non_third_party = int(flags["non_third_party"]) + official_resource_match = int( + self._is_probably_official_resource_result( mode=mode, + hostname=str(flags["hostname"]), + include_match=bool(include_match), + registered_domain_label_match=bool(registered_domain_label_match), + host_brand_match=bool(host_brand_match), + title_brand_match=bool(title_brand_match), + docs_shape_match=bool(docs_shape_match), + non_third_party=bool(non_third_party), + official_query=strict_official, ) ) matched_provider_count = len(item.get("matched_providers") or []) content_score, snippet_score, title_score = self._result_quality_score(item) return ( include_match, + official_resource_match, + registered_domain_label_match, github_bonus, pdf_bonus, 
host_brand_match, @@ -2599,6 +3670,38 @@ def _resource_result_rank( title_score, ) + def _is_probably_official_resource_result( + self, + *, + mode: SearchMode, + hostname: str, + include_match: bool, + registered_domain_label_match: bool, + host_brand_match: bool, + title_brand_match: bool, + docs_shape_match: bool, + non_third_party: bool, + official_query: bool, + ) -> bool: + if include_match: + return True + if mode in {"github", "pdf"}: + return True + if not non_third_party: + return False + if official_query and registered_domain_label_match: + return True + if not docs_shape_match: + return False + official_host_surface = any( + part in {"api", "developer", "developers", "docs", "help", "platform", "reference", "support"} + for part in hostname.split(".") + if part + ) + return registered_domain_label_match or (host_brand_match and official_host_surface) or ( + title_brand_match and official_host_surface + ) + def _align_citations_with_results( self, *, @@ -2712,6 +3815,87 @@ def _domain_matches(self, hostname: str, domain: str) -> bool: cleaned_host == cleaned_domain or cleaned_host.endswith(f".{cleaned_domain}") ) + def _registered_domain_label_matches(self, *, registered_domain: str, query_tokens: list[str]) -> bool: + labels = [item for item in self._clean_hostname(registered_domain).split(".") if item] + return any( + label == token or label.startswith(f"{token}-") or label.startswith(f"{token}_") + for token in query_tokens + for label in labels + ) + + def _resource_result_flags( + self, + *, + mode: SearchMode, + item: dict[str, Any], + query_tokens: list[str], + include_domains: list[str] | None, + ) -> dict[str, Any]: + url = item.get("url", "") + hostname = self._result_hostname(item) + registered_domain = self._registered_domain(hostname) + title_text = (item.get("title") or "").lower() + include_match = bool( + include_domains + and any(self._domain_matches(hostname, domain) for domain in include_domains or []) + ) + host_brand_match = any( + 
token in hostname or token in registered_domain for token in query_tokens + ) + registered_domain_label_match = self._registered_domain_label_matches( + registered_domain=registered_domain, + query_tokens=query_tokens, + ) + title_brand_match = any(token in title_text for token in query_tokens) + docs_shape_match = self._looks_like_resource_result( + url=url, + hostname=hostname, + title_text=title_text, + mode=mode, + ) + non_third_party = not self._is_obvious_third_party_resource( + hostname=hostname, + registered_domain=registered_domain, + mode=mode, + ) + return { + "hostname": hostname, + "registered_domain": registered_domain, + "include_match": include_match, + "host_brand_match": host_brand_match, + "registered_domain_label_match": registered_domain_label_match, + "title_brand_match": title_brand_match, + "docs_shape_match": docs_shape_match, + "non_third_party": non_third_party, + } + + def _result_matches_official_policy( + self, + *, + item: dict[str, Any], + mode: SearchMode, + query_tokens: list[str], + include_domains: list[str] | None, + strict_official: bool, + ) -> bool: + flags = self._resource_result_flags( + mode=mode, + item=item, + query_tokens=query_tokens, + include_domains=include_domains, + ) + return self._is_probably_official_resource_result( + mode=mode, + hostname=str(flags["hostname"]), + include_match=bool(flags["include_match"]), + registered_domain_label_match=bool(flags["registered_domain_label_match"]), + host_brand_match=bool(flags["host_brand_match"]), + title_brand_match=bool(flags["title_brand_match"]), + docs_shape_match=bool(flags["docs_shape_match"]), + non_third_party=bool(flags["non_third_party"]), + official_query=strict_official, + ) + def _query_brand_tokens(self, query: str) -> list[str]: stopwords = { "a", @@ -2849,6 +4033,115 @@ def _is_obvious_third_party_resource( } return registered_domain in third_party_domains + def _collect_source_domains( + self, + *, + results: list[dict[str, Any]], + citations: 
list[dict[str, Any]], + ) -> list[str]: + domains: list[str] = [] + seen: set[str] = set() + for item in [*results, *citations]: + if not isinstance(item, dict): + continue + hostname = self._result_hostname(item) + registered_domain = self._registered_domain(hostname) + if not registered_domain or registered_domain in seen: + continue + seen.add(registered_domain) + domains.append(registered_domain) + return domains + + def _count_official_resource_results( + self, + *, + query: str, + mode: SearchMode, + intent: ResolvedSearchIntent, + results: list[dict[str, Any]], + include_domains: list[str] | None, + ) -> int: + official_mode = self._resolve_official_result_mode( + query=query, + mode=mode, + intent=intent, + include_domains=include_domains, + ) + if official_mode == "off" and not self._should_rerank_resource_results(mode=mode, intent=intent): + return 0 + query_tokens = self._query_brand_tokens(query) + strict_official = official_mode == "strict" + official_count = 0 + for item in results: + if self._result_matches_official_policy( + item=item, + mode=mode, + query_tokens=query_tokens, + include_domains=include_domains, + strict_official=strict_official, + ): + official_count += 1 + return official_count + + def _detect_evidence_conflicts( + self, + *, + mode: SearchMode, + intent: ResolvedSearchIntent, + results: list[dict[str, Any]], + include_domains: list[str] | None, + source_domains: list[str], + official_source_count: int, + providers_consulted: list[str], + official_mode: str, + ) -> list[str]: + conflicts: list[str] = [] + if len(source_domains) <= 1 and len(results) > 1: + conflicts.append("low-source-diversity") + if len(set(providers_consulted)) <= 1 and len(source_domains) <= 1 and results: + conflicts.append("single-provider-single-domain") + if self._should_rerank_resource_results(mode=mode, intent=intent): + if results and official_source_count <= 0: + conflicts.append("official-source-not-confirmed") + elif results and official_source_count 
< len(results): + conflicts.append("mixed-official-and-third-party") + if include_domains and not results: + conflicts.append("domain-filter-returned-empty") + if official_mode == "strict" and results and official_source_count <= 0: + conflicts.append("strict-official-unmet") + return conflicts + + def _estimate_search_confidence( + self, + *, + mode: SearchMode, + intent: ResolvedSearchIntent, + result_count: int, + source_domain_count: int, + official_source_count: int, + verification: str, + conflicts: list[str], + official_mode: str, + ) -> str: + if result_count <= 0: + return "low" + if official_mode == "strict" and official_source_count <= 0: + return "low" + if self._should_rerank_resource_results(mode=mode, intent=intent): + if official_source_count > 0 and "official-source-not-confirmed" not in conflicts: + if ( + verification == "cross-provider" + or (source_domain_count >= 2 and "mixed-official-and-third-party" not in conflicts) + ): + return "high" + return "medium" + return "medium" if source_domain_count >= 2 else "low" + if verification == "cross-provider" and source_domain_count >= 2: + return "high" + if source_domain_count >= 2: + return "medium" + return "low" if conflicts else "medium" + def _describe_provider( self, provider: ProviderConfig, @@ -2858,6 +4151,7 @@ def _describe_provider( return { "base_url": provider.base_url, "alternate_base_urls": provider.alternate_base_urls, + "provider_mode": provider.provider_mode, "auth_mode": provider.auth_mode, "paths": provider.default_paths, "search_mode": provider.search_mode, @@ -2872,6 +4166,15 @@ def _describe_provider( def _get_key_or_raise(self, provider: ProviderConfig): record = self.keyring.get_next(provider.name) if record is None: + if provider.name == "tavily": + raise MySearchError( + "Tavily is not configured. 
Use " + "MYSEARCH_TAVILY_MODE=gateway with MYSEARCH_TAVILY_GATEWAY_TOKEN " + "to consume an upstream gateway, or keep " + "MYSEARCH_TAVILY_MODE=official and import your own Tavily keys " + "with MYSEARCH_TAVILY_API_KEY / MYSEARCH_TAVILY_API_KEYS / " + "MYSEARCH_TAVILY_KEYS_FILE." + ) if provider.name == "xai": raise MySearchError( "xAI / Social search is not configured; MySearch can still use " @@ -2893,13 +4196,13 @@ def _request_json( provider: ProviderConfig, method: str, path: str, - payload: dict[str, Any], + payload: dict[str, Any] | None, key: str, base_url: str | None = None, timeout_seconds: int | None = None, ) -> dict[str, Any]: headers: dict[str, str] = {} - body = dict(payload) + body = dict(payload or {}) if provider.auth_mode == "bearer": token = key if not provider.auth_scheme else f"{provider.auth_scheme} {key}" @@ -2912,7 +4215,9 @@ def _request_json( url = f"{(base_url or provider.base_url)}{path}" headers.setdefault("Content-Type", "application/json") headers.setdefault("User-Agent", "MySearch/0.2") - request_body = json.dumps(body).encode("utf-8") + request_body = None + if method.upper() != "GET": + request_body = json.dumps(body).encode("utf-8") request = Request( url, data=request_body, @@ -2959,6 +4264,106 @@ def _request_json( ) return data + def _request_text( + self, + *, + url: str, + timeout_seconds: int | None = None, + ) -> tuple[int, str]: + request = Request( + url, + headers={ + "User-Agent": "MySearch/0.2", + "Accept": "text/html,application/json;q=0.9,*/*;q=0.8", + }, + method="GET", + ) + try: + with urlopen(request, timeout=timeout_seconds or self.config.timeout_seconds) as response: + return response.status, response.read().decode("utf-8", errors="replace") + except HTTPError as exc: + return exc.code, exc.read().decode("utf-8", errors="replace") + except (URLError, OSError) as exc: + raise MySearchError(str(exc)) from exc + + def _xai_probe_model(self) -> str: + return "grok-4.1-fast" + + def 
_derive_root_health_base_url(self, provider: ProviderConfig) -> str: + candidate = ( + provider.base_url_for("social_search") + or provider.base_url_for("social_health") + or provider.base_url + ) + parsed = urlparse(str(candidate or "").strip()) + if not parsed.scheme or not parsed.netloc: + return str(candidate or "").strip().rstrip("/") + return urlunparse((parsed.scheme, parsed.netloc, "", "", "", "")).rstrip("/") + + def _probe_xai_official_status_page(self, timeout_seconds: int) -> None: + status_url = "https://status.x.ai/" + status_code, response_text = self._request_text( + url=status_url, + timeout_seconds=timeout_seconds, + ) + if status_code >= 400: + raise MySearchHTTPError( + provider="xai", + status_code=status_code, + detail=f"status.x.ai returned HTTP {status_code}", + url=status_url, + ) + + lowered = " ".join(response_text.lower().split()) + if "all systems operational" in lowered: + return + + matches = re.findall( + r"api(?:\s*\([^)]*\))?[^a-z]{0,40}(available|operational|degraded|outage|unavailable|disrupted)", + lowered, + ) + if matches: + negative = {"degraded", "outage", "unavailable", "disrupted"} + if any(item in negative for item in matches): + raise MySearchError( + "status.x.ai reports xAI API is not fully available" + ) + return + + if "api" in lowered and "available" in lowered: + return + + raise MySearchError("unable to determine xAI API status from status.x.ai") + + def _probe_xai_official_via_responses( + self, + provider: ProviderConfig, + key: str, + timeout_seconds: int, + ) -> None: + fallback_timeout_seconds = min(self.config.timeout_seconds, 20) + self._request_json( + provider=provider, + method="POST", + path=provider.path("responses"), + payload=self._build_xai_responses_payload( + query="openai", + sources=["x"], + max_results=1, + include_domains=None, + exclude_domains=None, + allowed_x_handles=None, + excluded_x_handles=None, + from_date=None, + to_date=None, + include_x_images=False, + include_x_videos=False, + 
model=self._xai_probe_model(), + ), + key=key, + timeout_seconds=max(timeout_seconds, fallback_timeout_seconds), + ) + def _probe_provider_status( self, provider: ProviderConfig, @@ -3014,6 +4419,46 @@ def _probe_provider_status( } return result + def _probe_xai_compatible_gateway(self, provider: ProviderConfig, key: str, timeout_seconds: int) -> None: + health_path = "/health" + health_base_url = self._derive_root_health_base_url(provider) + try: + payload = self._request_json( + provider=provider, + method="GET", + path=health_path, + payload=None, + key=key, + base_url=health_base_url, + timeout_seconds=timeout_seconds, + ) + if isinstance(payload, dict) and payload.get("ok") is False: + detail = ( + payload.get("error") + or payload.get("detail") + or "social/X gateway health probe reported unavailable" + ) + raise MySearchError(str(detail)) + return + except (MySearchHTTPError, MySearchError): + pass + + fallback_timeout_seconds = min(self.config.timeout_seconds, 20) + self._request_json( + provider=provider, + method="POST", + path=provider.path("social_search"), + payload={ + "query": "openai", + "source": "x", + "max_results": 1, + "model": self._xai_probe_model(), + }, + key=key, + base_url=provider.base_url_for("social_search"), + timeout_seconds=fallback_timeout_seconds, + ) + def _probe_provider_request(self, provider: ProviderConfig, key: str) -> None: timeout_seconds = min(self.config.timeout_seconds, 10) if provider.name == "tavily": @@ -3061,40 +4506,24 @@ def _probe_provider_request(self, provider: ProviderConfig, key: str) -> None: return if provider.name == "xai": if provider.search_mode == "compatible": - self._request_json( + self._probe_xai_compatible_gateway(provider, key, timeout_seconds) + return + try: + self._probe_xai_official_status_page(timeout_seconds=timeout_seconds) + except MySearchError as exc: + if "not fully available" in str(exc): + raise + self._probe_xai_official_via_responses( + provider=provider, + key=key, + 
timeout_seconds=timeout_seconds, + ) + except MySearchHTTPError as exc: + self._probe_xai_official_via_responses( provider=provider, - method="POST", - path=provider.path("social_search"), - payload={ - "query": "openai", - "source": "x", - "max_results": 1, - }, key=key, - base_url=provider.base_url_for("social_search"), timeout_seconds=timeout_seconds, ) - return - self._request_json( - provider=provider, - method="POST", - path=provider.path("responses"), - payload=self._build_xai_responses_payload( - query="openai", - sources=["x"], - max_results=1, - include_domains=None, - exclude_domains=None, - allowed_x_handles=None, - excluded_x_handles=None, - from_date=None, - to_date=None, - include_x_images=False, - include_x_videos=False, - ), - key=key, - timeout_seconds=timeout_seconds, - ) return def _summarize_route_error(self, error_text: str) -> str: @@ -3298,13 +4727,11 @@ def _looks_like_docs_query(self, query_lower: str) -> bool: "documentation", "api reference", "changelog", - "pricing", "readme", "github", "manual", "文档", "接口", - "价格", "更新日志", ] return any(keyword in query_lower for keyword in keywords) diff --git a/openclaw/runtime/mysearch/config.py b/openclaw/runtime/mysearch/config.py index 9115dd8..acead3f 100644 --- a/openclaw/runtime/mysearch/config.py +++ b/openclaw/runtime/mysearch/config.py @@ -3,7 +3,8 @@ from __future__ import annotations import os -from dataclasses import dataclass, field +import sys +from dataclasses import dataclass as _dataclass, field from pathlib import Path from typing import Literal @@ -13,9 +14,16 @@ tomllib = None # type: ignore[assignment] +def dataclass(*args, **kwargs): + if sys.version_info < (3, 10): + kwargs.pop("slots", None) + return _dataclass(*args, **kwargs) + + MODULE_DIR = Path(__file__).resolve().parent ROOT_DIR = MODULE_DIR.parent AuthMode = Literal["bearer", "body"] +TavilyMode = Literal["official", "gateway"] XAISearchMode = Literal["official", "compatible"] MCPTransport = Literal["stdio", "sse", 
"streamable-http"] @@ -196,6 +204,46 @@ def _provider_path( return _normalize_path(default) +def _get_tavily_mode(proxy_base_url: str) -> TavilyMode: + explicit = _get_str("MYSEARCH_TAVILY_MODE") + if explicit: + return explicit # type: ignore[return-value] + if _get_str( + "MYSEARCH_TAVILY_GATEWAY_BASE_URL", + "MYSEARCH_TAVILY_GATEWAY_TOKEN", + "MYSEARCH_TAVILY_GATEWAY_API_KEY", + ) or _get_list("MYSEARCH_TAVILY_GATEWAY_TOKENS", "MYSEARCH_TAVILY_GATEWAY_API_KEYS"): + return "gateway" + return "gateway" if proxy_base_url else "official" + + +def _tavily_gateway_base_url(proxy_base_url: str, default: str) -> str: + explicit = _get_str("MYSEARCH_TAVILY_GATEWAY_BASE_URL") + if explicit: + return _normalize_base_url(explicit) + if proxy_base_url: + return _normalize_base_url(proxy_base_url) + return _normalize_base_url(default) + + +def _tavily_gateway_path( + *, + explicit_name: str, + explicit_gateway_base_url: str, + proxy_base_url: str, + proxy_default: str, + default: str, +) -> str: + explicit = _get_str(explicit_name) + if explicit: + return _normalize_path(explicit) + if explicit_gateway_base_url: + return _normalize_path(default) + if proxy_base_url: + return _normalize_path(proxy_default) + return _normalize_path(default) + + _bootstrap_runtime_env() @@ -209,6 +257,7 @@ class ProviderConfig: auth_field: str default_paths: dict[str, str] alternate_base_urls: dict[str, str] = field(default_factory=dict) + provider_mode: str = "" search_mode: XAISearchMode = "official" api_keys: list[str] = field(default_factory=list) keys_file: Path | None = None @@ -243,6 +292,12 @@ class MySearchConfig: def from_env(cls) -> "MySearchConfig": proxy_base_url = _get_str("MYSEARCH_PROXY_BASE_URL") proxy_api_key = _get_str("MYSEARCH_PROXY_API_KEY") + tavily_mode = _get_tavily_mode(proxy_base_url) + tavily_gateway_base_url = _get_str("MYSEARCH_TAVILY_GATEWAY_BASE_URL") + tavily_gateway_token = _get_str( + "MYSEARCH_TAVILY_GATEWAY_TOKEN", + "MYSEARCH_TAVILY_GATEWAY_API_KEY", + ) 
return cls( server_name=_get_str("MYSEARCH_NAME", "MYSEARCH_SERVER_NAME", default="MySearch"), timeout_seconds=_get_int("MYSEARCH_TIMEOUT_SECONDS", 45), @@ -263,44 +318,104 @@ def from_env(cls) -> "MySearchConfig": mcp_stateless_http=_get_bool("MYSEARCH_MCP_STATELESS_HTTP", False), tavily=ProviderConfig( name="tavily", - base_url=_provider_base_url( - explicit_names=("MYSEARCH_TAVILY_BASE_URL",), - proxy_base_url=proxy_base_url, - default="https://api.tavily.com", + base_url=( + _tavily_gateway_base_url( + proxy_base_url=proxy_base_url, + default="https://api.tavily.com", + ) + if tavily_mode == "gateway" + else _provider_base_url( + explicit_names=("MYSEARCH_TAVILY_BASE_URL",), + proxy_base_url="", + default="https://api.tavily.com", + ) ), - auth_mode=_get_str( - "MYSEARCH_TAVILY_AUTH_MODE", - default="bearer" if proxy_base_url else "body", + auth_mode=( + _get_str( + "MYSEARCH_TAVILY_GATEWAY_AUTH_MODE", + default="bearer", + ) + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_MODE", default="body") ), # type: ignore[arg-type] - auth_header=_get_str("MYSEARCH_TAVILY_AUTH_HEADER", default="Authorization"), - auth_scheme=_get_str("MYSEARCH_TAVILY_AUTH_SCHEME", default="Bearer"), - auth_field=_get_str("MYSEARCH_TAVILY_AUTH_FIELD", default="api_key"), + auth_header=( + _get_str("MYSEARCH_TAVILY_GATEWAY_AUTH_HEADER", default="Authorization") + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_HEADER", default="Authorization") + ), + auth_scheme=( + _get_str("MYSEARCH_TAVILY_GATEWAY_AUTH_SCHEME", default="Bearer") + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_SCHEME", default="Bearer") + ), + auth_field=( + _get_str("MYSEARCH_TAVILY_GATEWAY_AUTH_FIELD", default="api_key") + if tavily_mode == "gateway" + else _get_str("MYSEARCH_TAVILY_AUTH_FIELD", default="api_key") + ), default_paths={ - "search": _provider_path( - explicit_name="MYSEARCH_TAVILY_SEARCH_PATH", - proxy_base_url=proxy_base_url, - 
proxy_default="/api/search", - default="/search", + "search": ( + _tavily_gateway_path( + explicit_name="MYSEARCH_TAVILY_GATEWAY_SEARCH_PATH", + explicit_gateway_base_url=tavily_gateway_base_url, + proxy_base_url=proxy_base_url, + proxy_default="/api/search", + default="/search", + ) + if tavily_mode == "gateway" + else _provider_path( + explicit_name="MYSEARCH_TAVILY_SEARCH_PATH", + proxy_base_url="", + proxy_default="/api/search", + default="/search", + ) ), - "extract": _provider_path( - explicit_name="MYSEARCH_TAVILY_EXTRACT_PATH", - proxy_base_url=proxy_base_url, - proxy_default="/api/extract", - default="/extract", + "extract": ( + _tavily_gateway_path( + explicit_name="MYSEARCH_TAVILY_GATEWAY_EXTRACT_PATH", + explicit_gateway_base_url=tavily_gateway_base_url, + proxy_base_url=proxy_base_url, + proxy_default="/api/extract", + default="/extract", + ) + if tavily_mode == "gateway" + else _provider_path( + explicit_name="MYSEARCH_TAVILY_EXTRACT_PATH", + proxy_base_url="", + proxy_default="/api/extract", + default="/extract", + ) ), }, + provider_mode=tavily_mode, api_keys=[ - *_get_list("MYSEARCH_TAVILY_API_KEYS"), + *( + _get_list( + "MYSEARCH_TAVILY_GATEWAY_TOKENS", + "MYSEARCH_TAVILY_GATEWAY_API_KEYS", + ) + if tavily_mode == "gateway" + else _get_list("MYSEARCH_TAVILY_API_KEYS") + ), + *( + [tavily_gateway_token] + if tavily_mode == "gateway" and tavily_gateway_token + else ([proxy_api_key] if tavily_mode == "gateway" and proxy_api_key else []) + ), *( [_get_str("MYSEARCH_TAVILY_API_KEY")] - if _get_str("MYSEARCH_TAVILY_API_KEY") - else ([proxy_api_key] if proxy_api_key else []) + if tavily_mode != "gateway" and _get_str("MYSEARCH_TAVILY_API_KEY") + else [] ), ], - keys_file=_resolve_path( - "MYSEARCH_TAVILY_KEYS_FILE", - "MYSEARCH_TAVILY_ACCOUNTS_FILE", - default_name="accounts.txt", + keys_file=( + None + if tavily_mode == "gateway" + else _resolve_path( + "MYSEARCH_TAVILY_KEYS_FILE", + "MYSEARCH_TAVILY_ACCOUNTS_FILE", + default_name="accounts.txt", + ) ), 
), firecrawl=ProviderConfig( @@ -400,11 +515,20 @@ def from_env(cls) -> "MySearchConfig": proxy_default="/social/search", default="/social/search", ), + "social_health": _provider_path( + explicit_name="MYSEARCH_XAI_SOCIAL_HEALTH_PATH", + proxy_base_url=proxy_base_url, + proxy_default="/social/health", + default="/social/health", + ), }, alternate_base_urls={ "social_search": _normalize_base_url( _get_str("MYSEARCH_XAI_SOCIAL_BASE_URL") or proxy_base_url - ) + ), + "social_health": _normalize_base_url( + _get_str("MYSEARCH_XAI_SOCIAL_BASE_URL") or proxy_base_url + ), }, search_mode=_get_str( "MYSEARCH_XAI_SEARCH_MODE", diff --git a/openclaw/runtime/mysearch/keyring.py b/openclaw/runtime/mysearch/keyring.py index 7331ad7..90864d6 100644 --- a/openclaw/runtime/mysearch/keyring.py +++ b/openclaw/runtime/mysearch/keyring.py @@ -2,12 +2,19 @@ from __future__ import annotations -from dataclasses import dataclass +import sys +from dataclasses import dataclass as _dataclass from threading import Lock from mysearch.config import MySearchConfig, ProviderConfig +def dataclass(*args, **kwargs): + if sys.version_info < (3, 10): + kwargs.pop("slots", None) + return _dataclass(*args, **kwargs) + + @dataclass(frozen=True, slots=True) class KeyRecord: provider: str diff --git a/openclaw/scripts/install_openclaw_skill.sh b/openclaw/scripts/install_openclaw_skill.sh index 344bab7..fcb6e74 100755 --- a/openclaw/scripts/install_openclaw_skill.sh +++ b/openclaw/scripts/install_openclaw_skill.sh @@ -84,7 +84,8 @@ What changed: Next steps: 1. Prefer injecting env via OpenClaw skill config instead of copying secrets into the skill folder 2. Minimal trusted setup: MYSEARCH_PROXY_BASE_URL + MYSEARCH_PROXY_API_KEY -3. If you do not have a proxy yet, fall back to MYSEARCH_TAVILY_API_KEY + MYSEARCH_FIRECRAWL_API_KEY -4. Only use --copy-env or $TARGET_DIR/.env for local debugging -5. Run: python3 $TARGET_DIR/scripts/mysearch_openclaw.py health +3. 
If you want Tavily to consume an upstream gateway, set MYSEARCH_TAVILY_MODE=gateway + MYSEARCH_TAVILY_GATEWAY_BASE_URL + MYSEARCH_TAVILY_GATEWAY_TOKEN +4. If you do not have a proxy yet, fall back to MYSEARCH_TAVILY_API_KEY + MYSEARCH_FIRECRAWL_API_KEY +5. Only use --copy-env or $TARGET_DIR/.env for local debugging +6. Run: python3 $TARGET_DIR/scripts/mysearch_openclaw.py health EOF diff --git a/proxy/.dockerignore b/proxy/.dockerignore index 686f68c..656e73d 100644 --- a/proxy/.dockerignore +++ b/proxy/.dockerignore @@ -1,7 +1,16 @@ __pycache__/ *.pyc *.pyo +*.pyd +*.db *.sqlite *.sqlite3 +.DS_Store data/ .env +.env.* +.venv/ +venv/ +README.md +README_EN.md +docker-compose.yml diff --git a/proxy/Dockerfile b/proxy/Dockerfile index f5e149a..bb34cff 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -1,4 +1,5 @@ FROM python:3.11-slim +ENV MYSEARCH_PROXY_DB_PATH=/data/proxy.db WORKDIR /app COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt diff --git a/proxy/README.md b/proxy/README.md index f94451e..f04898d 100644 --- a/proxy/README.md +++ b/proxy/README.md @@ -153,7 +153,7 @@ docker run -d \ --restart unless-stopped \ -p 9874:9874 \ -e ADMIN_PASSWORD=change-me \ - -v $(pwd)/mysearch-proxy-data:/app/data \ + -v $(pwd)/mysearch-proxy-data:/data \ skernelx/mysearch-proxy:latest ``` @@ -170,7 +170,39 @@ cd proxy docker compose up -d ``` -### 方式 C:本地源码运行 +### 方式 C:仓库根目录一套部署 `proxy + mysearch` + +```bash +cd /path/to/MySearch-Proxy +docker compose up -d +``` + +这套 compose 现在会自动通过 `MYSEARCH_PROXY_BOOTSTRAP_TOKEN` 给 `mysearch` 创建或复用一个专用的 `mysp-` token,不需要再手动先创建 MySearch 通用 token 才能拉起远程 MCP。首次进入控制台后,仍然只需要补 provider 配置和 usage sync。 + +启动后: + +- 控制台:`http://localhost:9874` +- MySearch MCP:`http://localhost:8000/mcp` + +### 方式 D:单容器一体化镜像 + +```bash +docker run -d \ + --name mysearch-stack \ + --restart unless-stopped \ + -p 9874:9874 \ + -p 8000:8000 \ + -e ADMIN_PASSWORD=change-me \ + -e MYSEARCH_PROXY_BOOTSTRAP_TOKEN=change-me-bootstrap-token \ + 
-v $(pwd)/mysearch-proxy-data:/data \ + skernelx/mysearch-stack:latest +``` + +这个镜像会在同一个容器里同时启动 `proxy` 和 `mysearch`,并通过本地回环地址自动完成 token bootstrap。适合你更看重“部署步骤最少”而不是“服务边界最清晰”的场景。 + +默认情况下,`proxy` 会对外监听 `9874`,`mysearch` 会对外监听 `8000/mcp`;`mysearch` 自己仍然通过容器内 `127.0.0.1:9874` 回连 Proxy。 + +### 方式 E:本地源码运行 ```bash cd proxy @@ -191,6 +223,8 @@ ADMIN_PASSWORD=change-me uvicorn server:app --host 0.0.0.0 --port 9874 当前控制台已经带密码登录,不再适合匿名裸放在公网。 +持久化目录现在统一建议挂到 `/data`。无论你跑独立 `mysearch-proxy` 还是单容器 `mysearch-stack`,都保持 `-v ...:/data`,不要再混用 `/app/data` 和 `/app/proxy/data`,否则升级重建容器时会像“数据丢失”,实际只是读到了另一份空 SQLite。 + ## 下游怎么接 ### 给 `mysearch/` MCP @@ -242,9 +276,9 @@ MYSEARCH_PROXY_API_KEY=mysp-... 默认数据目录: - Docker compose - - `./data` + - 宿主目录 `./data` 挂到容器内 `/data` - `docker run` 示例 - - `$(pwd)/mysearch-proxy-data` + - 宿主目录 `$(pwd)/mysearch-proxy-data` 挂到容器内 `/data` ## 认证与安全 diff --git a/proxy/README_EN.md b/proxy/README_EN.md index cb458a5..fe8d7dd 100644 --- a/proxy/README_EN.md +++ b/proxy/README_EN.md @@ -208,7 +208,7 @@ docker run -d \ --restart unless-stopped \ -p 9874:9874 \ -e ADMIN_PASSWORD=your-admin-password \ - -v $(pwd)/mysearch-proxy-data:/app/data \ + -v $(pwd)/mysearch-proxy-data:/data \ your-registry/mysearch-proxy:latest ``` @@ -248,11 +248,11 @@ docker run -d \ --restart unless-stopped \ -p 9874:9874 \ -e ADMIN_PASSWORD=your-admin-password \ - -v /your/data/path:/app/data \ + -v /your/data/path:/data \ your-registry/mysearch-proxy:latest ``` -As long as the `/app/data` volume is preserved, your existing: +As long as the `/data` volume is preserved, your existing: - keys - tokens @@ -261,6 +261,8 @@ As long as the `/app/data` volume is preserved, your existing: remain available. +Use `/data` consistently for both the standalone `mysearch-proxy` container and the single-container `mysearch-stack`. Do not mix `/app/data` and `/app/proxy/data`, or an upgrade/recreate will appear to "lose" data while actually switching to a different empty SQLite file. 
+ ## Configuration Baseline console config: diff --git a/proxy/database.py b/proxy/database.py index d6f6258..ff7dec8 100644 --- a/proxy/database.py +++ b/proxy/database.py @@ -8,7 +8,7 @@ import string from datetime import datetime, timezone -DB_PATH = os.path.join(os.path.dirname(__file__), "data", "proxy.db") +DEFAULT_DB_PATH = os.path.join(os.path.dirname(__file__), "data", "proxy.db") SUPPORTED_SERVICES = ("tavily", "firecrawl", "exa") TOKEN_SERVICES = SUPPORTED_SERVICES + ("mysearch",) TOKEN_PREFIX = { @@ -50,9 +50,15 @@ def normalize_token_service(service): return service +def get_db_path(): + configured = (os.environ.get("MYSEARCH_PROXY_DB_PATH") or "").strip() + return configured or DEFAULT_DB_PATH + + def get_conn(): - os.makedirs(os.path.dirname(DB_PATH), exist_ok=True) - conn = sqlite3.connect(DB_PATH) + db_path = get_db_path() + os.makedirs(os.path.dirname(db_path), exist_ok=True) + conn = sqlite3.connect(db_path) conn.row_factory = sqlite3.Row conn.execute("PRAGMA journal_mode=WAL") return conn @@ -374,6 +380,18 @@ def get_token_by_value(token_value): conn.close() +def get_token_by_name(name, service="tavily"): + service = normalize_token_service(service) + conn = get_conn() + try: + return conn.execute( + "SELECT * FROM tokens WHERE service = ? AND name = ? 
ORDER BY id LIMIT 1", + (service, name), + ).fetchone() + finally: + conn.close() + + def delete_token(token_id): conn = get_conn() try: diff --git a/proxy/docker-compose.yml b/proxy/docker-compose.yml index 2e0daa9..6242f66 100644 --- a/proxy/docker-compose.yml +++ b/proxy/docker-compose.yml @@ -5,6 +5,7 @@ services: - "9874:9874" environment: - ADMIN_PASSWORD=change-me + - MYSEARCH_PROXY_DB_PATH=/data/proxy.db volumes: - - ./data:/app/data + - ./data:/data restart: unless-stopped diff --git a/proxy/server.py b/proxy/server.py index 556df97..1356165 100644 --- a/proxy/server.py +++ b/proxy/server.py @@ -8,12 +8,14 @@ import os import re import time +from contextlib import asynccontextmanager from datetime import datetime, timezone -from urllib.parse import urlparse +from urllib.parse import urlparse, urlunparse import httpx from fastapi import Depends, FastAPI, HTTPException, Request from fastapi.responses import HTMLResponse, JSONResponse, Response +from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates import database as db @@ -22,7 +24,10 @@ ADMIN_PASSWORD = os.environ.get("ADMIN_PASSWORD", "admin") ADMIN_SESSION_COOKIE = os.environ.get("ADMIN_SESSION_COOKIE", "mysearch_proxy_session") ADMIN_SESSION_MAX_AGE = max(300, int(os.environ.get("ADMIN_SESSION_MAX_AGE", "2592000"))) +MYSEARCH_PROXY_BOOTSTRAP_TOKEN = os.environ.get("MYSEARCH_PROXY_BOOTSTRAP_TOKEN", "").strip() TAVILY_API_BASE = "https://api.tavily.com" +TAVILY_SEARCH_PATH = "/search" +TAVILY_EXTRACT_PATH = "/extract" FIRECRAWL_API_BASE = "https://api.firecrawl.dev" EXA_API_BASE = "https://api.exa.ai" @@ -40,6 +45,140 @@ def _derive_social_gateway_admin_base_url(upstream_base_url): return upstream_base_url +def _is_hikari_access_token(value): + return str(value or "").strip().startswith("th-") + + +def _build_tavily_upstream_url(base_url, path, api_key=""): + normalized_base = str(base_url or TAVILY_API_BASE).strip().rstrip("/") or TAVILY_API_BASE + normalized_path = 
_normalize_path(path, TAVILY_SEARCH_PATH) + parsed = urlparse(normalized_base) + base_path = parsed.path.rstrip("/") + + if normalized_path == "/api/tavily" or normalized_path.startswith("/api/tavily/"): + return f"{normalized_base}{normalized_path}" + + if _is_hikari_access_token(api_key) and not base_path.endswith("/api/tavily"): + return f"{normalized_base}/api/tavily{normalized_path}" + + return f"{normalized_base}{normalized_path}" + + +def _build_tavily_hikari_gateway_url(base_url, path): + normalized_base = str(base_url or TAVILY_API_BASE).strip().rstrip("/") or TAVILY_API_BASE + normalized_path = _normalize_path(path, TAVILY_SEARCH_PATH) + parsed = urlparse(normalized_base) + base_path = parsed.path.rstrip("/") + if base_path.endswith("/api/tavily"): + return f"{normalized_base}{normalized_path}" + return f"{normalized_base}/api/tavily{normalized_path}" + + +def _should_retry_tavily_hikari_compat(response, current_url, effective_mode): + if effective_mode != "upstream": + return False + if getattr(response, "status_code", None) != 404: + return False + current_path = urlparse(str(current_url or "")).path + return "/api/tavily/" not in current_path and not current_path.endswith("/api/tavily") + + +async def _post_tavily_with_gateway_fallback(*, base_url, path, api_key, payload, effective_mode): + request_target = _build_tavily_upstream_url(base_url, path, api_key) + response = await http_client.post(request_target, json=payload) + if _should_retry_tavily_hikari_compat(response, request_target, effective_mode): + retry_target = _build_tavily_hikari_gateway_url(base_url, path) + retry_response = await http_client.post(retry_target, json=payload) + return retry_response, retry_target, True + return response, request_target, False + + +def _build_tavily_hikari_public_url(base_url, target_path): + normalized_base = str(base_url or "").strip().rstrip("/") + if not normalized_base: + return "" + parsed = urlparse(normalized_base) + base_path = 
parsed.path.rstrip("/") + if base_path.endswith("/api/tavily"): + next_path = f"{base_path[:-len('/api/tavily')]}{target_path}" or target_path + elif not base_path: + next_path = target_path + else: + next_path = f"{base_path}{target_path}" + return urlunparse((parsed.scheme, parsed.netloc, next_path, "", "", "")) + + +def _looks_like_tavily_hikari_gateway(config): + base_url = str((config or {}).get("upstream_base_url") or "").strip() + api_key = str((config or {}).get("upstream_api_key") or "").strip() + base_path = urlparse(base_url).path.rstrip("/") + return _is_hikari_access_token(api_key) or base_path.endswith("/api/tavily") + + +async def fetch_tavily_upstream_summary(config): + request_target = _build_tavily_hikari_public_url(config.get("upstream_base_url"), "/api/summary") + payload = { + "available": False, + "detail": "", + "request_target": request_target, + "summary_source": "unavailable", + "total_keys": 0, + "active_keys": 0, + "exhausted_keys": 0, + "quarantined_keys": 0, + "total_requests": 0, + "success_count": 0, + "error_count": 0, + "quota_exhausted_count": 0, + "total_quota_limit": 0, + "total_quota_remaining": 0, + "last_activity": None, + } + if not _looks_like_tavily_hikari_gateway(config): + payload["detail"] = "当前上游未显式暴露 Tavily Hikari 公共摘要接口。" + return payload + if not request_target: + payload["detail"] = "当前上游地址为空,无法读取上游摘要。" + return payload + try: + response = await http_client.get(request_target) + data = response.json() if response.headers.get("content-type", "").startswith("application/json") else {} + if response.status_code >= 400: + detail = "" + if isinstance(data, dict): + detail = data.get("detail") or data.get("message") or "" + payload["detail"] = detail or response.text.strip()[:200] or f"HTTP {response.status_code}" + return payload + if not isinstance(data, dict): + payload["detail"] = "上游摘要返回了非 JSON 响应。" + return payload + active_keys = int(data.get("active_keys") or 0) + exhausted_keys = int(data.get("exhausted_keys") 
or 0) + quarantined_keys = int(data.get("quarantined_keys") or 0) + payload.update( + { + "available": True, + "detail": "已读取上游 Tavily Hikari 公共摘要。", + "summary_source": "hikari_public_summary", + "total_keys": active_keys + exhausted_keys + quarantined_keys, + "active_keys": active_keys, + "exhausted_keys": exhausted_keys, + "quarantined_keys": quarantined_keys, + "total_requests": int(data.get("total_requests") or 0), + "success_count": int(data.get("success_count") or 0), + "error_count": int(data.get("error_count") or 0), + "quota_exhausted_count": int(data.get("quota_exhausted_count") or 0), + "total_quota_limit": int(data.get("total_quota_limit") or 0), + "total_quota_remaining": int(data.get("total_quota_remaining") or 0), + "last_activity": data.get("last_activity"), + } + ) + return payload + except Exception as exc: + payload["detail"] = str(exc) + return payload + + SOCIAL_GATEWAY_UPSTREAM_BASE_URL = os.environ.get( "SOCIAL_GATEWAY_UPSTREAM_BASE_URL", "https://api.x.ai/v1", @@ -107,16 +246,47 @@ def _derive_social_gateway_admin_base_url(upstream_base_url): "mysearch": "MySearch", } -app = FastAPI(title="MySearch Proxy") +@asynccontextmanager +async def lifespan(_: FastAPI): + db.init_db() + try: + yield + finally: + await http_client.aclose() + + +app = FastAPI(title="MySearch Proxy", lifespan=lifespan) +app.mount("/static", StaticFiles(directory=os.path.join(os.path.dirname(__file__), "static")), name="static") templates = Jinja2Templates(directory=os.path.join(os.path.dirname(__file__), "templates")) http_client = httpx.AsyncClient(timeout=60) social_gateway_state_cache = {"expires_at": 0.0, "value": None} -social_gateway_state_lock = asyncio.Lock() +social_gateway_state_lock = None stats_payload_cache = {"expires_at": 0.0, "value": None} -stats_payload_lock = asyncio.Lock() +stats_payload_lock = None background_sync_tasks = {} background_sync_last_started = {} -background_sync_lock = asyncio.Lock() +background_sync_lock = None + + +def 
get_social_gateway_state_lock(): + global social_gateway_state_lock + if social_gateway_state_lock is None: + social_gateway_state_lock = asyncio.Lock() + return social_gateway_state_lock + + +def get_stats_payload_lock(): + global stats_payload_lock + if stats_payload_lock is None: + stats_payload_lock = asyncio.Lock() + return stats_payload_lock + + +def get_background_sync_lock(): + global background_sync_lock + if background_sync_lock is None: + background_sync_lock = asyncio.Lock() + return background_sync_lock def get_admin_password(): @@ -266,6 +436,268 @@ def get_runtime_social_config(): } +def get_runtime_tavily_config(): + mode = get_setting_text("tavily_mode", "auto").lower() + if mode not in {"auto", "pool", "upstream"}: + mode = "auto" + + upstream_base_url = ( + get_setting_text("tavily_upstream_base_url", TAVILY_API_BASE).rstrip("/") + or TAVILY_API_BASE + ) + return { + "mode": mode, + "upstream_base_url": upstream_base_url, + "upstream_search_path": _normalize_path( + get_setting_text("tavily_upstream_search_path", TAVILY_SEARCH_PATH), + TAVILY_SEARCH_PATH, + ), + "upstream_extract_path": _normalize_path( + get_setting_text("tavily_upstream_extract_path", TAVILY_EXTRACT_PATH), + TAVILY_EXTRACT_PATH, + ), + "upstream_api_key": get_setting_text("tavily_upstream_api_key", ""), + } + + +def resolve_tavily_runtime_mode(config, active_keys): + configured_mode = (config.get("mode") or "auto").lower() + active_key_count = len(active_keys or []) + if configured_mode == "upstream": + return {"configured_mode": configured_mode, "effective_mode": "upstream", "mode_source": "manual_upstream"} + if configured_mode == "pool": + return {"configured_mode": configured_mode, "effective_mode": "pool", "mode_source": "manual_pool"} + if config.get("upstream_api_key"): + return {"configured_mode": configured_mode, "effective_mode": "upstream", "mode_source": "auto_upstream"} + if active_key_count > 0: + return {"configured_mode": configured_mode, "effective_mode": 
"pool", "mode_source": "auto_pool"} + return {"configured_mode": configured_mode, "effective_mode": "pool", "mode_source": "auto_pending"} + + +def build_tavily_routing_meta(config, active_keys): + resolved = resolve_tavily_runtime_mode(config, active_keys) + using_upstream = resolved["effective_mode"] == "upstream" + if resolved["mode_source"] == "auto_upstream": + summary = "当前 Tavily 处于自动识别模式;检测到上游凭证后,已自动切到上游 Gateway。" + elif resolved["mode_source"] == "auto_pool": + summary = "当前 Tavily 处于自动识别模式;检测到本地可用 Key 后,已自动切到 API Key 池。" + elif resolved["mode_source"] == "auto_pending": + summary = "当前 Tavily 处于自动识别模式;暂时没有检测到上游凭证或本地可用 Key。" + elif using_upstream: + summary = "当前 Tavily 手动固定走上游 Gateway;切回 API Key 池模式后才会重新使用这里导入的 Tavily keys。" + else: + summary = "当前 Tavily 手动固定走 API Key 池,请求会从已导入的 Tavily keys 中轮询。" + return { + "mode": config["mode"], + "effective_mode": resolved["effective_mode"], + "mode_source": resolved["mode_source"], + "upstream_base_url": config["upstream_base_url"], + "upstream_search_path": config["upstream_search_path"], + "upstream_extract_path": config["upstream_extract_path"], + "upstream_api_key_configured": bool(config["upstream_api_key"]), + "local_key_count": len(active_keys), + "summary": summary, + } + + +def build_candidate_tavily_config(body): + config = get_runtime_tavily_config() + if not isinstance(body, dict): + return config + + if "mode" in body: + mode = str(body.get("mode") or "").strip().lower() or "auto" + if mode not in {"auto", "pool", "upstream"}: + raise HTTPException(status_code=400, detail="mode must be 'auto', 'pool' or 'upstream'") + config["mode"] = mode + + if "upstream_base_url" in body: + config["upstream_base_url"] = str(body.get("upstream_base_url") or "").strip().rstrip("/") or TAVILY_API_BASE + if "upstream_search_path" in body: + config["upstream_search_path"] = _normalize_path(body.get("upstream_search_path"), TAVILY_SEARCH_PATH) + if "upstream_extract_path" in body: + config["upstream_extract_path"] = 
_normalize_path(body.get("upstream_extract_path"), TAVILY_EXTRACT_PATH) + if body.get("clear_upstream_api_key"): + config["upstream_api_key"] = "" + elif "upstream_api_key" in body: + config["upstream_api_key"] = str(body.get("upstream_api_key") or "").strip() + return config + + +def build_candidate_social_config(body): + config = dict(get_runtime_social_config()) + if not isinstance(body, dict): + return config + + text_fields = { + "upstream_base_url": "upstream_base_url", + "admin_base_url": "admin_base_url", + "model": "model", + "fallback_model": "fallback_model", + } + path_fields = { + "upstream_responses_path": ("upstream_responses_path", SOCIAL_GATEWAY_UPSTREAM_RESPONSES_PATH), + "admin_verify_path": ("admin_verify_path", SOCIAL_GATEWAY_ADMIN_VERIFY_PATH), + "admin_config_path": ("admin_config_path", SOCIAL_GATEWAY_ADMIN_CONFIG_PATH), + "admin_tokens_path": ("admin_tokens_path", SOCIAL_GATEWAY_ADMIN_TOKENS_PATH), + } + secret_fields = { + "admin_app_key": "admin_app_key", + "upstream_api_key": "upstream_api_key", + "gateway_token": "gateway_token", + } + + for body_key, config_key in text_fields.items(): + if body_key not in body: + continue + value = str(body.get(body_key) or "").strip() + if body_key.endswith("base_url"): + value = value.rstrip("/") + config[config_key] = value + + for body_key, (config_key, default_value) in path_fields.items(): + if body_key not in body: + continue + config[config_key] = _normalize_path(body.get(body_key), default_value) + + for body_key, config_key in secret_fields.items(): + if body.get(f"clear_{body_key}"): + config[config_key] = "" + elif body_key in body: + config[config_key] = str(body.get(body_key) or "").strip() + + if "cache_ttl_seconds" in body: + try: + config["cache_ttl_seconds"] = max(5, int(body.get("cache_ttl_seconds") or SOCIAL_GATEWAY_CACHE_TTL_SECONDS)) + except (TypeError, ValueError): + raise HTTPException(status_code=400, detail="cache_ttl_seconds must be an integer") + + if "fallback_min_results" 
in body: + try: + config["fallback_min_results"] = max( + 1, + int(body.get("fallback_min_results") or SOCIAL_GATEWAY_FALLBACK_MIN_RESULTS), + ) + except (TypeError, ValueError): + raise HTTPException(status_code=400, detail="fallback_min_results must be an integer") + + if not config["upstream_base_url"]: + config["upstream_base_url"] = SOCIAL_GATEWAY_UPSTREAM_BASE_URL + if not config["admin_base_url"]: + config["admin_base_url"] = _derive_social_gateway_admin_base_url(config["upstream_base_url"]) + if not config["model"]: + config["model"] = SOCIAL_GATEWAY_MODEL + if not config["fallback_model"]: + config["fallback_model"] = SOCIAL_GATEWAY_FALLBACK_MODEL + return config + + +async def probe_tavily_connection(config, active_keys): + resolved = resolve_tavily_runtime_mode(config, active_keys) + upstream_base_url = config["upstream_base_url"] if resolved["effective_mode"] == "upstream" else TAVILY_API_BASE + upstream_path = config["upstream_search_path"] if resolved["effective_mode"] == "upstream" else TAVILY_SEARCH_PATH + auth_source = "上游 Gateway token" if resolved["effective_mode"] == "upstream" else "本地 API Key 池" + + key_value = "" + if resolved["effective_mode"] == "upstream": + key_value = config["upstream_api_key"] + elif active_keys: + key_value = active_keys[0]["key"] + + test_url = _build_tavily_upstream_url(upstream_base_url, upstream_path, key_value) + + if not key_value: + return { + "ok": False, + "configured_mode": resolved["configured_mode"], + "effective_mode": resolved["effective_mode"], + "mode_source": resolved["mode_source"], + "local_key_count": len(active_keys), + "summary": build_tavily_routing_meta(config, active_keys)["summary"], + "detail": "当前没有可用的上游 key 或本地 API Key,无法执行 live probe。", + "failure_reason": "当前没有可用的上游 key 或本地 API Key。", + "probe_url": test_url, + "request_target": test_url, + "auth_source": auth_source if resolved["effective_mode"] == "upstream" else f"{auth_source}(活跃 {len(active_keys)})", + "status_label": "未执行 live 
probe", + "recommendation": "配置上游 token,或者导入至少一个活跃的 Tavily API Key 后再测试。", + "status_code": None, + } + + request_body = { + "query": "healthcheck", + "search_depth": "basic", + "max_results": 1, + "api_key": key_value, + } + try: + response, request_target, fallback_used = await _post_tavily_with_gateway_fallback( + base_url=upstream_base_url, + path=upstream_path, + api_key=key_value, + payload=request_body, + effective_mode=resolved["effective_mode"], + ) + except Exception as exc: + return { + "ok": False, + "configured_mode": resolved["configured_mode"], + "effective_mode": resolved["effective_mode"], + "mode_source": resolved["mode_source"], + "local_key_count": len(active_keys), + "summary": build_tavily_routing_meta(config, active_keys)["summary"], + "detail": str(exc), + "failure_reason": str(exc), + "probe_url": test_url, + "request_target": test_url, + "auth_source": auth_source if resolved["effective_mode"] == "upstream" else f"{auth_source}(活跃 {len(active_keys)})", + "status_label": "请求未发出", + "recommendation": "检查上游 Base URL / Search Path 是否可达,或者确认本地网络能访问 Tavily 官方接口。", + "status_code": None, + } + + detail = "" + try: + payload = response.json() + if isinstance(payload, dict): + detail = ( + payload.get("detail") + or payload.get("message") + or f"返回 {len(payload.get('results') or [])} 条结果" + ) + except Exception: + detail = response.text.strip()[:200] + + return { + "ok": response.status_code < 400, + "configured_mode": resolved["configured_mode"], + "effective_mode": resolved["effective_mode"], + "mode_source": resolved["mode_source"], + "local_key_count": len(active_keys), + "summary": build_tavily_routing_meta(config, active_keys)["summary"], + "detail": ( + f"{detail or f'HTTP {response.status_code}'}(已自动回退到 /api/tavily 兼容路径)" + if fallback_used and response.status_code < 400 + else detail or f"HTTP {response.status_code}" + ), + "failure_reason": "" if response.status_code < 400 else (detail or f"HTTP {response.status_code}"), + "probe_url": 
test_url, + "request_target": request_target, + "auth_source": auth_source if resolved["effective_mode"] == "upstream" else f"{auth_source}(活跃 {len(active_keys)})", + "status_label": f"HTTP {response.status_code}", + "recommendation": ( + "当前链路可用;如果你想固定行为,可以继续保持当前模式。" + if response.status_code < 400 + else ( + "检查上游 token、Base URL 与 Search Path;如果上游是 Tavily Hikari,Base URL 建议填写到主机根或 /api/tavily,系统会自动兼容。" + if resolved["effective_mode"] == "upstream" + else "检查本地 Tavily API Key 是否仍然有效,必要时切到上游 Gateway。" + ) + ), + "status_code": response.status_code, + } + + # ═══ Auth helpers ═══ def verify_admin(request: Request): @@ -277,6 +709,21 @@ def verify_admin(request: Request): raise HTTPException(status_code=401, detail="Unauthorized") +def verify_mysearch_bootstrap(request: Request): + expected = MYSEARCH_PROXY_BOOTSTRAP_TOKEN.strip() + if not expected: + raise HTTPException(status_code=404, detail="Bootstrap endpoint is disabled") + + provided = request.headers.get("X-Bootstrap-Token", "").strip() + auth = request.headers.get("Authorization", "").strip() + if auth.startswith("Bearer "): + provided = auth[7:].strip() or provided + + if provided and hmac.compare_digest(provided, expected): + return True + raise HTTPException(status_code=401, detail="Unauthorized") + + def extract_token(request: Request, body: dict = None): """从请求中提取代理 token。""" auth = request.headers.get("Authorization", "") @@ -341,12 +788,27 @@ def mask_secret(value): return f"{value[:6]}***{value[-4:]}" +def unwrap_social_tokens_payload(tokens_payload): + if isinstance(tokens_payload, dict): + for key_name in ("tokens", "data", "items", "result", "pools"): + candidate = tokens_payload.get(key_name) + if isinstance(candidate, dict): + return candidate + if isinstance(candidate, list): + return {"default": candidate} + return tokens_payload + if isinstance(tokens_payload, list): + return {"default": tokens_payload} + return {} + + def flatten_social_tokens(tokens_payload): flat = [] - if not 
isinstance(tokens_payload, dict): + normalized = unwrap_social_tokens_payload(tokens_payload) + if not isinstance(normalized, dict): return flat - for pool_name, items in tokens_payload.items(): + for pool_name, items in normalized.items(): if not isinstance(items, list): continue for item in items: @@ -441,6 +903,33 @@ def build_social_token_source(state): return "not_configured" +def build_social_upstream_visibility(state): + upstream_api_key_count = len(state.get("upstream_api_keys") or []) + accepted_token_count = len(state.get("accepted_tokens") or []) + can_proxy_search = bool(state.get("resolved_upstream_api_key") and state.get("accepted_tokens")) + if state.get("admin_connected"): + level = "full" + detail = "已通过后台 admin 接口拉取配置与 token 池,可展示完整上游 token 统计。" + elif can_proxy_search: + level = "basic" + detail = "当前只拿到了基础接线信息,可确认上游 key 与客户端 token 数量,但还没有后台 token 详情。" + elif upstream_api_key_count or accepted_token_count: + level = "partial" + detail = "当前只解析到部分鉴权信息,尚不能稳定转发搜索。" + else: + level = "none" + detail = "当前还没有拿到可用于上游搜索的基础鉴权信息。" + return { + "level": level, + "detail": detail, + "can_proxy_search": can_proxy_search, + "upstream_api_key_count": upstream_api_key_count, + "accepted_token_count": accepted_token_count, + "admin_connected": bool(state.get("admin_connected")), + "token_source": state.get("token_source") or "not_configured", + } + + async def fetch_social_admin_json(config, path): if not config["admin_app_key"]: raise RuntimeError("Missing SOCIAL_GATEWAY_ADMIN_APP_KEY") @@ -462,72 +951,76 @@ async def fetch_social_admin_json(config, path): return payload if isinstance(payload, dict) else {} +async def resolve_social_gateway_state_for_config(config): + state = { + "upstream_base_url": config["upstream_base_url"], + "upstream_responses_path": config["upstream_responses_path"], + "admin_base_url": config["admin_base_url"], + "admin_verify_path": config["admin_verify_path"], + "admin_config_path": config["admin_config_path"], + "admin_tokens_path": 
config["admin_tokens_path"], + "admin_configured": bool(config["admin_base_url"] and config["admin_app_key"]), + "admin_connected": False, + "manual_upstream_key": bool(config["upstream_api_key"]), + "manual_gateway_token": bool(config["gateway_token"]), + "upstream_api_keys": parse_secret_values(config["upstream_api_key"]), + "accepted_tokens": parse_secret_values(config["gateway_token"]), + "admin_api_keys": [], + "resolved_upstream_api_key": "", + "default_client_token": "", + "token_source": "", + "mode": "manual", + "model": config["model"], + "fallback_model": config["fallback_model"], + "fallback_min_results": config["fallback_min_results"], + "cache_ttl_seconds": config["cache_ttl_seconds"], + "stats": build_empty_social_stats(), + "error": "", + } + + if state["admin_configured"]: + try: + admin_config, admin_tokens = await asyncio.gather( + fetch_social_admin_json(config, config["admin_config_path"]), + fetch_social_admin_json(config, config["admin_tokens_path"]), + ) + app_api_keys = parse_secret_values((admin_config.get("app") or {}).get("api_key")) + state["admin_connected"] = True + state["admin_api_keys"] = app_api_keys + if not state["upstream_api_keys"]: + state["upstream_api_keys"] = app_api_keys + if not state["accepted_tokens"]: + state["accepted_tokens"] = app_api_keys + state["stats"] = build_social_token_stats(admin_tokens) + except Exception as exc: + state["error"] = str(exc) + + if not state["accepted_tokens"] and state["upstream_api_keys"]: + state["accepted_tokens"] = list(state["upstream_api_keys"]) + + state["upstream_api_keys"] = unique_preserve_order(state["upstream_api_keys"]) + state["accepted_tokens"] = unique_preserve_order(state["accepted_tokens"]) + state["resolved_upstream_api_key"] = state["upstream_api_keys"][0] if state["upstream_api_keys"] else "" + state["default_client_token"] = state["accepted_tokens"][0] if state["accepted_tokens"] else "" + state["token_source"] = build_social_token_source(state) + state["mode"] = 
build_social_gateway_mode(state) + return state + + async def resolve_social_gateway_state(force=False): now = time.time() cached = social_gateway_state_cache.get("value") if not force and cached and social_gateway_state_cache.get("expires_at", 0) > now: return cached - async with social_gateway_state_lock: + async with get_social_gateway_state_lock(): now = time.time() cached = social_gateway_state_cache.get("value") if not force and cached and social_gateway_state_cache.get("expires_at", 0) > now: return cached config = get_runtime_social_config() - state = { - "upstream_base_url": config["upstream_base_url"], - "upstream_responses_path": config["upstream_responses_path"], - "admin_base_url": config["admin_base_url"], - "admin_verify_path": config["admin_verify_path"], - "admin_config_path": config["admin_config_path"], - "admin_tokens_path": config["admin_tokens_path"], - "admin_configured": bool(config["admin_base_url"] and config["admin_app_key"]), - "admin_connected": False, - "manual_upstream_key": bool(config["upstream_api_key"]), - "manual_gateway_token": bool(config["gateway_token"]), - "upstream_api_keys": parse_secret_values(config["upstream_api_key"]), - "accepted_tokens": parse_secret_values(config["gateway_token"]), - "admin_api_keys": [], - "resolved_upstream_api_key": "", - "default_client_token": "", - "token_source": "", - "mode": "manual", - "model": config["model"], - "fallback_model": config["fallback_model"], - "fallback_min_results": config["fallback_min_results"], - "cache_ttl_seconds": config["cache_ttl_seconds"], - "stats": build_empty_social_stats(), - "error": "", - } - - if state["admin_configured"]: - try: - admin_config, admin_tokens = await asyncio.gather( - fetch_social_admin_json(config, config["admin_config_path"]), - fetch_social_admin_json(config, config["admin_tokens_path"]), - ) - app_api_keys = parse_secret_values((admin_config.get("app") or {}).get("api_key")) - state["admin_connected"] = True - state["admin_api_keys"] = 
app_api_keys - if not state["upstream_api_keys"]: - state["upstream_api_keys"] = app_api_keys - if not state["accepted_tokens"]: - state["accepted_tokens"] = app_api_keys - state["stats"] = build_social_token_stats(admin_tokens) - except Exception as exc: - state["error"] = str(exc) - - if not state["accepted_tokens"] and state["upstream_api_keys"]: - state["accepted_tokens"] = list(state["upstream_api_keys"]) - - state["upstream_api_keys"] = unique_preserve_order(state["upstream_api_keys"]) - state["accepted_tokens"] = unique_preserve_order(state["accepted_tokens"]) - state["resolved_upstream_api_key"] = state["upstream_api_keys"][0] if state["upstream_api_keys"] else "" - state["default_client_token"] = state["accepted_tokens"][0] if state["accepted_tokens"] else "" - state["token_source"] = build_social_token_source(state) - state["mode"] = build_social_gateway_mode(state) - + state = await resolve_social_gateway_state_for_config(config) social_gateway_state_cache["value"] = state social_gateway_state_cache["expires_at"] = now + state["cache_ttl_seconds"] return state @@ -751,6 +1244,19 @@ async def sync_usage_cache(force=False, key_id=None, service=None): else: rows = [dict(row) for row in db.get_all_keys(service)] + if rows and all((row.get("service") or "tavily") == "tavily" for row in rows): + tavily_config = get_runtime_tavily_config() + tavily_active_rows = [row for row in rows if row.get("active")] + if resolve_tavily_runtime_mode(tavily_config, tavily_active_rows)["effective_mode"] == "upstream": + return { + "requested": len(rows), + "synced": 0, + "skipped": len(rows), + "errors": 0, + "supported": False, + "detail": "当前走 Tavily 上游 Gateway,本地 API Key 池额度同步已停用", + } + if not rows: return {"requested": 0, "synced": 0, "skipped": 0, "errors": 0} @@ -776,6 +1282,17 @@ async def worker(row): def build_usage_sync_meta_for_dashboard(service, active_keys): + if service == "tavily" and resolve_tavily_runtime_mode(get_runtime_tavily_config(), 
active_keys)["effective_mode"] == "upstream": + return { + "supported": False, + "requested": len(active_keys), + "synced": 0, + "skipped": len(active_keys), + "errors": 0, + "stale_keys": 0, + "detail": "当前走 Tavily 上游 Gateway,本地 API Key 池额度同步已停用", + } + if service == "exa": return { "supported": False, @@ -806,6 +1323,8 @@ def build_usage_sync_meta_for_dashboard(service, active_keys): async def schedule_background_usage_sync(service, active_keys): if not DASHBOARD_BACKGROUND_SYNC_ON_STATS: return + if service == "tavily" and resolve_tavily_runtime_mode(get_runtime_tavily_config(), active_keys)["effective_mode"] == "upstream": + return if service == "exa": return if not active_keys: @@ -814,7 +1333,7 @@ async def schedule_background_usage_sync(service, active_keys): return now = time.monotonic() - async with background_sync_lock: + async with get_background_sync_lock(): running = background_sync_tasks.get(service) if running and not running.done(): return @@ -901,12 +1420,18 @@ async def build_service_dashboard(service, auto_sync=False): token["stats"] = db.get_usage_stats(token_id=token["id"], service=service) keys = mask_key_rows([dict(key) for key in db.get_all_keys(service)]) active_keys = [key for key in keys if key["active"]] + routing = None + if service == "tavily": + routing = build_tavily_routing_meta(get_runtime_tavily_config(), active_keys) + upstream_summary = None + if service == "tavily" and routing and routing["effective_mode"] == "upstream": + upstream_summary = await fetch_tavily_upstream_summary(get_runtime_tavily_config()) if auto_sync: sync_result = await sync_usage_cache(force=False, service=service) else: sync_result = build_usage_sync_meta_for_dashboard(service, active_keys) await schedule_background_usage_sync(service, active_keys) - return { + payload = { "service": service, "label": SERVICE_LABELS[service], "overview": overview, @@ -917,6 +1442,11 @@ async def build_service_dashboard(service, auto_sync=False): "real_quota": 
build_real_quota_summary(active_keys), "usage_sync": sync_result, } + if routing is not None: + payload["routing"] = routing + if upstream_summary is not None: + payload["upstream_summary"] = upstream_summary + return payload async def build_mysearch_dashboard(): @@ -941,6 +1471,16 @@ async def build_mysearch_dashboard(): } +def issue_mysearch_bootstrap_token(name: str) -> tuple[dict, bool]: + normalized_name = (name or "").strip() or "docker-mysearch" + existing = db.get_token_by_name(normalized_name, service="mysearch") + if existing: + return dict(existing), False + created = db.create_token(normalized_name, service="mysearch") + reset_stats_cache() + return dict(created), True + + async def build_social_dashboard(): state = await resolve_social_gateway_state(force=False) return { @@ -963,14 +1503,29 @@ async def build_social_dashboard(): "client_token": state["default_client_token"], "client_token_masked": mask_secret(state["default_client_token"]), "stats": state["stats"], + "upstream_visibility": build_social_upstream_visibility(state), "error": state["error"], } async def build_settings_payload(): + tavily = get_runtime_tavily_config() + tavily_active_keys = [dict(row) for row in db.get_all_keys("tavily") if row["active"]] + tavily_resolved = resolve_tavily_runtime_mode(tavily, tavily_active_keys) config = get_runtime_social_config() state = await resolve_social_gateway_state(force=False) return { + "tavily": { + "mode": tavily["mode"], + "effective_mode": tavily_resolved["effective_mode"], + "mode_source": tavily_resolved["mode_source"], + "upstream_base_url": tavily["upstream_base_url"], + "upstream_search_path": tavily["upstream_search_path"], + "upstream_extract_path": tavily["upstream_extract_path"], + "upstream_api_key_configured": bool(tavily["upstream_api_key"]), + "upstream_api_key_masked": mask_secret(tavily["upstream_api_key"]), + "local_key_count": len(tavily_active_keys), + }, "social": { "upstream_base_url": config["upstream_base_url"], 
"upstream_responses_path": config["upstream_responses_path"], @@ -1413,10 +1968,16 @@ def has_social_fallback(primary_model, fallback_model): return bool(primary and fallback and fallback != primary) -def should_retry_social_with_fallback(primary_model, fallback_model, response, min_results): +def effective_social_fallback_threshold(min_results, max_results): + configured = max(1, int(min_results or 1)) + requested = max(1, int(max_results or 1)) + return min(configured, requested) + + +def should_retry_social_with_fallback(primary_model, fallback_model, response, min_results, max_results): if not has_social_fallback(primary_model, fallback_model): return False, "" - threshold = max(1, int(min_results or 1)) + threshold = effective_social_fallback_threshold(min_results, max_results) if count_social_results(response) >= threshold: return False, "" return True, "result_count_below_threshold" @@ -1448,6 +2009,7 @@ def build_social_route_metadata( fallback_model, fallback_reason, fallback_min_results, + requested_max_results, ): primary_model = attempts[0]["model"] if attempts else "" selected_model = (selected_attempt or {}).get("model") or primary_model @@ -1477,7 +2039,10 @@ def build_social_route_metadata( "triggered": fallback_attempted, "used": bool(fallback_attempted and selected_model == fallback_target), "reason": fallback_reason or "", - "threshold": max(1, int(fallback_min_results or 1)), + "threshold": effective_social_fallback_threshold( + fallback_min_results, + requested_max_results, + ), "from": primary_model, "to": fallback_target, "selected_model": selected_model, @@ -1493,6 +2058,7 @@ def attach_social_route_metadata( fallback_model, fallback_reason, fallback_min_results, + requested_max_results, ): payload = dict(response or {}) tool_usage = dict(payload.get("tool_usage") or {}) @@ -1505,6 +2071,7 @@ def attach_social_route_metadata( fallback_model=fallback_model, fallback_reason=fallback_reason, fallback_min_results=fallback_min_results, + 
requested_max_results=requested_max_results, ) return payload @@ -1635,20 +2202,6 @@ def normalize_social_search_response(query, payload, max_results, *, model=None) }, "raw_text": text, } - - -# ═══ 启动 ═══ - -@app.on_event("startup") -def startup(): - db.init_db() - - -@app.on_event("shutdown") -async def shutdown(): - await http_client.aclose() - - # ═══ Tavily 代理端点 ═══ @app.post("/api/search") @@ -1660,23 +2213,69 @@ async def proxy_tavily(request: Request): token_value = extract_token(request, body) token_row = get_token_row_or_401(token_value, "tavily") - key_info = pool.get_next_key("tavily") - if not key_info: - raise HTTPException(status_code=503, detail="No available API keys") + config = get_runtime_tavily_config() + tavily_active_keys = [dict(row) for row in db.get_all_keys("tavily") if row["active"]] + tavily_resolved = resolve_tavily_runtime_mode(config, tavily_active_keys) + path_map = { + "search": config["upstream_search_path"], + "extract": config["upstream_extract_path"], + } + upstream_path = path_map.get(endpoint) + if not upstream_path: + raise HTTPException(status_code=400, detail=f"Unsupported Tavily endpoint: {endpoint}") + + upstream_base_url = TAVILY_API_BASE + upstream_key = "" + key_info = None + if tavily_resolved["effective_mode"] == "upstream": + upstream_base_url = config["upstream_base_url"] + upstream_key = config["upstream_api_key"] + if not upstream_key: + raise HTTPException(status_code=503, detail="Missing Tavily upstream API key") + else: + key_info = pool.get_next_key("tavily") + if not key_info: + raise HTTPException(status_code=503, detail="No available API keys") + upstream_key = key_info["key"] - body["api_key"] = key_info["key"] + body["api_key"] = upstream_key start = time.time() try: - resp = await http_client.post(f"{TAVILY_API_BASE}/{endpoint}", json=body) + resp, request_target, _fallback_used = await _post_tavily_with_gateway_fallback( + base_url=upstream_base_url, + path=upstream_path, + api_key=upstream_key, + 
payload=body, + effective_mode=tavily_resolved["effective_mode"], + ) latency = int((time.time() - start) * 1000) success = resp.status_code == 200 - pool.report_result("tavily", key_info["id"], success) - db.log_usage(token_row["id"], key_info["id"], endpoint, int(success), latency, service="tavily") - return JSONResponse(content=resp.json(), status_code=resp.status_code) + if key_info is not None: + pool.report_result("tavily", key_info["id"], success) + db.log_usage( + token_row["id"], + key_info["id"] if key_info is not None else None, + endpoint, + int(success), + latency, + service="tavily", + ) + try: + return JSONResponse(content=resp.json(), status_code=resp.status_code) + except Exception: + return Response(content=resp.text, status_code=resp.status_code, media_type=resp.headers.get("content-type")) except Exception as exc: latency = int((time.time() - start) * 1000) - pool.report_result("tavily", key_info["id"], False) - db.log_usage(token_row["id"], key_info["id"], endpoint, 0, latency, service="tavily") + if key_info is not None: + pool.report_result("tavily", key_info["id"], False) + db.log_usage( + token_row["id"], + key_info["id"] if key_info is not None else None, + endpoint, + 0, + latency, + service="tavily", + ) raise HTTPException(status_code=502, detail=str(exc)) @@ -1765,6 +2364,10 @@ async def proxy_exa_search(request: Request): @app.get("/social/health") async def social_health(): state = await resolve_social_gateway_state(force=False) + return _build_social_health_payload(state) + + +def _build_social_health_payload(state): return { "ok": bool(state["resolved_upstream_api_key"] and state["accepted_tokens"]), "mode": state["mode"], @@ -1785,6 +2388,14 @@ async def social_health(): } +@app.get("/health") +async def health(): + state = await resolve_social_gateway_state(force=False) + payload = _build_social_health_payload(state) + payload["service"] = "proxy" + return payload + + @app.post("/social/search") async def 
proxy_social_search(request: Request): body = await request.json() @@ -1807,11 +2418,12 @@ async def proxy_social_search(request: Request): max_results = max(1, min(int(body.get("max_results") or 5), 10)) attempts = [] + primary_model = str(body.get("model") or state["model"]).strip() or state["model"] primary_attempt = await execute_social_search_attempt( query, body, state, - state["model"], + primary_model, max_results, ) attempts.append(primary_attempt) @@ -1823,10 +2435,11 @@ async def proxy_social_search(request: Request): if primary_attempt.get("ok"): selected_attempt = primary_attempt should_retry, fallback_reason = should_retry_social_with_fallback( - state["model"], + primary_model, fallback_model, primary_attempt.get("response"), fallback_min_results, + max_results, ) if should_retry: fallback_attempt = await execute_social_search_attempt( @@ -1846,9 +2459,10 @@ async def proxy_social_search(request: Request): fallback_model=fallback_model, fallback_reason=fallback_reason, fallback_min_results=fallback_min_results, + requested_max_results=max_results, ) - if has_social_fallback(state["model"], fallback_model): + if has_social_fallback(primary_model, fallback_model): fallback_reason = "upstream_error" fallback_attempt = await execute_social_search_attempt( query, @@ -1866,6 +2480,7 @@ async def proxy_social_search(request: Request): fallback_model=fallback_model, fallback_reason=fallback_reason, fallback_min_results=fallback_min_results, + requested_max_results=max_results, ) detail = fallback_attempt.get("error") or primary_attempt.get("error") or "Social search failed" status_code = fallback_attempt.get("status_code") or primary_attempt.get("status_code") or 502 @@ -1891,6 +2506,18 @@ async def console(request: Request): ) +@app.get("/mysearch", response_class=HTMLResponse) +async def mysearch_console(request: Request): + return templates.TemplateResponse( + "mysearch.html", + { + "request": request, + "base_url": str(request.base_url).rstrip("/"), + 
"initial_authenticated": has_valid_admin_session(request), + }, + ) + + # ═══ 管理 API ═══ @app.get("/api/session") @@ -1926,7 +2553,7 @@ async def stats(request: Request, _=Depends(verify_admin)): if cached_value is not None and now < stats_payload_cache["expires_at"]: return cached_value - async with stats_payload_lock: + async with get_stats_payload_lock(): now = time.monotonic() cached_value = stats_payload_cache["value"] if cached_value is not None and now < stats_payload_cache["expires_at"]: @@ -1943,6 +2570,98 @@ async def get_settings(request: Request, _=Depends(verify_admin)): return await build_settings_payload() +@app.post("/api/settings/test/tavily") +async def test_tavily_settings(request: Request, _=Depends(verify_admin)): + body = await request.json() if request.headers.get("content-type", "").startswith("application/json") else {} + config = build_candidate_tavily_config(body) + active_keys = [dict(row) for row in db.get_all_keys("tavily") if row["active"]] + return await probe_tavily_connection(config, active_keys) + + +@app.post("/api/settings/test/social") +async def test_social_settings(request: Request, _=Depends(verify_admin)): + body = await request.json() if request.headers.get("content-type", "").startswith("application/json") else {} + config = build_candidate_social_config(body) + state = await resolve_social_gateway_state_for_config(config) + request_target = f"{state['upstream_base_url']}{state['upstream_responses_path']}" + detail = "" + if state["admin_connected"]: + detail = "后台已连通,并成功拉取配置与 token 池。" + elif state["resolved_upstream_api_key"]: + detail = "已检测到可用上游 key,但当前未通过后台接口补充更多 token 元数据。" + elif state["error"]: + detail = state["error"] + else: + detail = "当前没有解析到可用上游 key 或客户端 token。" + ok = bool(state["resolved_upstream_api_key"] and state["accepted_tokens"]) + if state["admin_connected"]: + auth_source = "grok2api 后台自动继承" + status_label = "后台已连通" + recommendation = "当前后台自动继承正常,可以直接下发 MySearch 通用 token。" + elif ok: + auth_source 
= state["token_source"] or "手动上游 key + 客户端 token" + status_label = "已解析到可用凭证" + recommendation = "当前已经能转发 Social / X 搜索;如果你需要更完整的 token 元数据,继续补 grok2api 后台即可。" + else: + auth_source = state["token_source"] or "未解析到可用鉴权" + status_label = "诊断失败" + recommendation = "优先检查 grok2api 后台地址与 app key;如果没有后台,再补手动上游 key 和客户端 token。" + return { + "ok": ok, + "mode": state["mode"], + "token_source": state["token_source"], + "admin_connected": state["admin_connected"], + "upstream_base_url": state["upstream_base_url"], + "upstream_responses_path": state["upstream_responses_path"], + "accepted_token_count": len(state["accepted_tokens"]), + "upstream_api_key_count": len(state["upstream_api_keys"]), + "detail": detail, + "request_target": request_target, + "auth_source": auth_source, + "status_label": status_label, + "failure_reason": "" if ok else (state["error"] or detail), + "recommendation": recommendation, + "error": state["error"], + } + + +@app.put("/api/settings/tavily") +async def update_tavily_settings(request: Request, _=Depends(verify_admin)): + body = await request.json() + if not isinstance(body, dict): + raise HTTPException(status_code=400, detail="Expected JSON request body") + + if "mode" in body: + mode = str(body.get("mode") or "").strip().lower() or "auto" + if mode not in {"auto", "pool", "upstream"}: + raise HTTPException(status_code=400, detail="mode must be 'auto', 'pool' or 'upstream'") + db.set_setting("tavily_mode", mode) + + text_fields = { + "upstream_base_url": "tavily_upstream_base_url", + "upstream_search_path": "tavily_upstream_search_path", + "upstream_extract_path": "tavily_upstream_extract_path", + } + for field, setting_key in text_fields.items(): + if field not in body: + continue + value = str(body.get(field) or "").strip() + db.set_setting(setting_key, value) + + if body.get("clear_upstream_api_key"): + db.set_setting("tavily_upstream_api_key", "") + elif "upstream_api_key" in body: + value = str(body.get("upstream_api_key") or "").strip() + 
if value: + db.set_setting("tavily_upstream_api_key", value) + + reset_stats_cache() + return { + "ok": True, + **(await build_settings_payload()), + } + + @app.put("/api/settings/social") async def update_social_settings(request: Request, _=Depends(verify_admin)): body = await request.json() @@ -2077,6 +2796,24 @@ async def list_tokens(request: Request, _=Depends(verify_admin)): return {"tokens": tokens} +@app.post("/api/internal/mysearch/token") +async def bootstrap_mysearch_token(request: Request, _=Depends(verify_mysearch_bootstrap)): + try: + body = await request.json() + if not isinstance(body, dict): + body = {} + except Exception: + body = {} + token, created = issue_mysearch_bootstrap_token(body.get("name", "")) + return { + "ok": True, + "created": created, + "token": token["token"], + "token_name": token["name"], + "service": token["service"], + } + + @app.post("/api/tokens") async def create_token(request: Request, _=Depends(verify_admin)): body = await request.json() diff --git a/proxy/static/css/console.css b/proxy/static/css/console.css new file mode 100644 index 0000000..bfb63bc --- /dev/null +++ b/proxy/static/css/console.css @@ -0,0 +1,3329 @@ +@import url('https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;500;600&family=Noto+Sans+SC:wght@400;500;600;700&family=Space+Grotesk:wght@500;700&display=swap'); + +:root { + --bg: #f3eee4; + --bg-soft: #ece6d8; + --surface: rgba(255, 251, 246, 0.92); + --surface-strong: rgba(255, 248, 239, 0.98); + --surface-muted: rgba(248, 242, 232, 0.86); + --surface-glass: rgba(255, 250, 244, 0.78); + --text: #17212d; + --text-soft: #425063; + --muted: #6f7a88; + --border: rgba(104, 120, 138, 0.18); + --border-strong: rgba(57, 72, 89, 0.28); + --shadow: 0 24px 48px rgba(28, 36, 44, 0.08); + --shadow-lg: 0 32px 80px rgba(21, 28, 34, 0.14); + --radius: 24px; + --radius-lg: 32px; + --radius-sm: 14px; + --primary: #16202b; + --primary-hover: #28384a; + --accent: #0f766e; + --accent-soft: rgba(15, 118, 110, 
0.12); + --ok: #157347; + --warn: #b76a11; + --danger: #bb3b31; + --info: #1d4ed8; + --scrollbar-track: rgba(116, 128, 142, 0.12); + --scrollbar-thumb: rgba(37, 52, 67, 0.34); + --scrollbar-thumb-hover: rgba(22, 32, 43, 0.56); + --scrollbar-thumb-active: rgba(15, 118, 110, 0.54); + --scrollbar-outline: rgba(255, 251, 246, 0.92); + + --tavily: #0f766e; + --tavily-soft: rgba(15, 118, 110, 0.12); + --exa: #1d4ed8; + --exa-soft: rgba(29, 78, 216, 0.12); + --firecrawl: #c2410c; + --firecrawl-soft: rgba(194, 65, 12, 0.12); + --social: #c05d22; + --social-soft: rgba(192, 93, 34, 0.12); + --mysearch: #111827; + --mysearch-soft: rgba(17, 24, 39, 0.12); +} + +body.theme-dark { + --bg: #0b1016; + --bg-soft: #141b24; + --surface: rgba(16, 22, 31, 0.9); + --surface-strong: rgba(18, 25, 34, 0.98); + --surface-muted: rgba(24, 31, 42, 0.92); + --surface-glass: rgba(18, 25, 34, 0.78); + --text: #f8f6f1; + --text-soft: #d6d3cd; + --muted: #98a2af; + --border: rgba(130, 148, 169, 0.18); + --border-strong: rgba(183, 196, 211, 0.28); + --shadow: 0 24px 60px rgba(0, 0, 0, 0.3); + --shadow-lg: 0 36px 90px rgba(0, 0, 0, 0.45); + --primary: #f6f1e9; + --primary-hover: #ffffff; + --accent: #2dd4bf; + --accent-soft: rgba(45, 212, 191, 0.14); + --ok: #4ade80; + --warn: #fbbf24; + --danger: #f87171; + --info: #60a5fa; + --scrollbar-track: rgba(148, 163, 184, 0.12); + --scrollbar-thumb: rgba(214, 211, 205, 0.26); + --scrollbar-thumb-hover: rgba(248, 246, 241, 0.42); + --scrollbar-thumb-active: rgba(45, 212, 191, 0.48); + --scrollbar-outline: rgba(18, 25, 34, 0.94); + --tavily: #2dd4bf; + --exa: #60a5fa; + --firecrawl: #fb923c; + --social: #f59e0b; + --mysearch: #f6f1e9; + --tavily-soft: rgba(45, 212, 191, 0.12); + --exa-soft: rgba(96, 165, 250, 0.12); + --firecrawl-soft: rgba(251, 146, 60, 0.12); + --social-soft: rgba(245, 158, 11, 0.12); + --mysearch-soft: rgba(246, 241, 233, 0.12); +} + +* { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +html { + scroll-behavior: smooth; + 
scrollbar-gutter: stable; +} + +* { + scrollbar-width: thin; + scrollbar-color: var(--scrollbar-thumb) transparent; +} + +*::-webkit-scrollbar { + width: 12px; + height: 12px; +} + +*::-webkit-scrollbar-track { + background: transparent; +} + +*::-webkit-scrollbar-thumb { + background: + linear-gradient(180deg, rgba(255, 255, 255, 0.22), var(--scrollbar-thumb)); + border-radius: 999px; + border: 3px solid var(--scrollbar-outline); + background-clip: padding-box; +} + +*::-webkit-scrollbar-thumb:hover { + background: + linear-gradient(180deg, rgba(255, 255, 255, 0.28), var(--scrollbar-thumb-hover)); +} + +*::-webkit-scrollbar-thumb:active { + background: + linear-gradient(180deg, rgba(255, 255, 255, 0.22), var(--scrollbar-thumb-active)); +} + +*::-webkit-scrollbar-corner { + background: transparent; +} + +body { + min-height: 100vh; + font-family: 'Noto Sans SC', 'PingFang SC', 'Microsoft YaHei', sans-serif; + color: var(--text); + background: + radial-gradient(circle at 10% 20%, rgba(29, 78, 216, 0.08), transparent 28%), + radial-gradient(circle at 88% 18%, rgba(15, 118, 110, 0.12), transparent 24%), + radial-gradient(circle at 50% 110%, rgba(194, 65, 12, 0.12), transparent 30%), + linear-gradient(180deg, #faf6ee 0%, var(--bg) 38%, #ebe3d5 100%); + background-attachment: fixed; + line-height: 1.5; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + transition: background-color 0.35s ease, color 0.35s ease; +} + +body.theme-dark { + background: + radial-gradient(circle at 10% 20%, rgba(96, 165, 250, 0.08), transparent 26%), + radial-gradient(circle at 88% 18%, rgba(45, 212, 191, 0.1), transparent 24%), + radial-gradient(circle at 50% 110%, rgba(251, 146, 60, 0.09), transparent 26%), + linear-gradient(180deg, #0a0f15 0%, var(--bg) 45%, #121923 100%); +} + +body.modal-open { + overflow: hidden; +} + +.hidden { + display: none !important; +} + +.mono { + font-family: 'IBM Plex Mono', monospace; +} + +#dashboard.is-entering .hero, 
+#dashboard.is-entering .summary-strip, +#dashboard.is-entering .dashboard-flow { + opacity: 0; + transform: translateY(14px); + animation: dashboard-stage-in 0.52s cubic-bezier(0.22, 1, 0.36, 1) forwards; +} + +#dashboard.is-entering .summary-strip { + animation-delay: 0.08s; +} + +#dashboard.is-entering .dashboard-flow { + animation-delay: 0.16s; +} + +@keyframes dashboard-stage-in { + to { + opacity: 1; + transform: translateY(0); + } +} + +.icon { + width: 20px; + height: 20px; + stroke: currentColor; + stroke-width: 2; + stroke-linecap: round; + stroke-linejoin: round; + fill: none; +} + +.w-3 { width: 14px; } +.h-3 { height: 14px; } +.w-4 { width: 16px; } +.h-4 { height: 16px; } +.mr-1 { margin-right: 6px; } +.inline-block { display: inline-block; vertical-align: text-bottom; } + +.container { + width: min(1440px, calc(100vw - 32px)); + margin: 0 auto; + padding: 28px 0 64px; +} + +.card { + position: relative; + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + box-shadow: var(--shadow); + backdrop-filter: blur(14px); + -webkit-backdrop-filter: blur(14px); +} + +.auth { + position: relative; + overflow: hidden; + max-width: 460px; + margin: 10vh auto; + padding: 42px 38px; + text-align: center; + border-radius: 30px; + background: + linear-gradient(160deg, rgba(255, 251, 246, 0.98), rgba(247, 239, 226, 0.92)); +} + +body.theme-dark .auth { + background: + linear-gradient(160deg, rgba(16, 22, 31, 0.96), rgba(18, 26, 36, 0.9)); +} + +.auth-orb { + position: absolute; + border-radius: 999px; + filter: blur(10px); + opacity: 0.72; + pointer-events: none; +} + +.auth-orb-a { + top: -42px; + right: -30px; + width: 128px; + height: 128px; + background: radial-gradient(circle, rgba(29, 78, 216, 0.18), rgba(29, 78, 216, 0)); +} + +.auth-orb-b { + left: -30px; + bottom: -40px; + width: 148px; + height: 148px; + background: radial-gradient(circle, rgba(15, 118, 110, 0.15), rgba(15, 118, 110, 0)); +} + +.auth-kicker, 
+.switcher-eyebrow, +.settings-kicker, +.credit-kicker, +.hero-brand-kicker, +.hero-focus-kicker, +.hero-lane-kicker, +.service-tool-kicker { + font-size: 11px; + font-weight: 700; + letter-spacing: 0.14em; + text-transform: uppercase; + color: var(--muted); +} + +.auth h2, +.switcher-head h2, +.service-head h2, +.settings-head h2 { + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + letter-spacing: -0.04em; +} + +.auth h2 { + font-size: 32px; + margin: 14px 0 10px; +} + +.auth-badges { + position: relative; + z-index: 1; + display: flex; + justify-content: center; + gap: 8px; + flex-wrap: wrap; + margin-top: 14px; +} + +.auth-badge { + display: inline-flex; + align-items: center; + padding: 6px 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface); + font-size: 11px; + font-weight: 700; + color: var(--text-soft); +} + +.auth p { + color: var(--text-soft); + font-size: 14px; + margin-bottom: 28px; + line-height: 1.7; +} + +.auth-meta { + display: grid; + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 10px; + margin-top: 18px; +} + +.auth-meta-card { + padding: 14px; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface-glass); + text-align: left; +} + +.auth-meta-card .label { + display: block; + margin-bottom: 6px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); +} + +.auth-meta-card strong { + display: block; + font-size: 13px; + line-height: 1.55; + color: var(--text); + overflow-wrap: anywhere; +} + +.login-error { + margin-top: 12px; + color: var(--danger); + font-size: 13px; +} + +.auth .stack { + display: flex; + flex-direction: column; + gap: 14px; +} + +input[type="text"], +input[type="password"], +select, +textarea { + width: 100%; + padding: 13px 16px; + border-radius: var(--radius-sm); + border: 1px solid var(--border); + background: var(--surface-strong); + color: var(--text); + font-family: 
inherit; + font-size: 14px; + transition: border-color 0.2s ease, box-shadow 0.2s ease, background-color 0.2s ease; +} + +.mode-switch { + display: inline-flex; + flex-wrap: wrap; + gap: 8px; +} + +.mode-runtime-strip { + margin-top: 10px; + display: inline-flex; + align-items: center; + gap: 8px; + min-height: 36px; + padding: 0 14px; + border-radius: 999px; + border: 1px solid rgba(29, 78, 216, 0.14); + background: linear-gradient(135deg, rgba(29, 78, 216, 0.08), rgba(15, 118, 110, 0.08)); + color: var(--text); + font-size: 12px; + font-weight: 700; + letter-spacing: 0.01em; +} + +.mode-switch-btn { + display: inline-flex; + align-items: center; + justify-content: center; + min-height: 38px; + padding: 0 14px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface); + color: var(--text-soft); + font-size: 13px; + font-weight: 700; + cursor: pointer; + transition: border-color 0.18s ease, background-color 0.18s ease, color 0.18s ease, transform 0.18s ease; +} + +.mode-switch-btn:hover { + transform: translateY(-1px); + border-color: var(--border-strong); + background: var(--surface-muted); +} + +.mode-switch-btn.is-active { + background: linear-gradient(135deg, rgba(29, 78, 216, 0.12), rgba(15, 118, 110, 0.12)); + border-color: rgba(29, 78, 216, 0.2); + color: var(--text); +} + +.settings-field.is-muted { + opacity: 0.72; +} + +.settings-field.is-muted label, +.settings-field.is-muted .hint { + color: var(--text-soft); +} + +.settings-field.is-emphasis { + opacity: 1; +} + +textarea { + min-height: 136px; + resize: vertical; +} + +input::placeholder, +textarea::placeholder { + color: var(--muted); +} + +input:focus, +select:focus, +textarea:focus { + outline: none; + border-color: rgba(29, 78, 216, 0.45); + box-shadow: 0 0 0 4px rgba(29, 78, 216, 0.12); +} + +.btn { + position: relative; + display: inline-flex; + align-items: center; + justify-content: center; + gap: 8px; + padding: 11px 18px; + border-radius: 999px; + border: 1px 
solid transparent; + cursor: pointer; + font-size: 14px; + font-weight: 600; + transition: transform 0.18s ease, border-color 0.18s ease, background-color 0.18s ease, color 0.18s ease, box-shadow 0.18s ease; +} + +.btn:hover { + transform: translateY(-1px); +} + +.btn:disabled { + cursor: default; + transform: none; +} + +.btn.is-busy { + pointer-events: none; +} + +.btn.is-busy::after { + content: ''; + width: 12px; + height: 12px; + border-radius: 999px; + border: 2px solid currentColor; + border-right-color: transparent; + animation: btn-spin 0.7s linear infinite; +} + +.btn.is-success { + border-color: rgba(15, 118, 110, 0.22); + background: linear-gradient(135deg, rgba(15, 118, 110, 0.16), rgba(15, 118, 110, 0.08)); + color: var(--text); +} + +.btn.is-error { + border-color: rgba(187, 59, 49, 0.22); + background: linear-gradient(135deg, rgba(187, 59, 49, 0.12), rgba(187, 59, 49, 0.06)); + color: var(--danger); +} + +@keyframes btn-spin { + to { + transform: rotate(360deg); + } +} + +.btn-primary { + background: var(--primary); + color: var(--bg); + box-shadow: 0 14px 24px rgba(17, 24, 39, 0.12); +} + +.btn-primary:hover { + background: var(--primary-hover); +} + +.btn-soft { + background: var(--surface-strong); + color: var(--text); + border-color: var(--border); +} + +.btn-soft:hover, +.btn-ghost:hover { + border-color: var(--border-strong); + background: var(--surface-muted); +} + +.btn-ghost { + background: transparent; + color: var(--text-soft); + border-color: transparent; +} + +.btn-danger { + background: rgba(187, 59, 49, 0.1); + border-color: rgba(187, 59, 49, 0.2); + color: var(--danger); +} + +.btn-sm { + padding: 7px 12px; + font-size: 12px; +} + +.user-btn { + background: var(--surface-glass); + border: 1px solid var(--border); + color: var(--text); + padding: 10px 16px; + border-radius: 999px; + font-size: 13px; + font-weight: 600; + display: inline-flex; + align-items: center; + gap: 8px; + cursor: pointer; + transition: transform 0.18s ease, 
border-color 0.18s ease, background-color 0.18s ease; +} + +.user-btn:hover { + transform: translateY(-1px); + border-color: var(--border-strong); + background: var(--surface-strong); +} + +.hero { + overflow: hidden; + border-radius: var(--radius-lg); + padding: 34px; + margin-bottom: 24px; + background: + linear-gradient(145deg, rgba(255, 252, 247, 0.92), rgba(245, 238, 227, 0.88)); +} + +body.theme-dark .hero { + background: + linear-gradient(145deg, rgba(14, 20, 28, 0.94), rgba(16, 23, 32, 0.9)); +} + +.hero-orb, +.hero-grid { + position: absolute; + inset: auto; + pointer-events: none; +} + +.hero-orb { + width: 340px; + height: 340px; + border-radius: 999px; + filter: blur(18px); + opacity: 0.55; +} + +.hero-orb-a { + top: -90px; + right: -70px; + background: radial-gradient(circle, rgba(29, 78, 216, 0.2), transparent 68%); +} + +.hero-orb-b { + bottom: -120px; + left: -80px; + background: radial-gradient(circle, rgba(15, 118, 110, 0.18), transparent 68%); +} + +.hero-grid { + inset: 0; + background-image: + linear-gradient(rgba(97, 111, 129, 0.08) 1px, transparent 1px), + linear-gradient(90deg, rgba(97, 111, 129, 0.08) 1px, transparent 1px); + background-size: 22px 22px; + mask-image: linear-gradient(180deg, rgba(0, 0, 0, 0.32), transparent 82%); +} + +.hero-stack, +.service-head, +.service-body { + position: relative; + z-index: 1; +} + +.hero-topbar { + display: flex; + justify-content: space-between; + align-items: center; + gap: 16px; + margin-bottom: 28px; +} + +.hero-brand { + display: inline-flex; + align-items: center; + gap: 14px; +} + +.hero-brand-line { + width: 44px; + height: 1px; + background: linear-gradient(90deg, var(--accent), transparent); +} + +.hero-brand-copy { + font-family: 'IBM Plex Mono', monospace; + font-size: 12px; + color: var(--text-soft); +} + +.hero-actions { + display: flex; + align-items: center; + gap: 12px; + flex-wrap: wrap; +} + +.hero-main { + display: block; +} + +.hero-copy { + width: 100%; + max-width: none; +} + 
+.hero-pill-row, +.hero-tags { + display: flex; + gap: 10px; + flex-wrap: wrap; +} + +.hero-tag { + display: inline-flex; + align-items: center; + gap: 8px; + padding: 7px 14px; + border-radius: 999px; + border: 1px solid var(--border); + background: rgba(255, 255, 255, 0.46); + color: var(--text-soft); + font-size: 12px; + font-weight: 600; +} + +.hero-tag .icon { + flex: 0 0 auto; +} + +body.theme-dark .hero-tag { + background: rgba(255, 255, 255, 0.03); +} + +.access-shell { + padding: 26px 28px; +} + +.access-shell-head { + display: flex; + justify-content: space-between; + gap: 18px; + align-items: flex-start; +} + +.access-shell-copy { + display: flex; + flex-direction: column; + gap: 8px; + min-width: 0; +} + +.access-shell-kicker { + display: inline-flex; + align-items: center; + width: fit-content; + padding: 7px 12px; + border-radius: 999px; + border: 1px solid rgba(29, 78, 216, 0.12); + background: rgba(29, 78, 216, 0.08); + color: var(--primary); + font-size: 11px; + font-weight: 700; + letter-spacing: 0.12em; + text-transform: uppercase; +} + +.access-shell h1 { + margin: 0; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: clamp(1.9rem, 3vw, 2.6rem); + line-height: 1.08; + letter-spacing: -0.045em; +} + +.access-shell p { + margin: 0; + max-width: 74ch; + font-size: 14px; + line-height: 1.75; + color: var(--text-soft); +} + +.access-shell-actions { + display: flex; + flex-wrap: wrap; + gap: 10px; + justify-content: flex-end; +} + +.hero-heading-shell { + display: flex; + flex-direction: column; + gap: 10px; + margin: 18px 0 16px; + width: 100%; + max-width: none; +} + +.hero-heading-kicker { + display: inline-flex; + align-items: center; + width: fit-content; + padding: 7px 12px; + border-radius: 999px; + border: 1px solid rgba(29, 78, 216, 0.12); + background: rgba(29, 78, 216, 0.08); + color: var(--primary); + font-size: 11px; + font-weight: 700; + letter-spacing: 0.12em; + text-transform: uppercase; +} + +.hero-copy h1 { + 
margin: 0; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: clamp(2.1rem, 4vw, 3.4rem); + line-height: 1.06; + letter-spacing: -0.055em; + max-width: none; +} + +.hero-heading-line { + display: block; +} + +.hero-heading-accent { + display: block; + color: var(--accent); +} + +body.theme-dark .hero-heading-accent { + color: #66d9c2; +} + +.hero-intro, +.hero-copy p { + font-size: 15px; + line-height: 1.75; + color: var(--text-soft); + width: 100%; + max-width: none; +} + +.hero-command-row { + display: flex; + gap: 12px; + flex-wrap: wrap; + margin: 22px 0 24px; + width: 100%; + max-width: none; +} + +.hero-usage-grid { + display: grid; + grid-template-columns: repeat(4, minmax(0, 1fr)); + gap: 14px; +} + +.hero-usage-card, +.stat-box, +.integration-summary-item, +.summary-box, +.credit-pill, +.service-toggle-metric, +.social-metric, +.hero-focus-metric { + background: var(--surface-glass); + border: 1px solid var(--border); + border-radius: 18px; +} + +.hero-usage-card { + padding: 18px; +} + +.hero-usage-card strong { + display: block; + font-size: 15px; + font-weight: 700; + margin-bottom: 8px; +} + +.hero-usage-card span { + font-size: 13px; + color: var(--muted); + line-height: 1.65; +} + +.hero-focus { + display: flex; + flex-direction: column; + gap: 16px; + padding: 26px; + min-height: 100%; + border-radius: 28px; + background: + linear-gradient(180deg, rgba(16, 23, 32, 0.04), rgba(16, 23, 32, 0.01)), + var(--surface-glass); + border: 1px solid var(--border); +} + +body.theme-dark .hero-focus { + background: + linear-gradient(180deg, rgba(248, 246, 241, 0.05), rgba(248, 246, 241, 0.02)), + var(--surface-glass); +} + +.hero-focus-head, +.service-head { + display: flex; + justify-content: space-between; + align-items: flex-start; + gap: 18px; +} + +.hero-focus-name { + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 32px; + letter-spacing: -0.05em; + margin-top: 6px; +} + +.hero-focus-signal { + width: 14px; + 
height: 14px; + border-radius: 999px; + background: var(--muted); + box-shadow: 0 0 0 0 rgba(111, 122, 136, 0.35); +} + +.hero-focus-signal.is-ok, +.service-toggle-status.is-ok .service-toggle-signal { + background: var(--ok); + box-shadow: 0 0 0 0 rgba(21, 115, 71, 0.35); + animation: beacon-pulse 1.8s infinite; +} + +.hero-focus-signal.is-warn, +.service-toggle-status.is-warn .service-toggle-signal { + background: var(--warn); + box-shadow: 0 0 0 0 rgba(183, 106, 17, 0.35); + animation: beacon-pulse 1.8s infinite; +} + +.hero-focus-signal.is-danger, +.service-toggle-status.is-danger .service-toggle-signal { + background: var(--danger); + box-shadow: 0 0 0 0 rgba(187, 59, 49, 0.35); + animation: beacon-pulse 1.8s infinite; +} + +.hero-focus-signal.is-idle { + background: var(--muted); +} + +@keyframes beacon-pulse { + 0% { box-shadow: 0 0 0 0 rgba(0, 0, 0, 0.16); } + 70% { box-shadow: 0 0 0 10px rgba(0, 0, 0, 0); } + 100% { box-shadow: 0 0 0 0 rgba(0, 0, 0, 0); } +} + +.hero-focus-status-row { + display: flex; + justify-content: space-between; + align-items: center; + gap: 10px; +} + +.hero-focus-status { + display: inline-flex; + align-items: center; + padding: 7px 14px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface-strong); + font-size: 12px; + font-weight: 700; +} + +.hero-focus-status.is-ok { + color: var(--ok); + border-color: rgba(21, 115, 71, 0.22); + background: rgba(21, 115, 71, 0.08); +} + +.hero-focus-status.is-warn { + color: var(--warn); + border-color: rgba(183, 106, 17, 0.22); + background: rgba(183, 106, 17, 0.08); +} + +.hero-focus-status.is-danger { + color: var(--danger); + border-color: rgba(187, 59, 49, 0.22); + background: rgba(187, 59, 49, 0.08); +} + +.hero-focus-stamp { + font-size: 12px; + color: var(--muted); +} + +.hero-focus-desc { + font-size: 14px; + color: var(--text-soft); + line-height: 1.7; +} + +.hero-focus-metrics { + display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + 
gap: 12px; +} + +.hero-focus-metric { + padding: 16px; +} + +.hero-focus-metric .label, +.summary-box .label, +.service-toggle-metric .label, +.stat-box .label, +.integration-summary-item .label, +.social-metric .label, +.credit-pill .label, +.brief-item span { + display: block; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); + margin-bottom: 8px; +} + +.hero-focus-metric strong, +.service-toggle-metric .value, +.summary-box .value, +.stat-box .value, +.integration-summary-item .value, +.social-metric .value, +.credit-pill .value { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 25px; + font-weight: 700; + letter-spacing: -0.05em; +} + +.hero-focus-actions { + display: flex; + gap: 8px; + flex-wrap: wrap; +} + +.hero-lanes { + display: grid; + grid-template-columns: repeat(4, minmax(0, 1fr)); + gap: 14px; + margin-top: 26px; +} + +.hero-lane { + padding: 18px; + border-radius: 20px; + border: 1px solid var(--border); + background: var(--surface-glass); + transition: transform 0.18s ease, border-color 0.18s ease, box-shadow 0.18s ease; +} + +.hero-lane:hover, +.service-toggle:hover, +.summary-box:hover, +.subcard:hover, +.credit-strip:hover { + transform: translateY(-2px); + border-color: var(--border-strong); + box-shadow: var(--shadow); +} + +.hero-lane strong { + display: block; + margin: 8px 0 6px; + font-size: 17px; + font-weight: 700; +} + +.hero-lane p { + font-size: 13px; + color: var(--text-soft); + line-height: 1.6; +} + +.hero-lane[data-service="tavily"] { box-shadow: inset 0 3px 0 0 var(--tavily); } +.hero-lane[data-service="exa"] { box-shadow: inset 0 3px 0 0 var(--exa); } +.hero-lane[data-service="firecrawl"] { box-shadow: inset 0 3px 0 0 var(--firecrawl); } +.hero-lane[data-service="social"] { box-shadow: inset 0 3px 0 0 var(--social); } + +.summary-strip { + display: grid; + grid-template-columns: repeat(6, minmax(0, 1fr)); + gap: 14px; + 
margin-bottom: 24px; +} + +.summary-box { + display: flex; + flex-direction: column; + gap: 6px; + min-width: 0; + min-height: 138px; + padding: 20px; +} + +.summary-box .hint, +.table-note, +.settings-field .hint, +.service-head p, +.service-sync-meta, +.credit-copy p, +.service-toggle-title span, +.service-toggle-foot, +.subcard .desc, +.stat-box .hint, +.integration-note, +.switcher-head p, +.settings-head p, +.settings-note, +.social-board-desc, +.social-board-foot { + font-size: 13px; + color: var(--text-soft); + line-height: 1.65; +} + +.summary-box .hint { + margin-top: auto; +} + +.summary-box-accent { + background: + linear-gradient(145deg, rgba(15, 118, 110, 0.1), rgba(29, 78, 216, 0.08)), + var(--surface-glass); +} + +.dashboard-flow { + display: flex; + flex-direction: column; + gap: 20px; +} + +.switcher-shell { + display: flex; + flex-direction: column; + gap: 16px; +} + +.services-root { + display: flex; + flex-direction: column; + gap: 20px; +} + +.services-root.is-switching .service-panel:not(.is-inactive) { + animation: workspace-stage-shift 0.28s cubic-bezier(0.22, 1, 0.36, 1); +} + +.switcher-shell, +.credit-strip, +.mysearch-shell, +.service-panel { + padding: 22px; +} + +.switcher-head { + display: flex; + flex-direction: column; + gap: 14px; + margin-bottom: 0; +} + +.switcher-head h2 { + font-size: 30px; +} + +.switcher-note { + display: inline-flex; + align-items: center; + width: fit-content; + padding: 8px 14px; + border-radius: 999px; + background: var(--surface-muted); + border: 1px solid var(--border); + font-size: 12px; + color: var(--text-soft); +} + +.service-switcher { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); + gap: 14px; + align-items: stretch; +} + +.service-toggle { + width: 100%; + text-align: left; + padding: 15px; + border-radius: 22px; + border: 1px solid var(--border); + background: var(--surface-glass); + color: var(--text); + cursor: pointer; + transition: transform 0.18s ease, 
border-color 0.18s ease, box-shadow 0.18s ease; + display: flex; + flex-direction: column; + gap: 10px; + min-height: 182px; +} + +.service-toggle.is-active { + border-color: var(--border-strong); + background: + linear-gradient(145deg, rgba(255, 255, 255, 0.16), rgba(255, 255, 255, 0)), + var(--surface-strong); +} + +.service-toggle-top { + display: flex; + justify-content: space-between; + gap: 12px; + align-items: flex-start; +} + +.service-chip { + display: inline-flex; + align-items: center; + width: fit-content; + padding: 6px 10px; + border-radius: 999px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + margin-bottom: 10px; +} + +.service-chip[data-service="tavily"] { background: var(--tavily-soft); color: var(--tavily); } +.service-chip[data-service="exa"] { background: var(--exa-soft); color: var(--exa); } +.service-chip[data-service="firecrawl"] { background: var(--firecrawl-soft); color: var(--firecrawl); } +.service-chip[data-service="social"] { background: var(--social-soft); color: var(--social); } +.service-chip[data-service="mysearch"] { background: var(--mysearch-soft); color: var(--mysearch); } + +.service-toggle-title strong { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 20px; + letter-spacing: -0.04em; + margin-bottom: 0; + line-height: 1.14; +} + +.service-toggle-title { + min-width: 0; + flex: 1 1 auto; +} + +.service-toggle-title span { + display: block; + color: var(--muted); + overflow-wrap: anywhere; +} + +.service-toggle-route { + margin-top: 6px; + display: inline-flex; + align-items: center; + width: fit-content; + max-width: 100%; + padding: 6px 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface); + font-size: 11px; + line-height: 1.5; + color: var(--text-soft); +} + +.service-toggle-status-wrap { + display: flex; + flex-direction: column; + align-items: flex-end; + gap: 8px; + min-width: 0; + flex: 0 0 
auto; +} + +.service-toggle-flag { + display: inline-flex; + align-items: center; + justify-content: center; + padding: 4px 10px; + border-radius: 999px; + border: 1px solid rgba(15, 118, 110, 0.16); + background: rgba(15, 118, 110, 0.08); + color: var(--accent); + font-size: 10px; + font-weight: 700; + letter-spacing: 0.1em; + text-transform: uppercase; +} + +.service-toggle-status { + display: inline-flex; + align-items: center; + justify-content: center; + gap: 8px; + padding: 6px 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface-strong); + font-size: 11px; + font-weight: 700; + line-height: 1.4; + white-space: nowrap; + text-align: right; + max-width: 100%; + min-width: 112px; +} + +.service-toggle-status.is-ok { + color: var(--ok); + border-color: rgba(21, 115, 71, 0.18); +} + +.service-toggle-status.is-warn { + color: var(--warn); + border-color: rgba(183, 106, 17, 0.18); +} + +.service-toggle-status.is-danger { + color: var(--danger); + border-color: rgba(187, 59, 49, 0.18); +} + +.service-toggle-signal { + width: 8px; + height: 8px; + border-radius: 999px; + background: currentColor; + flex: 0 0 auto; +} + +.service-toggle-status span:last-child { + white-space: nowrap; +} + +.service-toggle-grid { + display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 8px; +} + +.service-toggle-metric { + padding: 12px 13px; +} + +.service-toggle-badge { + display: inline-flex; + align-items: center; + padding: 5px 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface-strong); + font-size: 11px; + font-weight: 600; + color: var(--text-soft); + overflow-wrap: anywhere; +} + +.service-toggle-meta { + display: flex; + flex-direction: column; + gap: 10px; + padding-top: 14px; + border-top: 1px solid var(--border); +} + +.service-toggle-footnote { + font-size: 12px; + line-height: 1.55; + color: var(--text-soft); + overflow-wrap: anywhere; +} + +@keyframes 
workspace-stage-shift { + from { + opacity: 0.08; + transform: translateY(8px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.table-tools { + display: flex; + flex-direction: column; + gap: 12px; + margin: 16px 0 12px; +} + +.table-tools input[type="text"] { + min-height: 36px; + padding: 8px 12px; + border-radius: 16px; + font-size: 12px; + line-height: 1.4; + background: var(--surface); +} + +.table-tools > .input-grow { + flex: 0 0 auto; +} + +.table-tools-row > .input-grow { + flex: 1 1 240px; +} + +.table-tools-stack { + gap: 10px; +} + +.table-tools-row { + display: flex; + flex-wrap: wrap; + gap: 10px; +} + +.mini-switch { + display: inline-flex; + flex-wrap: wrap; + gap: 8px; +} + +.mini-switch-btn { + display: inline-flex; + align-items: center; + justify-content: center; + min-height: 34px; + padding: 0 12px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface); + color: var(--text-soft); + font-size: 12px; + font-weight: 700; + cursor: pointer; + transition: border-color 0.18s ease, background-color 0.18s ease, color 0.18s ease; +} + +.mini-switch-btn:hover { + border-color: var(--border-strong); + background: var(--surface-muted); +} + +.mini-switch-btn.is-active { + border-color: rgba(29, 78, 216, 0.2); + background: linear-gradient(135deg, rgba(29, 78, 216, 0.12), rgba(15, 118, 110, 0.12)); + color: var(--text); +} + +.credit-strip { + display: flex; + flex-direction: column; + gap: 18px; + background: + linear-gradient(145deg, rgba(194, 65, 12, 0.06), rgba(29, 78, 216, 0.05)), + var(--surface-glass); +} + +.credit-strip-inline { + margin-top: 4px; +} + +.credit-copy strong { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 20px; + letter-spacing: -0.04em; + margin: 8px 0 8px; +} + +.credit-link { + font-size: 12px; + color: var(--exa); + text-decoration: none; +} + +.credit-link:hover { + text-decoration: underline; +} + +.credit-meta { + display: grid; + 
grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 10px; +} + +.credit-pill { + padding: 14px; +} + +.credit-pill .value { + font-size: clamp(1rem, 1.45vw, 1.22rem); + line-height: 1.32; + overflow-wrap: anywhere; +} + +.credit-pill-note { + display: block; + margin-top: 6px; + font-size: 11px; + line-height: 1.55; + color: var(--text-soft); + overflow-wrap: anywhere; +} + +.service-panel { + overflow: hidden; + padding: 0; +} + +.service-panel.is-inactive { + display: none; +} + +.service-panel.is-activating { + animation: service-panel-focus-in 0.3s cubic-bezier(0.22, 1, 0.36, 1); +} + +@keyframes service-panel-focus-in { + from { + opacity: 0.18; + transform: translateY(10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.service-panel[data-service="tavily"] { border-top: 4px solid var(--tavily); } +.service-panel[data-service="exa"] { border-top: 4px solid var(--exa); } +.service-panel[data-service="firecrawl"] { border-top: 4px solid var(--firecrawl); } +.service-panel[data-service="social"] { border-top: 4px solid var(--social); } + +.service-head { + padding: 28px; + border-bottom: 1px solid var(--border); + background: + linear-gradient(180deg, rgba(255, 255, 255, 0.15), transparent), + var(--surface-muted); +} + +.service-head-copy { + display: flex; + flex-direction: column; + gap: 10px; + max-width: 760px; +} + +.service-head h2 { + font-size: 34px; + margin-bottom: 2px; +} + +.service-head-route { + display: inline-flex; + align-items: center; + flex-wrap: wrap; + gap: 10px; + max-width: 100%; + min-width: 0; +} + +.service-head-route-label { + padding: 4px 9px; + border-radius: 999px; + background: var(--surface-strong); + border: 1px solid var(--border); + font-size: 11px; + font-weight: 700; + letter-spacing: 0.1em; + text-transform: uppercase; +} + +.service-head-route .mono { + display: inline-block; + max-width: 100%; + overflow-wrap: anywhere; + word-break: break-word; + white-space: normal; +} + +.service-tools { + 
min-width: 280px; + display: flex; + flex-direction: column; + gap: 12px; + padding: 18px; + border-radius: 20px; + border: 1px solid var(--border); + background: var(--surface); +} + +.service-body { + padding: 28px; + display: flex; + flex-direction: column; + gap: 24px; +} + +.stats-grid, +.integration-summary, +.social-board-grid, +.social-board-summary, +.settings-fields, +.settings-panel-grid, +.section-grid, +.brief-list, +.detail-panels { + display: grid; +} + +.stats-grid { + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 14px; +} + +.stat-box { + padding: 18px; +} + +.stat-box .hint { + margin-top: 10px; +} + +.section-grid { + grid-template-columns: minmax(0, 1.15fr) minmax(280px, 0.85fr); + gap: 16px; +} + +.quickstart-grid { + display: flex; + flex-direction: column; + gap: 16px; +} + +.quickstart-card { + display: flex; + flex-direction: column; + gap: 16px; +} + +.quickstart-card-head { + display: flex; + justify-content: space-between; + gap: 16px; + align-items: flex-start; +} + +.quickstart-card-copy { + display: flex; + flex-direction: column; + gap: 4px; +} + +.quickstart-primary-layout { + display: grid; + grid-template-columns: minmax(280px, 0.9fr) minmax(0, 1.1fr); + gap: 18px; + align-items: start; +} + +.quickstart-visual-col, +.quickstart-config-col { + display: flex; + flex-direction: column; + gap: 16px; + min-width: 0; +} + +.quickstart-visual-head { + padding: 16px 18px; + border-radius: 20px; + border: 1px solid var(--border); + background: + linear-gradient(145deg, rgba(29, 78, 216, 0.06), rgba(15, 118, 110, 0.05)), + var(--surface); +} + +.quickstart-visual-head .label { + display: block; + margin-bottom: 8px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); +} + +.quickstart-visual-head strong { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 20px; + letter-spacing: -0.04em; + line-height: 1.18; +} + 
+.quickstart-visual-head span { + display: block; + margin-top: 8px; + font-size: 12px; + line-height: 1.6; + color: var(--text-soft); +} + +.subcard { + padding: 24px; + background: var(--surface-glass); + border: 1px solid var(--border); + border-radius: 24px; +} + +.subcard h3, +.social-board-title { + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 22px; + letter-spacing: -0.04em; +} + +.subcard .desc { + margin-top: 8px; +} + +.inline-meta { + display: flex; + flex-wrap: wrap; + gap: 12px; + margin: 16px 0; +} + +.inline-meta span, +.endpoint { + display: inline-flex; + align-items: flex-start; + gap: 6px; + padding: 7px 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface); + max-width: 100%; + min-width: 0; + font-size: 13px; + line-height: 1.6; + color: var(--text-soft); + overflow-wrap: anywhere; + word-break: break-word; + white-space: normal; +} + +.code-toolbar { + display: flex; + justify-content: space-between; + align-items: center; + gap: 12px; + margin-bottom: 12px; + flex-wrap: wrap; +} + +.code-block { + overflow-x: auto; + padding: 18px; + border-radius: 18px; + background: #101722; + color: #edf1f5; + font-size: 13px; + line-height: 1.7; + border: 1px solid rgba(255, 255, 255, 0.08); +} + +.service-brief-head { + display: flex; + justify-content: space-between; + gap: 12px; + align-items: center; + margin-bottom: 12px; +} + +.service-brief-note, +.detail-pill, +.settings-meta-pill { + display: inline-flex; + align-items: center; + padding: 6px 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface); + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--text-soft); +} + +.brief-list { + grid-template-columns: 1fr; + gap: 10px; +} + +.brief-item { + padding: 14px 16px; + min-width: 0; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface); +} + +.brief-item 
strong { + display: block; + font-size: 15px; + line-height: 1.6; + overflow-wrap: anywhere; + word-break: break-word; +} + +.detail-panels { + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 16px; +} + +.detail-card summary { + list-style: none; + display: flex; + justify-content: space-between; + gap: 18px; + align-items: start; + cursor: pointer; +} + +.detail-card-static-head { + display: flex; + justify-content: space-between; + gap: 18px; + align-items: start; +} + +.detail-card summary::-webkit-details-marker { + display: none; +} + +.detail-card summary p, +.detail-card-static-head p { + margin-top: 6px; + font-size: 13px; + color: var(--text-soft); + line-height: 1.65; +} + +.detail-card .detail-body { + margin-top: 18px; + display: flex; + flex-direction: column; + gap: 16px; +} + +.toggle-area { + display: flex; + flex-direction: column; + gap: 10px; + padding: 16px; + border-radius: 18px; + border: 1px dashed var(--border-strong); + background: var(--surface); +} + +.form-row { + display: flex; + gap: 10px; + align-items: center; + flex-wrap: wrap; +} + +.token-create-row { + display: grid; + grid-template-columns: minmax(0, 1fr) auto; + align-items: center; +} + +.token-create-row .input-grow { + min-width: 0; +} + +.token-create-row .btn { + white-space: nowrap; +} + +.form-row-end { + justify-content: flex-end; +} + +.input-grow { + flex: 1 1 240px; +} + +.table-wrap { + overflow: auto; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface); +} + +table { + width: 100%; + border-collapse: collapse; + text-align: left; +} + +th, +td { + padding: 15px 16px; + border-bottom: 1px solid var(--border); + vertical-align: top; + font-size: 13px; +} + +th { + position: sticky; + top: 0; + z-index: 1; + background: var(--surface-strong); + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); +} + +tr:last-child td { + border-bottom: none; +} + +.table-actions 
{ + display: flex; + gap: 8px; + flex-wrap: wrap; +} + +.table-row-clickable { + cursor: pointer; + transition: background-color 0.18s ease; +} + +.table-row-clickable:hover td { + background: var(--surface-muted); +} + +.table-row-clickable:focus-visible { + outline: 2px solid rgba(29, 78, 216, 0.45); + outline-offset: -2px; +} + +.table-row-clickable.is-danger td { + background: rgba(187, 59, 49, 0.05); +} + +.table-row-clickable.is-warn td { + background: rgba(183, 106, 17, 0.05); +} + +.table-row-clickable.is-busy td { + background: rgba(29, 78, 216, 0.05); +} + +.table-row-clickable.is-off td { + background: rgba(111, 122, 136, 0.05); +} + +.row-meta { + margin-top: 6px; + font-size: 11px; + color: var(--muted); +} + +.tag { + display: inline-flex; + align-items: center; + padding: 5px 10px; + border-radius: 999px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.06em; + text-transform: uppercase; +} + +.tag-ok { + color: var(--ok); + background: rgba(21, 115, 71, 0.12); +} + +.tag-off { + color: var(--danger); + background: rgba(187, 59, 49, 0.12); +} + +.quota-bar { + width: 100%; + height: 7px; + margin-top: 8px; + border-radius: 999px; + background: rgba(111, 122, 136, 0.14); + overflow: hidden; +} + +.quota-bar-fill { + height: 100%; + background: linear-gradient(90deg, var(--accent), var(--exa)); +} + +.quota-bar-fill.warn { + background: linear-gradient(90deg, var(--warn), #f59e0b); +} + +.quota-bar-fill.danger { + background: linear-gradient(90deg, var(--danger), #ef4444); +} + +.mysearch-shell { + padding: 0; + overflow: hidden; + border-top: 4px solid var(--mysearch); +} + +.integration-summary { + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 12px; + margin: 18px 0 16px; +} + +.quickstart-config-col .integration-summary { + grid-template-columns: repeat(2, minmax(0, 1fr)); + margin: 0; +} + +.quickstart-visual-col .integration-summary { + margin: 0; +} + +.integration-summary-item { + padding: 16px; + min-width: 0; +} + 
+.integration-summary-item-wide { + grid-column: span 2; +} + +.integration-note, +.social-board-foot, +.settings-secret-meta { + padding: 14px 16px; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface); +} + +.integration-summary-item .label { + margin-bottom: 4px; +} + +.integration-summary-item .value { + font-size: clamp(1.08rem, 1.8vw, 1.42rem); + line-height: 1.38; + letter-spacing: -0.03em; + overflow-wrap: anywhere; + word-break: break-word; +} + +.integration-summary-item .value.is-tight { + font-size: clamp(0.98rem, 1.45vw, 1.18rem); + line-height: 1.5; + letter-spacing: -0.01em; +} + +.integration-summary-item .value.mono { + font-size: clamp(1rem, 1.45vw, 1.2rem); + line-height: 1.55; + letter-spacing: -0.01em; + overflow-wrap: anywhere; + word-break: break-word; + white-space: normal; +} + +.integration-summary-detail .integration-summary-item .value { + font-size: clamp(1rem, 1.4vw, 1.14rem); + line-height: 1.58; +} + +.quickstart-route-strip { + display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 12px; + margin: 18px 0 16px; +} + +.quickstart-route-card { + padding: 16px; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface-glass); +} + +.quickstart-route-card.is-ok { + border-color: rgba(15, 118, 110, 0.18); + background: + linear-gradient(145deg, rgba(15, 118, 110, 0.06), rgba(15, 118, 110, 0.02)), + var(--surface-glass); +} + +.quickstart-route-card.is-warn { + border-color: rgba(183, 106, 17, 0.2); + background: + linear-gradient(145deg, rgba(183, 106, 17, 0.07), rgba(183, 106, 17, 0.02)), + var(--surface-glass); +} + +.quickstart-route-card.is-danger { + border-color: rgba(187, 59, 49, 0.18); + background: + linear-gradient(145deg, rgba(187, 59, 49, 0.07), rgba(187, 59, 49, 0.02)), + var(--surface-glass); +} + +.quickstart-route-card .label { + display: block; + margin-bottom: 8px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + 
text-transform: uppercase; + color: var(--muted); +} + +.quickstart-route-card strong { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 15px; + line-height: 1.45; + color: var(--text); + overflow-wrap: anywhere; +} + +.quickstart-route-card span { + display: block; + margin-top: 6px; + font-size: 12px; + line-height: 1.55; + color: var(--text-soft); + overflow-wrap: anywhere; +} + +.quickstart-install-strip { + margin: 0; + padding: 16px 18px; + border-radius: 20px; + border: 1px solid var(--border); + background: + linear-gradient(145deg, rgba(29, 78, 216, 0.07), rgba(15, 118, 110, 0.05)), + var(--surface); +} + +.quickstart-install-strip .label { + display: block; + margin-bottom: 8px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); +} + +.quickstart-install-strip strong { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 16px; + line-height: 1.45; + letter-spacing: -0.03em; +} + +.quickstart-install-strip span { + display: block; + margin-top: 6px; + font-size: 12px; + line-height: 1.6; + color: var(--text-soft); +} + +.quickstart-install-layout { + display: grid; + grid-template-columns: minmax(280px, 0.9fr) minmax(0, 1.1fr); + gap: 18px; + align-items: start; +} + +.quickstart-command-col { + display: flex; + flex-direction: column; + gap: 12px; + min-width: 0; +} + +.quickstart-command-shell { + display: flex; + flex-direction: column; + gap: 12px; + min-width: 0; + padding: 16px 18px; + border-radius: 20px; + border: 1px solid var(--border); + background: + linear-gradient(145deg, rgba(15, 23, 42, 0.02), rgba(29, 78, 216, 0.03)), + var(--surface); +} + +.quickstart-command-col .code-toolbar { + margin-bottom: 0; +} + +.quickstart-command-col .code-block { + margin: 0; +} + +.quickstart-install-steps { + display: flex; + flex-wrap: wrap; + gap: 8px; + margin-top: 12px; +} + +.quickstart-install-meta { + 
display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 10px; + margin-top: 14px; +} + +.quickstart-install-meta-item { + padding: 12px 14px; + border-radius: 16px; + border: 1px solid var(--border); + background: rgba(255, 255, 255, 0.52); +} + +.quickstart-install-meta-item .label { + margin-bottom: 6px; +} + +.quickstart-install-meta-item strong { + display: block; + font-size: 14px; + letter-spacing: 0; +} + +.quickstart-install-actions { + display: flex; + flex-wrap: wrap; + gap: 10px; + margin-top: 14px; +} + +.quickstart-install-step { + display: inline-flex; + align-items: center; + min-height: 32px; + padding: 0 12px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface-glass); + font-size: 12px; + color: var(--text-soft); +} + +.quickstart-install-step.is-active { + border-color: rgba(29, 78, 216, 0.2); + background: rgba(29, 78, 216, 0.08); + color: var(--text); +} + +.quickstart-install-step.is-done { + border-color: rgba(15, 118, 110, 0.2); + background: rgba(15, 118, 110, 0.09); + color: var(--text); +} + +.integration-summary-item .value-meta { + margin-top: 6px; + font-size: 11px; + color: var(--text-soft); + overflow-wrap: anywhere; +} + +.integration-note strong, +.social-board-foot strong { + color: var(--text); +} + +.integration-note { + display: flex; + flex-wrap: wrap; + align-items: flex-start; + justify-content: space-between; + gap: 14px; + margin-bottom: 16px; +} + +.integration-note-copy { + flex: 1 1 320px; + min-width: 0; +} + +.integration-note strong { + display: block; + margin-bottom: 6px; +} + +.integration-note-error { + display: block; + margin-top: 8px; + color: var(--warn); +} + +.integration-note .btn { + flex: 0 0 auto; +} + +a:focus-visible, +button:focus-visible, +input:focus-visible, +textarea:focus-visible, +summary:focus-visible, +.service-toggle:focus-visible, +.settings-tab:focus-visible, +.mode-switch-btn:focus-visible, +.mini-switch-btn:focus-visible { + outline: 
none; + box-shadow: 0 0 0 4px rgba(29, 78, 216, 0.14); +} + +.social-integration-head { + display: flex; + justify-content: space-between; + gap: 12px; + align-items: center; +} + +.integration-summary-compact { + grid-template-columns: repeat(4, minmax(0, 1fr)); +} + +.integration-summary-detail { + margin: 14px 0 0; +} + +.integration-fold { + margin-bottom: 16px; + padding-top: 12px; + border-top: 1px dashed var(--border-strong); +} + +.integration-fold summary { + cursor: pointer; + font-size: 12px; + font-weight: 700; + color: var(--text-soft); +} + +.integration-note + .code-toolbar { + margin-top: 0; +} + +.integration-note.is-error, +.social-board-foot.is-error, +.settings-status.is-error { + border-color: rgba(187, 59, 49, 0.22); + background: rgba(187, 59, 49, 0.08); +} + +.detail-glance, +.settings-summary-strip, +.detail-drawer-summary { + display: grid; + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 12px; +} + +.glance-card, +.settings-summary-card, +.drawer-metric { + padding: 14px 16px; + min-width: 0; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface); +} + +.glance-card .label, +.settings-summary-card .label, +.drawer-metric .label { + display: block; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); + margin-bottom: 6px; +} + +.glance-card .value, +.drawer-metric .value { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 18px; + font-weight: 700; + letter-spacing: -0.04em; +} + +.settings-summary-card .value { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 16px; + font-weight: 700; + letter-spacing: -0.03em; + line-height: 1.55; + overflow-wrap: anywhere; + word-break: break-word; +} + +.settings-summary-card .value.mono { + font-family: 'IBM Plex Mono', monospace; + font-size: 13px; + line-height: 1.76; + letter-spacing: 0; +} + +.glance-card .hint, 
+/* Small hint/caption text shared by drawer metrics and settings summary cards. */
+.drawer-metric .hint {
+  margin-top: 6px;
+  font-size: 12px;
+  color: var(--text-soft);
+  line-height: 1.5;
+}
+
+.settings-summary-card .hint {
+  margin-top: 6px;
+  font-size: 12px;
+  color: var(--text-soft);
+  line-height: 1.62;
+  /* Let long unbroken tokens (URLs, keys) wrap instead of overflowing the card. */
+  overflow-wrap: anywhere;
+  word-break: break-word;
+}
+
+.settings-summary-card .hint.mono {
+  font-family: 'IBM Plex Mono', monospace;
+  font-size: 12px;
+  line-height: 1.72;
+}
+
+.detail-caption {
+  font-size: 12px;
+  color: var(--text-soft);
+  padding: 0 2px;
+}
+
+/* Table legend: pill chips whose dot colour mirrors the row-state classes
+   (is-danger / is-warn / is-busy / is-off) applied to table rows elsewhere. */
+.table-legend {
+  display: flex;
+  flex-wrap: wrap;
+  gap: 8px;
+  margin-top: -4px;
+}
+
+.legend-chip {
+  display: inline-flex;
+  align-items: center;
+  gap: 8px;
+  min-height: 30px;
+  padding: 0 10px;
+  border-radius: 999px;
+  border: 1px solid var(--border);
+  background: var(--surface);
+  font-size: 11px;
+  color: var(--text-soft);
+}
+
+.legend-dot {
+  width: 8px;
+  height: 8px;
+  border-radius: 999px;
+  background: var(--border-strong);
+}
+
+.legend-chip.is-danger .legend-dot {
+  background: var(--danger);
+}
+
+.legend-chip.is-warn .legend-dot {
+  background: var(--warn);
+}
+
+.legend-chip.is-busy .legend-dot {
+  background: var(--info);
+}
+
+.legend-chip.is-off .legend-dot {
+  background: var(--muted);
+}
+
+/* Social / X workspace board layout. */
+.social-section-grid {
+  grid-template-columns: minmax(0, 1.15fr) minmax(280px, 0.85fr);
+}
+
+.social-board {
+  display: flex;
+  flex-direction: column;
+  gap: 16px;
+}
+
+.social-board-top {
+  display: flex;
+  justify-content: space-between;
+  align-items: flex-start;
+}
+
+.social-board-kicker {
+  font-size: 11px;
+  font-weight: 700;
+  letter-spacing: 0.12em;
+  text-transform: uppercase;
+  color: var(--muted);
+  margin-bottom: 8px;
+}
+
+.social-board-grid {
+  grid-template-columns: repeat(4, minmax(0, 1fr));
+  gap: 10px;
+}
+
+.social-metric {
+  padding: 14px;
+}
+
+.social-board-summary {
+  grid-template-columns: repeat(4, minmax(0, 1fr));
+  gap: 10px;
+}
+
+.social-board-summary-item {
+  padding: 14px;
+  min-width: 0;
+  border-radius: 18px;
+  border: 1px solid var(--border);
+  background: var(--surface);
+}
+
+.social-board-summary-item .label {
+  display: block;
+  font-size: 11px;
+  font-weight: 700;
+  letter-spacing: 0.08em;
+  text-transform: uppercase;
+  color: var(--muted);
+  margin-bottom: 8px;
+}
+
+.social-board-summary-item .value {
+  font-size: 16px;
+  font-weight: 700;
+  line-height: 1.58;
+  overflow-wrap: anywhere;
+  word-break: break-word;
+}
+
+/* Settings modal: fixed full-viewport shell with blurred backdrop and a
+   scrollable dialog; the dark-theme variants below only swap the gradient. */
+.settings-modal-shell {
+  position: fixed;
+  inset: 0;
+  z-index: 100;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  padding: 24px;
+}
+
+.settings-backdrop {
+  position: absolute;
+  inset: 0;
+  background: rgba(17, 24, 39, 0.48);
+  backdrop-filter: blur(12px);
+  -webkit-backdrop-filter: blur(12px);
+}
+
+.settings-dialog {
+  position: relative;
+  z-index: 1;
+  width: min(1120px, calc(100vw - 32px));
+  max-height: calc(100vh - 48px);
+  overflow: auto;
+  padding: 28px;
+  border-radius: 30px;
+  background:
+    linear-gradient(160deg, rgba(255, 252, 247, 0.96), rgba(244, 236, 225, 0.92));
+}
+
+body.theme-dark .settings-dialog {
+  background:
+    linear-gradient(160deg, rgba(13, 19, 28, 0.96), rgba(17, 24, 34, 0.92));
+}
+
+.settings-head {
+  display: flex;
+  justify-content: space-between;
+  align-items: flex-start;
+  gap: 16px;
+  margin-bottom: 22px;
+}
+
+.settings-head h2 {
+  font-size: 34px;
+  margin: 6px 0 10px;
+}
+
+.settings-head-meta {
+  display: flex;
+  gap: 8px;
+  flex-wrap: wrap;
+  margin-top: 14px;
+}
+
+/* Settings tab strip: pill buttons; the .is-active class is toggled by script. */
+.settings-tabs {
+  display: flex;
+  gap: 10px;
+  flex-wrap: wrap;
+  margin-bottom: 24px;
+}
+
+.settings-tab {
+  padding: 10px 16px;
+  border-radius: 999px;
+  border: 1px solid var(--border);
+  background: transparent;
+  color: var(--text-soft);
+  font-weight: 700;
+  cursor: pointer;
+  transition: border-color 0.18s ease, background-color 0.18s ease, color 0.18s ease;
+}
+
+.settings-tab:hover {
+  background: var(--surface-muted);
+}
+
+.settings-tab.is-active {
+  background: var(--primary);
+  color: var(--bg);
+  border-color: transparent;
+}
+
+.settings-tab-panel {
+  display: block;
+}
+
+/* !important so panel hiding wins over the display set on .settings-tab-panel. */
+.settings-tab-panel.hidden {
+  display: none !important;
+}
+
+.settings-panel-grid {
+  grid-template-columns: 280px minmax(0, 1fr);
+  gap: 20px;
+}
+
+.settings-panel-aside {
+  display: flex;
+  flex-direction: column;
+  gap: 14px;
+}
+
+.settings-panel-aside h3 {
+  font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif;
+  font-size: 24px;
+  letter-spacing: -0.04em;
+}
+
+.settings-note-list {
+  display: flex;
+  flex-direction: column;
+  gap: 10px;
+}
+
+.settings-note {
+  padding: 14px 16px;
+  border-radius: 18px;
+  border: 1px solid var(--border);
+  background: var(--surface);
+}
+
+.settings-panel-main {
+  padding: 22px;
+  border-radius: 26px;
+  border: 1px solid var(--border);
+  background: var(--surface-glass);
+  display: flex;
+  flex-direction: column;
+}
+
+.settings-form {
+  display: flex;
+  flex-direction: column;
+  gap: 18px;
+}
+
+.settings-fields {
+  grid-template-columns: repeat(2, minmax(0, 1fr));
+  gap: 18px;
+}
+
+.settings-field {
+  display: flex;
+  flex-direction: column;
+  gap: 8px;
+}
+
+.settings-field.full-width {
+  grid-column: 1 / -1;
+}
+
+.settings-field label {
+  font-size: 14px;
+  font-weight: 700;
+}
+
+.settings-actions,
+.settings-footer-actions {
+  display: flex;
+  gap: 10px;
+  flex-wrap: wrap;
+}
+
+/* Sticky footer inside the scrolling dialog; the vertical gradient fades
+   content that scrolls underneath it (dark-theme override follows). */
+.settings-footer {
+  position: sticky;
+  bottom: -22px;
+  z-index: 2;
+  display: flex;
+  justify-content: space-between;
+  gap: 16px;
+  align-items: center;
+  flex-wrap: wrap;
+  margin-top: auto;
+  padding: 16px 0 0;
+  border-top: 1px solid var(--border);
+  background:
+    linear-gradient(180deg, rgba(248, 246, 241, 0), rgba(248, 246, 241, 0.92) 28%, rgba(248, 246, 241, 0.98));
+}
+
+body.theme-dark .settings-footer {
+  background:
+    linear-gradient(180deg, rgba(17, 24, 34, 0), rgba(17, 24, 34, 0.92) 28%, rgba(17, 24, 34, 0.98));
+}
+
+.settings-footer-copy {
+  display: flex;
+  flex-direction: column;
+  gap: 4px;
+  flex: 1 1 280px;
+  min-width: 0;
+}
+ 
+.settings-footer-copy strong { + font-size: 14px; +} + +.settings-footer-copy span { + font-size: 12px; + color: var(--text-soft); + line-height: 1.55; +} + +.settings-status { + margin-top: 18px; + padding: 14px 16px; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface); + color: var(--text-soft); + font-size: 13px; +} + +.settings-probe { + margin-top: 18px; + padding: 16px 18px; + border-radius: 22px; + border: 1px solid var(--border); + background: var(--surface); + display: flex; + flex-direction: column; + gap: 14px; +} + +.settings-probe.is-error { + border-color: rgba(187, 59, 49, 0.22); + background: rgba(187, 59, 49, 0.05); +} + +.settings-probe-head { + display: flex; + justify-content: space-between; + gap: 12px; + align-items: flex-start; + flex-wrap: wrap; +} + +.settings-probe-eyebrow { + font-size: 11px; + font-weight: 700; + letter-spacing: 0.12em; + text-transform: uppercase; + color: var(--muted); + margin-bottom: 6px; +} + +.settings-probe-head strong { + display: block; + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 22px; + letter-spacing: -0.04em; +} + +.settings-probe-pills { + display: flex; + flex-wrap: wrap; + gap: 8px; +} + +.settings-probe-pill { + display: inline-flex; + align-items: center; + min-height: 30px; + padding: 0 10px; + border-radius: 999px; + border: 1px solid var(--border); + background: var(--surface-strong); + font-size: 11px; + font-weight: 700; + color: var(--text-soft); +} + +.settings-probe-grid { + display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 12px; +} + +.settings-probe-card { + padding: 14px 16px; + min-width: 0; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface-glass); +} + +.settings-probe-card.is-wide { + grid-column: 1 / -1; +} + +.settings-probe-card .label { + display: block; + margin-bottom: 8px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: 
uppercase; + color: var(--muted); +} + +.settings-probe-card .value { + display: block; + font-size: 14px; + line-height: 1.65; + color: var(--text); + overflow-wrap: anywhere; + word-break: break-word; +} + +.settings-probe-card .value.mono { + font-size: 13px; + line-height: 1.78; + letter-spacing: 0; +} + +.settings-advanced { + margin-top: 20px; + padding-top: 20px; + border-top: 1px dashed var(--border-strong); +} + +.settings-advanced summary { + cursor: pointer; + font-weight: 700; +} + +.detail-drawer-shell, +.app-dialog-shell { + position: fixed; + inset: 0; +} + +.detail-drawer-shell { + z-index: 110; +} + +.app-dialog-shell { + z-index: 130; +} + +.detail-drawer-backdrop, +.app-dialog-backdrop { + position: absolute; + inset: 0; + background: rgba(17, 24, 39, 0.4); + backdrop-filter: blur(10px); + -webkit-backdrop-filter: blur(10px); +} + +.detail-drawer { + position: absolute; + top: 16px; + right: 16px; + bottom: 16px; + width: min(460px, calc(100vw - 28px)); + padding: 22px; + border-radius: 30px; + display: flex; + flex-direction: column; + gap: 16px; + overflow: auto; + background: + linear-gradient(160deg, rgba(255, 252, 247, 0.96), rgba(244, 236, 225, 0.92)); +} + +body.theme-dark .detail-drawer { + background: + linear-gradient(160deg, rgba(13, 19, 28, 0.96), rgba(17, 24, 34, 0.92)); +} + +.detail-drawer-head, +.app-dialog-head { + display: flex; + justify-content: space-between; + gap: 12px; + align-items: flex-start; +} + +.detail-drawer-kicker, +.app-dialog-kicker { + font-size: 11px; + font-weight: 700; + letter-spacing: 0.12em; + text-transform: uppercase; + color: var(--muted); +} + +.detail-drawer-head h3, +.app-dialog-head h3 { + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 28px; + letter-spacing: -0.04em; + margin: 6px 0 8px; +} + +.detail-drawer-head p, +.app-dialog-message { + font-size: 13px; + color: var(--text-soft); + line-height: 1.65; +} + +.detail-drawer-body { + display: flex; + flex-direction: column; 
+ gap: 14px; +} + +.drawer-section { + padding: 16px 18px; + border-radius: 22px; + border: 1px solid var(--border); + background: var(--surface); +} + +.drawer-section h4 { + font-family: 'Space Grotesk', 'Noto Sans SC', sans-serif; + font-size: 18px; + letter-spacing: -0.03em; + margin-bottom: 10px; +} + +.drawer-section-body { + display: flex; + flex-direction: column; + gap: 10px; +} + +.drawer-grid { + display: grid; + gap: 12px; +} + +.drawer-grid-compact { + grid-template-columns: repeat(2, minmax(0, 1fr)); +} + +.drawer-inline-card { + padding: 12px 14px; + border-radius: 16px; + border: 1px solid var(--border); + background: var(--surface-muted); +} + +.drawer-inline-card span { + display: block; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); + margin-bottom: 6px; +} + +.drawer-inline-card strong { + display: block; + font-size: 14px; + line-height: 1.55; +} + +.detail-drawer-actions, +.app-dialog-actions { + display: flex; + flex-direction: column; + gap: 10px; +} + +.drawer-action-group { + padding: 14px 16px; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface); +} + +.drawer-action-group.is-danger { + border-color: rgba(187, 59, 49, 0.2); + background: rgba(187, 59, 49, 0.06); +} + +.drawer-action-kicker { + display: block; + margin-bottom: 10px; + font-size: 11px; + font-weight: 700; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--muted); +} + +.drawer-action-group.is-danger .drawer-action-kicker { + color: var(--danger); +} + +.drawer-action-row { + display: flex; + justify-content: flex-end; + gap: 10px; + flex-wrap: wrap; +} + +.app-dialog { + position: relative; + z-index: 1; + width: min(460px, calc(100vw - 28px)); + margin: 12vh auto 0; + padding: 24px; + border-radius: 28px; + display: flex; + flex-direction: column; + gap: 18px; + background: + linear-gradient(160deg, rgba(255, 252, 247, 0.98), rgba(244, 236, 225, 
0.94)); +} + +body.theme-dark .app-dialog { + background: + linear-gradient(160deg, rgba(13, 19, 28, 0.98), rgba(17, 24, 34, 0.94)); +} + +#app-dialog[data-tone="danger"] .app-dialog-kicker { + color: var(--danger); +} + +#app-dialog[data-tone="info"] .app-dialog-kicker { + color: var(--info); +} + +.toast-root { + position: fixed; + right: 20px; + bottom: 20px; + z-index: 999; + display: flex; + flex-direction: column; + gap: 10px; + pointer-events: none; +} + +.toast { + padding: 12px 16px; + border-radius: 18px; + border: 1px solid var(--border); + background: var(--surface-strong); + color: var(--text); + box-shadow: var(--shadow-lg); + font-size: 13px; + font-weight: 700; + pointer-events: auto; + animation: toast-slide-in 0.28s ease forwards; +} + +.toast-success { border-left: 4px solid var(--ok); } +.toast-error { border-left: 4px solid var(--danger); } +.toast-warn { border-left: 4px solid var(--warn); } +.toast-info { border-left: 4px solid var(--info); } + +@keyframes toast-slide-in { + from { opacity: 0; transform: translateY(12px); } + to { opacity: 1; transform: translateY(0); } +} + +.toast-fade-out { + animation: toast-slide-out 0.24s ease forwards; +} + +@keyframes toast-slide-out { + from { opacity: 1; transform: translateY(0); } + to { opacity: 0; transform: translateY(12px); } +} + +.muted { color: var(--muted); } +.ok { color: var(--ok); } +.warn { color: var(--warn); } +.danger { color: var(--danger); } +.info { color: var(--info); } + +@media (max-width: 1280px) { + .container { + width: min(100vw - 24px, 1320px); + } + + .summary-strip { + grid-template-columns: repeat(3, minmax(0, 1fr)); + } + + .hero-usage-grid { + grid-template-columns: repeat(2, minmax(0, 1fr)); + } + + .hero-lanes, + .social-board-grid, + .social-board-summary { + grid-template-columns: repeat(2, minmax(0, 1fr)); + } +} + +@media (max-width: 1024px) { + .hero-main, + .settings-panel-grid, + .section-grid, + .social-section-grid { + grid-template-columns: 1fr; + } + + 
.quickstart-primary-layout, + .quickstart-install-layout, + .service-switcher { + grid-template-columns: repeat(2, minmax(0, 1fr)); + } + + .service-head, + .hero-topbar, + .settings-head, + .access-shell-head { + flex-direction: column; + } + + .stats-grid, + .integration-summary, + .detail-glance, + .settings-summary-strip, + .detail-drawer-summary, + .detail-panels { + grid-template-columns: repeat(2, minmax(0, 1fr)); + } + + .service-switcher { + overflow: visible; + padding-right: 0; + } +} + +@media (max-width: 720px) { + .container { + width: min(100vw - 18px, 100%); + padding: 18px 0 44px; + } + + .auth { + margin: 6vh auto; + padding: 28px 22px; + } + + .hero, + .switcher-shell, + .credit-strip, + .settings-dialog, + .service-head, + .service-body, + .subcard, + .access-shell { + padding: 18px; + } + + .hero-copy h1 { + max-width: none; + font-size: 2rem; + } + + .hero-usage-grid, + .hero-focus-metrics, + .summary-strip, + .stats-grid, + .auth-meta, + .integration-summary, + .quickstart-route-strip, + .quickstart-install-meta, + .detail-glance, + .settings-summary-strip, + .settings-probe-grid, + .detail-drawer-summary, + .credit-meta, + .social-board-grid, + .social-board-summary, + .settings-fields, + .detail-panels { + grid-template-columns: 1fr; + } + + .service-toggle-grid { + grid-template-columns: 1fr 1fr; + } + + .service-switcher, + .quickstart-primary-layout, + .quickstart-install-layout { + grid-template-columns: 1fr; + } + + .service-toggle { + min-height: auto; + } + + .service-toggle-top, + .service-toggle-status-wrap, + .hero-focus-status-row, + .quickstart-card-head, + .settings-probe-head, + .detail-card summary, + .detail-card-static-head, + .access-shell-head { + flex-direction: column; + align-items: flex-start; + } + + .integration-summary-item-wide { + grid-column: auto; + } + + .code-toolbar, + .form-row, + .settings-actions, + .settings-footer, + .detail-drawer-actions, + .app-dialog-actions { + align-items: stretch; + } + + 
.token-create-row { + grid-template-columns: 1fr; + } + + .detail-drawer { + top: auto; + left: 14px; + right: 14px; + bottom: 14px; + width: auto; + } + + .quickstart-install-steps, + .table-legend { + flex-direction: column; + } + + .drawer-grid-compact { + grid-template-columns: 1fr; + } + + .toast-root { + left: 14px; + right: 14px; + bottom: 14px; + } + + .toast { + width: 100%; + } +} + +@media (prefers-reduced-motion: reduce) { + html:focus-within { + scroll-behavior: auto; + } + + *, + *::before, + *::after { + animation-duration: 0.01ms !important; + animation-iteration-count: 1 !important; + transition-duration: 0.01ms !important; + scroll-behavior: auto !important; + } +} diff --git a/proxy/static/js/console.js b/proxy/static/js/console.js new file mode 100644 index 0000000..c9c34ce --- /dev/null +++ b/proxy/static/js/console.js @@ -0,0 +1,3966 @@ + +function showToast(message, type = 'info') { + const root = document.getElementById('toast-root'); + if (!root) return; + const toast = document.createElement('div'); + toast.className = `toast toast-${type}`; + toast.setAttribute('role', 'status'); + toast.setAttribute('aria-live', 'polite'); + toast.textContent = message; + root.appendChild(toast); + setTimeout(() => { + toast.classList.add('toast-fade-out'); + setTimeout(() => toast.remove(), 300); + }, 3000); +} +const STORAGE_KEY = 'multi_service_proxy_pwd'; +const LEGACY_STORAGE_KEY = 'tavily_proxy_pwd'; +const ACTIVE_SERVICE_KEY = 'multi_service_proxy_active_service'; +const THEME_KEY = 'mysearch_proxy_console_theme'; +const THEME_CYCLE = ['light', 'dark', 'auto']; +const AUTO_THEME_LIGHT_HOUR_START = 7; +const AUTO_THEME_DARK_HOUR_START = 19; +const API = ''; +const PAGE_KIND = window.PAGE_KIND || 'console'; +const BUTTON_MIN_BUSY_MS = 320; +const SERVICE_META = { + tavily: { + label: 'Tavily', + emailPrefix: 'tavily-', + tokenPrefix: 'tvly-', + keyPlaceholder: 'tvly-xxxxxxxx', + importPlaceholder: '支持粘贴 email,password,tvly-xxx,timestamp 或仅 
tvly-xxx,每行一条', + quotaSource: '真实额度来自 Tavily 官方 GET /usage', + routeHint: '代理端点: POST /api/search, POST /api/extract', + syncButton: '同步 Tavily 额度', + syncSupported: true, + panelIntro: '适合新闻、网页线索和基础搜索入口;现在既支持本地 API Key 池,也支持接上游 Tavily Gateway。', + tokenPoolDesc: '给业务侧发放 Tavily 代理 Token,和 Exa / Firecrawl 完全分开创建、限流、统计。', + keyPoolDesc: 'Tavily Key 独立存储,导入时只写入 Tavily 池,不会和 Exa 或 Firecrawl 混用。', + switcherRoute: '/api/search · /api/extract', + switcherBadges: ['网页发现', '官方同步'], + switcherFoot: 'API Key 池 + Gateway 双模式', + spotlightDesc: 'Tavily 继续负责第一层网页发现,这一栏保留现有功能与额度同步逻辑。', + }, + exa: { + label: 'Exa', + emailPrefix: 'exa-', + tokenPrefix: 'exat-', + keyPlaceholder: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', + importPlaceholder: '支持粘贴 email,password,uuid,timestamp 或仅 UUID Key,每行一条', + quotaSource: 'Exa 实时额度暂时无法查询,控制台当前只统计代理调用', + routeHint: '代理端点: POST /exa/search', + syncButton: 'Exa 暂不支持同步', + syncSupported: false, + panelIntro: '适合补充网页发现入口,已经独立成 Exa 工作台、Exa Key 池和 Exa Token 池。', + tokenPoolDesc: '给业务侧发放 Exa 代理 Token,和 Tavily / Firecrawl 完全分开创建、限流、统计。', + keyPoolDesc: 'Exa Key 独立存储,支持直接导入 UUID key,不和别的服务共用池子。', + switcherRoute: '/exa/search', + switcherBadges: ['网页发现', '代理统计'], + switcherFoot: '独立搜索池', + spotlightDesc: 'Exa 已经收成单独工作台,现在可以单独导入 Key、签发 Token,并通过 /exa/search 直接代理搜索。', + }, + firecrawl: { + label: 'Firecrawl', + emailPrefix: 'fc-', + tokenPrefix: 'fctk-', + keyPlaceholder: 'fc-xxxxxxxx', + importPlaceholder: '支持粘贴 email,password,fc-xxx,timestamp 或仅 fc-xxx,每行一条', + quotaSource: '真实额度来自 Firecrawl /v2/team/credit-usage', + routeHint: '代理端点: /firecrawl/*,例如 POST /firecrawl/v2/scrape', + syncButton: '同步 Firecrawl 额度', + syncSupported: true, + panelIntro: '适合正文抓取、文档页、PDF 和结构化抽取,继续保持独立 Firecrawl 工作台。', + tokenPoolDesc: '给业务侧发放 Firecrawl 代理 Token,和 Tavily / Exa 完全分开创建、限流、统计。', + keyPoolDesc: 'Firecrawl Key 独立存储,导入时只写入 Firecrawl 池,不会和其他服务混用。', + switcherRoute: '/firecrawl/*', + switcherBadges: ['正文抓取', '官方同步'], + switcherFoot: '抽取与 credits', + spotlightDesc: 
'Firecrawl 继续负责正文抓取与页面抽取,额度同步仍按 Firecrawl credits 展示。', + }, +}; + +const WORKSPACE_META = { + ...SERVICE_META, + social: { + label: 'Social / X', + emailPrefix: 'X search', + tokenPrefix: 'shared auth', + routeHint: '代理端点: POST /social/search', + quotaSource: 'grok2api / xAI-compatible social router', + switcherRoute: '/social/search', + switcherBadges: ['X Search', '自动继承'], + switcherFoot: '兼容路由 + 统一输出', + spotlightDesc: 'Social / X 工作台负责舆情路由和 token 池映射,对外统一暴露 /social/search。', + }, +}; + +let PWD = localStorage.getItem(STORAGE_KEY) || localStorage.getItem(LEGACY_STORAGE_KEY) || ''; +let activeService = localStorage.getItem(ACTIVE_SERVICE_KEY) || 'tavily'; +let latestServices = {}; +let latestSocial = {}; +let latestMySearch = {}; +let latestSettings = {}; +let latestStatsMeta = {}; +let activeTheme = localStorage.getItem(THEME_KEY) || 'light'; +let effectiveTheme = 'light'; +let appDialogResolver = null; +let autoThemeIntervalId = 0; +const tableControls = { tokens: {}, keys: {} }; +const overlayFocusMemory = {}; +const OVERLAY_PRIORITY = ['app-dialog', 'detail-drawer', 'settings-modal']; + +function isShellVisible(id) { + const element = document.getElementById(id); + return Boolean(element && !element.classList.contains('hidden')); +} + +function syncOverlayState() { + const overlayOpen = ['settings-modal', 'detail-drawer', 'app-dialog'].some(isShellVisible); + document.body.classList.toggle('modal-open', overlayOpen); +} + +function getFocusableElements(root) { + if (!root) return []; + return Array.from(root.querySelectorAll( + 'button:not([disabled]), [href], input:not([disabled]), select:not([disabled]), textarea:not([disabled]), [tabindex]:not([tabindex="-1"])', + )).filter((item) => !item.classList.contains('hidden') && item.offsetParent !== null); +} + +function rememberOverlayFocus(id) { + if (document.activeElement instanceof HTMLElement) { + overlayFocusMemory[id] = document.activeElement; + } +} + +function restoreOverlayFocus(id) { + const target = 
overlayFocusMemory[id]; + delete overlayFocusMemory[id]; + if (target && target.isConnected) { + target.focus({ preventScroll: true }); + } +} + +function focusOverlay(id) { + const shell = document.getElementById(id); + if (!shell) return; + requestAnimationFrame(() => { + const candidates = getFocusableElements(shell); + const target = candidates.find((item) => item.hasAttribute('data-overlay-autofocus')) || candidates[0]; + target?.focus({ preventScroll: true }); + }); +} + +function getTopOpenOverlayId() { + return OVERLAY_PRIORITY.find((id) => isShellVisible(id)) || ''; +} + +function trapOverlayFocus(event) { + if (event.key !== 'Tab') return false; + const overlayId = getTopOpenOverlayId(); + if (!overlayId) return false; + const shell = document.getElementById(overlayId); + const focusable = getFocusableElements(shell); + if (!focusable.length) return false; + const first = focusable[0]; + const last = focusable[focusable.length - 1]; + const active = document.activeElement; + if (event.shiftKey) { + if (active === first || !shell.contains(active)) { + event.preventDefault(); + last.focus({ preventScroll: true }); + return true; + } + return false; + } + if (active === last || !shell.contains(active)) { + event.preventDefault(); + first.focus({ preventScroll: true }); + return true; + } + return false; +} + +function handleSegmentedControlKey(event) { + const trigger = event.target.closest('.mini-switch-btn, .mode-switch-btn, .settings-tab'); + if (!trigger) return false; + const isPrev = ['ArrowLeft', 'ArrowUp'].includes(event.key); + const isNext = ['ArrowRight', 'ArrowDown'].includes(event.key); + const isHome = event.key === 'Home'; + const isEnd = event.key === 'End'; + if (!isPrev && !isNext && !isHome && !isEnd) return false; + + const container = trigger.closest('.mini-switch, .mode-switch, .settings-tabs'); + if (!container) return false; + const selector = trigger.classList.contains('settings-tab') + ? 
'.settings-tab' + : trigger.classList.contains('mode-switch-btn') + ? '.mode-switch-btn' + : '.mini-switch-btn'; + const buttons = Array.from(container.querySelectorAll(selector)).filter((item) => !item.disabled); + if (!buttons.length) return false; + + const currentIndex = Math.max(0, buttons.indexOf(trigger)); + let targetIndex = currentIndex; + if (isHome) { + targetIndex = 0; + } else if (isEnd) { + targetIndex = buttons.length - 1; + } else if (isPrev) { + targetIndex = (currentIndex - 1 + buttons.length) % buttons.length; + } else if (isNext) { + targetIndex = (currentIndex + 1) % buttons.length; + } + + const next = buttons[targetIndex]; + if (!next) return false; + event.preventDefault(); + next.focus({ preventScroll: true }); + if (next !== trigger) { + next.click(); + } + return true; +} + +function getServiceDisplayLabel(service) { + if (service === 'mysearch') return 'MySearch'; + return WORKSPACE_META[service]?.label || SERVICE_META[service]?.label || service; +} + +function getServicePayload(service) { + if (service === 'mysearch') return latestMySearch || {}; + if (service === 'social') return latestSocial || {}; + return latestServices[service] || {}; +} + +function getTokenTableState(service) { + if (!tableControls.tokens[service]) { + tableControls.tokens[service] = { search: '', sort: 'risk' }; + } + return tableControls.tokens[service]; +} + +function getKeyTableState(service) { + if (!tableControls.keys[service]) { + tableControls.keys[service] = { search: '', filter: 'all', sort: 'risk' }; + } + return tableControls.keys[service]; +} + +function parseTimeValue(value) { + if (!value) return 0; + const stamp = Date.parse(value); + return Number.isFinite(stamp) ? 
stamp : 0; +} + +function getTokenActivity(token) { + const stats = token?.stats || {}; + return Number(stats.hour_count || 0) * 1000000 + + (Number(stats.today_success || 0) + Number(stats.today_failed || 0)) * 1000 + + Number(stats.month_success || 0) + + Number(stats.month_failed || 0); +} + +function getKeyRemaining(key) { + if (key.usage_key_remaining !== null && key.usage_key_remaining !== undefined) { + return Number(key.usage_key_remaining || 0); + } + if (key.usage_account_remaining !== null && key.usage_account_remaining !== undefined) { + return Number(key.usage_account_remaining || 0); + } + return Number.POSITIVE_INFINITY; +} + +function getTokenRiskScore(token) { + const stats = token?.stats || {}; + const failed = Number(stats.today_failed || 0); + const success = Number(stats.today_success || 0); + const today = failed + success; + const hour = Number(stats.hour_count || 0); + let score = failed * 1000; + if (failed > 0 && failed >= Math.max(success, 1)) { + score += 300000; + } else if (failed > 0) { + score += 120000; + } + if (today >= 120 || hour >= 24) { + score += 40000; + } + score += hour * 100 + today; + return score; +} + +function getKeyRiskScore(service, key) { + let score = 0; + const remaining = getKeyRemaining(key); + const failed = Number(key.total_failed || 0); + const used = Number(key.total_used || 0); + if (String(key.usage_sync_error || '').trim()) { + score += 500000; + } + if (Number(key.active) !== 1) { + score += 300000; + } + if (Number.isFinite(remaining)) { + score += Math.max(0, 200000 - Math.min(200000, remaining)); + } + if (failed > used && failed > 0) { + score += 80000; + } + if (service === 'exa' && used >= 24) { + score += 30000; + } + score += failed * 1000; + score += Math.max(0, 200 - Math.min(200, used)); + return score; +} + +function hasKeyIssue(service, key) { + return Boolean(getKeyRowClass(service, key)); +} + +function getTokenRowClass(token) { + const stats = token?.stats || {}; + const failed = 
Number(stats.today_failed || 0); + const success = Number(stats.today_success || 0); + const today = failed + success; + const hour = Number(stats.hour_count || 0); + if (failed > 0 && failed >= Math.max(success, 1)) { + return 'is-danger'; + } + if (today >= 120 || hour >= 24) { + return 'is-busy'; + } + if (failed > 0) { + return 'is-warn'; + } + return ''; +} + +function getKeyRowClass(service, key) { + if (String(key.usage_sync_error || '').trim()) { + return 'is-danger'; + } + if (Number(key.active) !== 1) { + return 'is-off'; + } + const remaining = getKeyRemaining(key); + if (Number.isFinite(remaining) && remaining <= 100) { + return 'is-warn'; + } + if (Number(key.total_failed || 0) > Number(key.total_used || 0) && Number(key.total_failed || 0) > 0) { + return 'is-warn'; + } + if (service === 'exa' && Number(key.total_used || 0) >= 24) { + return 'is-busy'; + } + return ''; +} + +function getFilteredTokens(service, tokens) { + const state = getTokenTableState(service); + const keyword = (state.search || '').trim().toLowerCase(); + let items = [...(tokens || [])]; + if (keyword) { + items = items.filter((token) => { + const haystack = [ + token.id, + token.name, + token.token, + token.created_at, + ].map((value) => String(value || '').toLowerCase()).join(' '); + return haystack.includes(keyword); + }); + } + + items.sort((left, right) => { + if (state.sort === 'risk') { + const riskDelta = getTokenRiskScore(right) - getTokenRiskScore(left); + if (riskDelta !== 0) return riskDelta; + const todayDelta = (Number(right?.stats?.today_failed || 0) + Number(right?.stats?.today_success || 0)) + - (Number(left?.stats?.today_failed || 0) + Number(left?.stats?.today_success || 0)); + if (todayDelta !== 0) return todayDelta; + return parseTimeValue(right.created_at) - parseTimeValue(left.created_at); + } + if (state.sort === 'name') { + return String(left.name || left.token || '').localeCompare(String(right.name || right.token || ''), 'zh-CN'); + } + if (state.sort === 
'today') { + const leftToday = Number(left?.stats?.today_success || 0) + Number(left?.stats?.today_failed || 0); + const rightToday = Number(right?.stats?.today_success || 0) + Number(right?.stats?.today_failed || 0); + if (rightToday !== leftToday) return rightToday - leftToday; + return parseTimeValue(right.created_at) - parseTimeValue(left.created_at); + } + const delta = getTokenActivity(right) - getTokenActivity(left); + if (delta !== 0) return delta; + return parseTimeValue(right.created_at) - parseTimeValue(left.created_at); + }); + return items; +} + +function getFilteredKeys(service, keys) { + const state = getKeyTableState(service); + const keyword = (state.search || '').trim().toLowerCase(); + let items = [...(keys || [])]; + + if (keyword) { + items = items.filter((key) => { + const haystack = [ + key.id, + key.key, + key.key_masked, + key.email, + key.last_used_at, + ].map((value) => String(value || '').toLowerCase()).join(' '); + return haystack.includes(keyword); + }); + } + + if (state.filter === 'active') { + items = items.filter((key) => Number(key.active) === 1); + } else if (state.filter === 'disabled') { + items = items.filter((key) => Number(key.active) !== 1); + } else if (state.filter === 'error') { + items = items.filter((key) => Boolean((key.usage_sync_error || '').trim())); + } else if (state.filter === 'issue') { + items = items.filter((key) => hasKeyIssue(service, key)); + } + + items.sort((left, right) => { + if (state.sort === 'risk') { + const riskDelta = getKeyRiskScore(service, right) - getKeyRiskScore(service, left); + if (riskDelta !== 0) return riskDelta; + return parseTimeValue(right.last_used_at) - parseTimeValue(left.last_used_at); + } + if (state.sort === 'usage') { + const usageDelta = Number(right.total_used || 0) - Number(left.total_used || 0); + if (usageDelta !== 0) return usageDelta; + return parseTimeValue(right.last_used_at) - parseTimeValue(left.last_used_at); + } + if (state.sort === 'quota') { + const quotaDelta = 
getKeyRemaining(left) - getKeyRemaining(right); + if (quotaDelta !== 0) return quotaDelta; + return parseTimeValue(right.last_used_at) - parseTimeValue(left.last_used_at); + } + return parseTimeValue(right.last_used_at) - parseTimeValue(left.last_used_at); + }); + return items; +} + +function handleTableRowKey(event, kind, service, id) { + if (!['Enter', ' '].includes(event.key)) return; + event.preventDefault(); + if (kind === 'token') { + openTokenDetail(service, id); + return; + } + openKeyDetail(service, id); +} + +function closeAppDialog(result = false) { + const shell = document.getElementById('app-dialog'); + if (!shell) return; + shell.classList.add('hidden'); + syncOverlayState(); + restoreOverlayFocus('app-dialog'); + if (appDialogResolver) { + const resolve = appDialogResolver; + appDialogResolver = null; + resolve(result); + } +} + +function showConfirmDialog({ + title = '请确认操作', + message = '确认后会继续执行当前操作。', + confirmText = '确认', + cancelText = '取消', + tone = 'info', + kicker = 'Action Required', +} = {}) { + const shell = document.getElementById('app-dialog'); + if (!shell) return Promise.resolve(false); + rememberOverlayFocus('app-dialog'); + document.getElementById('app-dialog-kicker').textContent = kicker; + document.getElementById('app-dialog-title').textContent = title; + document.getElementById('app-dialog-message').textContent = message; + shell.dataset.tone = tone; + document.getElementById('app-dialog-actions').innerHTML = ` + ${cancelText ? 
`` : ''} + + `; + shell.classList.remove('hidden'); + syncOverlayState(); + focusOverlay('app-dialog'); + return new Promise((resolve) => { + appDialogResolver = resolve; + }); +} + +function showAlertDialog({ + title = '提示', + message = '请查看当前状态。', + confirmText = '知道了', + tone = 'info', + kicker = 'Notice', +} = {}) { + return showConfirmDialog({ + title, + message, + confirmText, + cancelText: '', + tone, + kicker, + }); +} + +function openDetailDrawer({ + kicker = 'Detail', + title = '查看详情', + subtitle = '', + tone = 'info', + summaryHtml = '', + bodyHtml = '', + actionsHtml = '', +} = {}) { + const shell = document.getElementById('detail-drawer'); + if (!shell) return; + rememberOverlayFocus('detail-drawer'); + shell.dataset.tone = tone; + document.getElementById('detail-drawer-kicker').textContent = kicker; + document.getElementById('detail-drawer-title').textContent = title; + document.getElementById('detail-drawer-subtitle').textContent = subtitle; + document.getElementById('detail-drawer-summary').innerHTML = summaryHtml; + document.getElementById('detail-drawer-body').innerHTML = bodyHtml; + document.getElementById('detail-drawer-actions').innerHTML = actionsHtml; + shell.classList.remove('hidden'); + syncOverlayState(); + focusOverlay('detail-drawer'); +} + +function closeDetailDrawer() { + const shell = document.getElementById('detail-drawer'); + if (!shell) return; + shell.classList.add('hidden'); + document.getElementById('detail-drawer-summary').innerHTML = ''; + document.getElementById('detail-drawer-body').innerHTML = ''; + document.getElementById('detail-drawer-actions').innerHTML = ''; + syncOverlayState(); + restoreOverlayFocus('detail-drawer'); +} + +function summaryCard(label, value, hint = '', options = {}) { + const valueClass = options.valueClass ? ` ${options.valueClass}` : ''; + const hintClass = options.hintClass ? ` ${options.hintClass}` : ''; + return ` +
+
${escapeHtml(label)}
+
${escapeHtml(value)}
+
${escapeHtml(hint)}
+
+ `; +} + +function drawerMetric(label, value, hint = '') { + return ` +
+
${escapeHtml(label)}
+
${escapeHtml(value)}
+
${escapeHtml(hint)}
+
+ `; +} + +function drawerSection(title, body) { + return ` +
+

${escapeHtml(title)}

+
${body}
+
+ `; +} + +function renderSettingsSummaries(settings = latestSettings) { + const tavily = settings?.tavily || {}; + const social = settings?.social || {}; + const consoleSummary = document.getElementById('settings-console-summary'); + if (consoleSummary) { + consoleSummary.innerHTML = [ + summaryCard('当前主题', getThemePreferenceLabel(), getThemeSummaryHint()), + summaryCard('当前工作台', getServiceDisplayLabel(activeService), '保存设置后会回到这个工作台'), + summaryCard('会话身份', 'admin', '当前控制面使用单管理员入口'), + ].join(''); + } + + const tavilySummary = document.getElementById('settings-tavily-summary'); + if (tavilySummary) { + tavilySummary.innerHTML = [ + summaryCard('配置模式', tavilyModeLabel(tavily.mode || 'auto'), tavilyModeSourceLabel(tavily.mode_source || 'auto_pending')), + summaryCard('当前实际', tavilyModeLabel(tavily.effective_mode || tavily.mode || 'auto'), tavily.effective_mode === 'upstream' ? '当前请求直接转发到上游' : '当前请求从 API Key 池轮询'), + summaryCard( + '上游地址', + tavily.upstream_base_url || '未配置', + tavily.upstream_search_path || '/search', + { valueClass: 'mono is-address', hintClass: 'mono is-address-hint' }, + ), + summaryCard('凭证状态', tavily.upstream_api_key_configured ? '已配置' : '未配置', tavily.upstream_api_key_masked || `本地活跃 Key ${fmtNum(tavily.local_key_count || 0)}`), + ].join(''); + } + + const socialSummary = document.getElementById('settings-social-summary'); + if (socialSummary) { + socialSummary.innerHTML = [ + summaryCard('工作模式', socialModeLabel(social.mode || 'manual'), social.admin_connected ? '后台已连通' : '可手动覆写上游'), + summaryCard('Token 来源', socialTokenSourceLabel(social.token_source || ''), social.gateway_token_configured ? '客户端 token 已配置' : '可直接复用统一 token'), + summaryCard('默认模型', social.model || 'grok-4.1-fast', social.fallback_model ? 
`Fallback ${social.fallback_model}` : '未配置 fallback'), + ].join(''); + } +} + +function getLocalThemeClock() { + const now = new Date(); + let timeZone = ''; + try { + timeZone = Intl.DateTimeFormat().resolvedOptions().timeZone || ''; + } catch (error) { + timeZone = ''; + } + return { + hour: Number.isFinite(now.getHours()) ? now.getHours() : 12, + timeZone: timeZone || 'browser-local', + }; +} + +function resolveEffectiveTheme(theme = activeTheme) { + if (theme === 'dark') return 'dark'; + if (theme === 'auto') { + const { hour } = getLocalThemeClock(); + return hour >= AUTO_THEME_LIGHT_HOUR_START && hour < AUTO_THEME_DARK_HOUR_START ? 'light' : 'dark'; + } + return 'light'; +} + +function getThemePreferenceLabel(theme = activeTheme) { + if (theme === 'auto') return '自动模式'; + return theme === 'dark' ? '夜间模式' : '浅色模式'; +} + +function getThemeEffectiveLabel(theme = activeTheme) { + return resolveEffectiveTheme(theme) === 'dark' ? '夜间模式' : '浅色模式'; +} + +function getThemeSummaryHint(theme = activeTheme) { + if (theme === 'auto') { + const { hour, timeZone } = getLocalThemeClock(); + return `按浏览器本地时间自动切换 · ${timeZone} ${String(hour).padStart(2, '0')}:00 当前${getThemeEffectiveLabel(theme)}`; + } + return '控制台会记住你的偏好'; +} + +function getNextTheme(theme = activeTheme) { + const index = THEME_CYCLE.indexOf(theme); + if (index < 0) return THEME_CYCLE[0]; + return THEME_CYCLE[(index + 1) % THEME_CYCLE.length]; +} + +function syncAutoThemeWatcher() { + if (autoThemeIntervalId) { + window.clearInterval(autoThemeIntervalId); + autoThemeIntervalId = 0; + } + if (activeTheme === 'auto') { + autoThemeIntervalId = window.setInterval(() => { + refreshAutoThemeFromClock(); + }, 60_000); + } +} + +function applyTheme(theme, options = {}) { + const normalized = THEME_CYCLE.includes(theme) ? 
theme : 'light'; + const persist = options.persist !== false; + activeTheme = normalized; + effectiveTheme = resolveEffectiveTheme(activeTheme); + document.body.classList.toggle('theme-dark', effectiveTheme === 'dark'); + document.body.dataset.themePreference = activeTheme; + document.body.dataset.themeEffective = effectiveTheme; + if (persist) { + localStorage.setItem(THEME_KEY, activeTheme); + } + syncAutoThemeWatcher(); + syncThemeToggle(); + renderSettingsSummaries(); +} + +function refreshAutoThemeFromClock(force = false) { + if (activeTheme !== 'auto') return; + const nextEffective = resolveEffectiveTheme('auto'); + if (!force && nextEffective === effectiveTheme) return; + effectiveTheme = nextEffective; + document.body.classList.toggle('theme-dark', effectiveTheme === 'dark'); + document.body.dataset.themePreference = activeTheme; + document.body.dataset.themeEffective = effectiveTheme; + syncThemeToggle(); + renderSettingsSummaries(); +} + +function syncThemeToggle() { + const label = document.getElementById('theme-toggle-label'); + const button = document.getElementById('theme-toggle'); + if (label) { + label.textContent = getThemePreferenceLabel(); + } + if (button) { + button.dataset.theme = activeTheme; + button.dataset.effectiveTheme = effectiveTheme; + button.title = activeTheme === 'auto' + ? 
getThemeSummaryHint() + : `点击切换到${getThemePreferenceLabel(getNextTheme())}`; + } +} + +function toggleTheme() { + applyTheme(getNextTheme()); +} + +function scrollToCurrentPanel() { + if (PAGE_KIND === 'mysearch') { + window.location.href = '/'; + return; + } + const panel = document.querySelector(`.service-panel[data-service="${activeService}"]`); + if (panel) { + panel.scrollIntoView({ behavior: 'smooth', block: 'start' }); + } +} + +function scrollToQuickstart() { + if (PAGE_KIND !== 'mysearch') { + window.location.href = '/mysearch'; + return; + } + const shell = document.getElementById('mysearch-quickstart'); + if (shell) { + shell.scrollIntoView({ behavior: 'smooth', block: 'start' }); + } +} + +function openMySearchAccess() { + scrollToQuickstart(); +} + +function clearStoredPasswords() { + localStorage.removeItem(STORAGE_KEY); + localStorage.removeItem(LEGACY_STORAGE_KEY); +} + +function setLoginBusy(isBusy) { + const input = document.getElementById('pwd-input'); + const button = document.getElementById('login-submit'); + if (input) input.disabled = isBusy; + if (button) { + button.disabled = isBusy; + button.classList.toggle('is-busy', isBusy); + if (isBusy) { + button.classList.remove('is-success', 'is-error'); + } + button.textContent = isBusy ? '登录中...' 
: '进入控制台'; + } +} + +function showDashboard(options = {}) { + const animate = Boolean(options.animate); + document.getElementById('login-err').classList.add('hidden'); + const loginBox = document.getElementById('login-box'); + const dashboard = document.getElementById('dashboard'); + loginBox.classList.add('hidden'); + dashboard.classList.remove('hidden'); + dashboard.classList.remove('is-entering'); + if (animate) { + dashboard.classList.add('is-entering'); + setTimeout(() => { + dashboard.classList.remove('is-entering'); + }, 760); + } + renderSettingsSummaries(); +} + +function showLogin() { + const dashboard = document.getElementById('dashboard'); + const loginBox = document.getElementById('login-box'); + dashboard.classList.remove('is-entering'); + dashboard.classList.add('hidden'); + loginBox.classList.remove('hidden'); + closeSettingsModal(); + closeDetailDrawer(); + closeAppDialog(false); + setLoginBusy(false); +} + +async function fetchSession(method, path, body) { + const options = { + method, + credentials: 'same-origin', + headers: { + 'Content-Type': 'application/json', + }, + }; + if (body !== undefined) { + options.body = JSON.stringify(body); + } + const response = await fetch(API + path, options); + const text = await response.text(); + let payload = {}; + try { + payload = text ? JSON.parse(text) : {}; + } catch { + payload = text ? 
{ detail: text } : {}; + } + if (!response.ok) { + throw new Error(payload.detail || `HTTP ${response.status}`); + } + return payload; +} + +async function loginWithPassword(password) { + await fetchSession('POST', '/api/session/login', { password }); +} + +async function hasServerSession() { + try { + await fetchSession('GET', '/api/session'); + return true; + } catch { + return false; + } +} + +async function migrateStoredPasswordIfNeeded() { + if (!PWD) return false; + try { + await loginWithPassword(PWD); + PWD = ''; + clearStoredPasswords(); + return true; + } catch { + PWD = ''; + clearStoredPasswords(); + return false; + } +} + +function socialModeLabel(mode) { + if (mode === 'admin-auto') return '后台自动继承'; + if (mode === 'hybrid') return '后台继承 + 手动覆写'; + return '手动模式'; +} + +function tavilyModeLabel(mode) { + if (mode === 'upstream') return '上游 Gateway'; + if (mode === 'pool') return 'API Key 池'; + return '自动识别'; +} + +function tavilyModeSourceLabel(source) { + if (source === 'manual_upstream') return '手动固定上游'; + if (source === 'manual_pool') return '手动固定本地池'; + if (source === 'auto_upstream') return '自动识别到上游凭证'; + if (source === 'auto_pool') return '自动识别到本地可用 Key'; + return '等待识别'; +} + +function socialTokenSourceLabel(source) { + if (source === 'grok2api app.api_key') return '后台自动继承'; + if (source === 'SOCIAL_GATEWAY_UPSTREAM_API_KEY') return '手动上游 API key'; + if (source === 'manual SOCIAL_GATEWAY_TOKEN') return '手动客户端 token'; + return '尚未配置'; +} + +function socialStatusLabel(social) { + if (social?.admin_connected) return '后台已接通'; + if (social?.upstream_key_configured) return '已可转发搜索'; + return '等待配置'; +} + +function getTavilyRuntimeState(payload) { + const routing = payload?.routing || {}; + const settings = latestSettings?.tavily || {}; + return { + configuredMode: routing.mode || settings.mode || 'auto', + effectiveMode: routing.effective_mode || settings.effective_mode || settings.mode || 'auto', + modeSource: routing.mode_source || 
settings.mode_source || 'auto_pending', + upstreamConfigured: Boolean( + (routing.upstream_api_key_configured ?? settings.upstream_api_key_configured) || false, + ), + localKeyCount: Number( + routing.local_key_count + ?? settings.local_key_count + ?? payload?.keys_active + ?? 0, + ), + }; +} + +function getTavilyUpstreamSummary(payload) { + const summary = payload?.upstream_summary || {}; + const activeKeys = Number(summary.active_keys || 0); + const exhaustedKeys = Number(summary.exhausted_keys || 0); + const quarantinedKeys = Number(summary.quarantined_keys || 0); + const totalKeys = Number(summary.total_keys || (activeKeys + exhaustedKeys + quarantinedKeys)); + return { + available: Boolean(summary.available), + detail: summary.detail || '', + requestTarget: summary.request_target || '', + activeKeys, + exhaustedKeys, + quarantinedKeys, + totalKeys, + totalRequests: Number(summary.total_requests || 0), + successCount: Number(summary.success_count || 0), + errorCount: Number(summary.error_count || 0), + quotaExhaustedCount: Number(summary.quota_exhausted_count || 0), + totalQuotaLimit: Number(summary.total_quota_limit || 0), + totalQuotaRemaining: Number(summary.total_quota_remaining || 0), + lastActivity: summary.last_activity || null, + }; +} + +function getSocialUpstreamState(social) { + const visibility = social?.upstream_visibility || {}; + const upstreamApiKeyCount = Number( + visibility.upstream_api_key_count + ?? social?.upstream_api_key_count + ?? 0, + ); + const acceptedTokenCount = Number( + visibility.accepted_token_count + ?? social?.accepted_token_count + ?? 0, + ); + const level = visibility.level + || (social?.admin_connected + ? 'full' + : ((upstreamApiKeyCount > 0 || acceptedTokenCount > 0) + ? 'basic' + : 'none')); + return { + level, + detail: visibility.detail || '', + canProxySearch: Boolean( + visibility.can_proxy_search + ?? 
(social?.upstream_key_configured && social?.client_auth_configured), + ), + upstreamApiKeyCount, + acceptedTokenCount, + adminConnected: Boolean(visibility.admin_connected ?? social?.admin_connected), + tokenSource: visibility.token_source || social?.token_source || 'not_configured', + }; +} + +function isSocialUpstreamManaged(social) { + const state = getSocialUpstreamState(social || {}); + return Boolean( + state.adminConnected + || state.canProxySearch + || social?.upstream_key_configured + || (String(social?.upstream_base_url || '').trim() && state.upstreamApiKeyCount > 0), + ); +} + +function getBlankServicePayload() { + return { + tokens: [], + keys: [], + overview: {}, + real_quota: {}, + usage_sync: {}, + keys_total: 0, + keys_active: 0, + }; +} + +function normalizeRefreshScope(scope) { + if (!scope) { + return { + core: true, + mysearch: true, + social: true, + services: Object.keys(SERVICE_META), + }; + } + const services = new Set(Array.isArray(scope.services) ? scope.services : []); + if (scope.service) { + services.add(scope.service); + } + return { + core: scope.core !== false, + mysearch: Boolean(scope.mysearch), + social: Boolean(scope.social), + services: [...services].filter((service) => SERVICE_META[service]), + }; +} + +function getRefreshScopeForService(service, options = {}) { + const scope = { + core: options.core !== false, + mysearch: options.mysearch !== false, + social: false, + services: [], + }; + if (service === 'mysearch') { + scope.mysearch = true; + return scope; + } + if (service === 'social') { + scope.social = true; + return scope; + } + if (SERVICE_META[service]) { + scope.services.push(service); + } + return scope; +} + +function getQuickstartProviderCards(services = latestServices, social = latestSocial) { + const tavilyPayload = services?.tavily || {}; + const tavilyState = getTavilyRuntimeState(tavilyPayload); + const tavilyUpstream = getTavilyUpstreamSummary(tavilyPayload); + const tavilyKeysActive = 
Number(tavilyPayload.keys_active || 0); + const tavilyKeysTotal = Number(tavilyPayload.keys_total || 0); + const socialState = getSocialUpstreamState(social || {}); + const cards = []; + + if (tavilyState.effectiveMode === 'upstream') { + cards.push({ + label: 'Tavily', + tone: tavilyState.upstreamConfigured ? 'ok' : 'danger', + title: tavilyState.upstreamConfigured ? '上游 Gateway' : '待配置上游', + desc: tavilyState.upstreamConfigured + ? (tavilyUpstream.available + ? `上游活跃 ${fmtNum(tavilyUpstream.activeKeys)} / 总 ${fmtNum(tavilyUpstream.totalKeys)} · 剩余 ${fmtNum(tavilyUpstream.totalQuotaRemaining)}` + : `${tavilyModeSourceLabel(tavilyState.modeSource)} · 当前直接转发 /api/search,也可回退本地池`) + : '当前已切上游模式,但还没有可用的上游凭证', + }); + } else if (tavilyKeysActive > 0) { + cards.push({ + label: 'Tavily', + tone: 'ok', + title: `API Key 池 · ${fmtNum(tavilyKeysActive)} Key`, + desc: tavilyKeysTotal > tavilyKeysActive + ? `活跃 ${fmtNum(tavilyKeysActive)} / 总数 ${fmtNum(tavilyKeysTotal)}` + : `${tavilyModeSourceLabel(tavilyState.modeSource)} · 默认从本地池轮询,也可切上游 Gateway`, + }); + } else if (tavilyKeysTotal > 0) { + cards.push({ + label: 'Tavily', + tone: 'warn', + title: 'Key 全部停用', + desc: `已导入 ${fmtNum(tavilyKeysTotal)} 个 Key,但当前没有活跃 Key;也可以直接改走上游 Gateway`, + }); + } else { + cards.push({ + label: 'Tavily', + tone: 'danger', + title: '待配置上游 / 待导入 Key', + desc: 'Tavily 现在既可配置上游 Gateway,也可直接导入 API Key;auto 会优先识别上游', + }); + } + + ['exa', 'firecrawl'].forEach((service) => { + const payload = services?.[service] || {}; + const active = Number(payload.keys_active || 0); + const total = Number(payload.keys_total || 0); + const remaining = service === 'exa' + ? null + : Number(payload.real_quota?.total_remaining ?? Number.NaN); + if (total <= 0) { + cards.push({ + label: getServiceDisplayLabel(service), + tone: 'danger', + title: '待导入 Key', + desc: service === 'exa' + ? 
'导入后即可启用独立网页发现路由' + : '导入后即可启用正文抓取与抽取链路', + }); + return; + } + if (active <= 0) { + cards.push({ + label: getServiceDisplayLabel(service), + tone: 'warn', + title: 'Key 全部停用', + desc: `已导入 ${fmtNum(total)} 个 Key,但当前没有活跃 Key`, + }); + return; + } + if (Number.isFinite(remaining) && remaining <= 100) { + cards.push({ + label: getServiceDisplayLabel(service), + tone: 'warn', + title: `额度偏低 · ${fmtNum(remaining)}`, + desc: `活跃 ${fmtNum(active)} / 总数 ${fmtNum(total)}`, + }); + return; + } + cards.push({ + label: getServiceDisplayLabel(service), + tone: 'ok', + title: service === 'exa' ? '独立搜索池' : '抽取线路就绪', + desc: `活跃 ${fmtNum(active)} / 总数 ${fmtNum(total)}`, + }); + }); + + if (social?.admin_connected) { + cards.push({ + label: 'Social / X', + tone: 'ok', + title: '后台自动继承', + desc: `${socialTokenSourceLabel(social?.token_source || '')} · /social/search 已就绪`, + }); + } else if (socialState.canProxySearch) { + cards.push({ + label: 'Social / X', + tone: 'warn', + title: '已可转发搜索', + desc: `上游 key ${fmtNum(socialState.upstreamApiKeyCount)} · 客户端 token ${fmtNum(socialState.acceptedTokenCount)} · 后台统计未接通`, + }); + } else { + cards.push({ + label: 'Social / X', + tone: 'danger', + title: '待配置上游', + desc: '补 grok2api 后台或兼容上游后,统一 token 会自动复用', + }); + } + + return cards; +} + +function getQuickstartInstallHint(tokenCount, routeCards) { + const readyProviders = routeCards.filter((card) => card.tone === 'ok').map((card) => card.label); + const pendingProviders = routeCards.filter((card) => card.tone !== 'ok').map((card) => card.label); + if (!tokenCount) { + return { + title: '先创建通用 token', + detail: '创建后控制台会立刻刷新可复制的 .env,并把当前 provider 接线结果一起写进去。', + }; + } + if (pendingProviders.length) { + return { + title: `先接入 ${readyProviders.join(' / ') || '已就绪路由'}`, + detail: `${pendingProviders.join(' / ')} 还没完全接通,但后续补线后会自动复用同一个通用 token。`, + }; + } + return { + title: '复制 .env → 执行 ./install.sh → 验收 mysearch_health', + detail: '当前统一 token 已可覆盖控制台里的全部路由,按最短路径安装即可完成接入。', + }; +} + 
+function getWorkspaceSnapshot(service, services, social) { + if (service === 'social') { + const stats = social?.stats || {}; + const socialState = getSocialUpstreamState(social || {}); + const hasFullStats = socialState.level === 'full'; + return { + keysTotal: hasFullStats ? Number(stats.token_total || 0) : socialState.acceptedTokenCount, + keysActive: hasFullStats ? Number(stats.token_normal || 0) : socialState.upstreamApiKeyCount, + tokensCount: hasFullStats ? Number(stats.token_total || 0) : socialState.acceptedTokenCount, + todayCount: hasFullStats ? Number(stats.total_calls || 0) : 0, + remaining: hasFullStats ? Number(stats.chat_remaining || 0) : null, + remainingLabel: hasFullStats ? 'Chat 剩余' : '客户端 Token', + primaryMetricLabel: hasFullStats ? '正常 Token' : '上游 Key', + primaryMetricValue: hasFullStats ? Number(stats.token_normal || 0) : socialState.upstreamApiKeyCount, + quaternaryMetricLabel: hasFullStats ? 'Chat 剩余' : '客户端 Token', + quaternaryMetricValue: hasFullStats ? Number(stats.chat_remaining || 0) : socialState.acceptedTokenCount, + modeLabel: socialModeLabel(social?.mode || 'manual'), + }; + } + + const payload = services?.[service] || {}; + const quota = payload.real_quota || {}; + const tavilyState = service === 'tavily' ? getTavilyRuntimeState(payload) : null; + const tavilyUpstream = service === 'tavily' ? getTavilyUpstreamSummary(payload) : null; + const tavilyUsingUpstream = tavilyState?.effectiveMode === 'upstream'; + return { + keysTotal: tavilyUsingUpstream + ? (tavilyUpstream?.available ? tavilyUpstream.totalKeys : (tavilyState.upstreamConfigured ? 1 : 0)) + : Number(payload.keys_total || 0), + keysActive: tavilyUsingUpstream + ? (tavilyUpstream?.available ? tavilyUpstream.activeKeys : (tavilyState.upstreamConfigured ? 1 : 0)) + : Number(payload.keys_active || 0), + tokensCount: Number((payload.tokens || []).length), + todayCount: Number(payload.overview?.today_count || 0), + remaining: service === 'exa' || tavilyUsingUpstream + ? 
(tavilyUsingUpstream && tavilyUpstream?.available ? tavilyUpstream.totalQuotaRemaining : null) + : (quota.total_remaining ?? null), + remainingLabel: tavilyUsingUpstream + ? '上游剩余' + : (service === 'exa' ? '实时额度' : '真实剩余'), + primaryMetricLabel: tavilyUsingUpstream ? '上游活跃 Key' : '活跃 Key', + primaryMetricValue: tavilyUsingUpstream + ? (tavilyUpstream?.available ? tavilyUpstream.activeKeys : (tavilyState.upstreamConfigured ? 1 : 0)) + : Number(payload.keys_active || 0), + quaternaryMetricLabel: tavilyUsingUpstream + ? (tavilyUpstream?.available ? '上游剩余' : '本地 Key') + : (service === 'exa' ? '实时额度' : '真实剩余'), + quaternaryMetricValue: tavilyUsingUpstream + ? (tavilyUpstream?.available ? tavilyUpstream.totalQuotaRemaining : tavilyState.localKeyCount) + : (service === 'exa' ? null : (quota.total_remaining ?? null)), + modeLabel: service === 'tavily' + ? tavilyModeLabel(tavilyState.effectiveMode) + : '独立池', + }; +} + +function workspaceSignal(service, services, social) { + const snapshot = getWorkspaceSnapshot(service, services, social); + const meta = WORKSPACE_META[service] || {}; + + if (service === 'social') { + if (!(social?.admin_connected || social?.upstream_key_configured)) { + return { + tone: 'danger', + label: '待配置', + summary: `${meta.label} 还没有接通上游兼容路由。`, + snapshot, + }; + } + if (snapshot.keysActive <= 0) { + return { + tone: 'warn', + label: '需要关注', + summary: `${meta.label} 已接通,但当前没有正常 token。`, + snapshot, + }; + } + return { + tone: 'ok', + label: '运行中', + summary: `${meta.label} 已接通,可直接向外转发 /social/search。`, + snapshot, + }; + } + + if (service === 'tavily') { + const tavilyState = getTavilyRuntimeState(services?.[service] || {}); + if (tavilyState.effectiveMode === 'upstream') { + if (!tavilyState.upstreamConfigured) { + return { + tone: 'danger', + label: '待配置上游', + summary: `${meta.label} 当前切到了上游模式,但还没有可用的上游凭证。`, + snapshot, + }; + } + return { + tone: 'ok', + label: '上游转发中', + summary: `${meta.label} 当前通过上游 Gateway 转发;本地 API Key 池只作为备用库存。`, + 
snapshot, + }; + } + } + + if (snapshot.keysTotal <= 0) { + return { + tone: 'danger', + label: '待导入 Key', + summary: service === 'tavily' + ? `${meta.label} 还没有接通;你可以配置上游 Gateway,也可以直接导入 API Key 池。` + : `${meta.label} 还没有导入可用 Key。`, + snapshot, + }; + } + + if (snapshot.keysActive <= 0) { + return { + tone: 'warn', + label: 'Key 全部停用', + summary: `${meta.label} 当前没有活跃 Key,请先启用或重新导入。`, + snapshot, + }; + } + + if (snapshot.remaining !== null && Number.isFinite(Number(snapshot.remaining)) && Number(snapshot.remaining) <= 100) { + return { + tone: 'warn', + label: '额度偏低', + summary: `${meta.label} 剩余额度较低,建议尽快同步或补充 Key。`, + snapshot, + }; + } + + return { + tone: 'ok', + label: '运行中', + summary: `${meta.label} 当前工作台状态稳定,可以继续签发 Token 或同步额度。`, + snapshot, + }; +} + +function renderHeroFocus(services, social) { + const root = document.getElementById('hero-focus'); + if (!root) return; + + const signal = workspaceSignal(activeService, services, social); + const snapshot = signal.snapshot; + const meta = WORKSPACE_META[activeService] || {}; + const focusName = root.querySelector('.hero-focus-name'); + const focusStatus = root.querySelector('.hero-focus-status'); + const focusDesc = root.querySelector('.hero-focus-desc'); + const focusStamp = document.getElementById('hero-focus-stamp'); + const signalDot = document.getElementById('hero-focus-signal'); + const metrics = document.getElementById('hero-focus-metrics'); + + if (focusName) focusName.textContent = meta.label || '未知工作台'; + if (focusStatus) { + focusStatus.textContent = signal.label; + focusStatus.className = `hero-focus-status is-${signal.tone}`; + } + if (focusDesc) { + focusDesc.textContent = `${signal.summary} 当前模式:${snapshot.modeLabel}。`; + } + if (focusStamp) { + focusStamp.textContent = latestStatsMeta.generated_at + ? 
`最近刷新 ${formatTime(latestStatsMeta.generated_at)}` + : '等待刷新'; + } + if (signalDot) { + signalDot.className = `hero-focus-signal is-${signal.tone}`; + } + if (metrics) { + const metricOneLabel = snapshot.primaryMetricLabel || (activeService === 'social' ? '正常 Token' : '活跃 Key'); + const metricOneValue = snapshot.primaryMetricValue ?? snapshot.keysActive; + const metricFourLabel = snapshot.quaternaryMetricLabel || snapshot.remainingLabel; + const metricFourValue = snapshot.quaternaryMetricValue ?? snapshot.remaining; + metrics.innerHTML = ` +
+ ${metricOneLabel} + ${fmtNum(metricOneValue)} +
+
+ Token + ${fmtNum(snapshot.tokensCount)} +
+
+ ${metricFourLabel} + ${metricFourValue === null ? '暂不可查' : fmtNum(metricFourValue)} +
+
+ ${activeService === 'social' ? '总调用' : '今日调用'} + ${fmtNum(snapshot.todayCount)} +
+ `; + } +} + +function buildSocialProxyEnv(social) { + const baseUrl = social.upstream_base_url || 'https://media.example.com/v1'; + const adminBaseUrl = social.admin_base_url || baseUrl.replace(/\/v1$/, ''); + return `# 推荐:只填 grok2api 后台地址和后台 app_key,proxy 会自动继承上游凭证与 token 池 +SOCIAL_GATEWAY_UPSTREAM_BASE_URL=${baseUrl} +SOCIAL_GATEWAY_ADMIN_BASE_URL=${adminBaseUrl} +SOCIAL_GATEWAY_ADMIN_APP_KEY=YOUR_GROK2API_APP_KEY +SOCIAL_GATEWAY_MODEL=grok-4.1-fast + +# 可选:只有你想覆写默认行为时才需要 +# SOCIAL_GATEWAY_UPSTREAM_API_KEY= +# SOCIAL_GATEWAY_TOKEN=`; +} + +function buildSocialMySearchEnv(social) { + const baseUrl = location.origin; + const socialReady = social?.admin_connected || social?.upstream_key_configured; + return `# 推荐:直接用 MySearch 通用 token,一次接上 Tavily / Firecrawl / Exa / Social +MYSEARCH_PROXY_BASE_URL=${baseUrl} +MYSEARCH_PROXY_API_KEY=YOUR_MYSEARCH_PROXY_TOKEN + +# 当前 Social / X ${socialReady ? '已经接通,可直接复用上面的通用 token。' : '还没完全接通;上面的通用 token 先可用于 Tavily / Firecrawl / Exa。'} + +# 如果你只想单独接 Social / X,也可以显式写 compatible 模式: +MYSEARCH_XAI_SEARCH_MODE=compatible +MYSEARCH_XAI_SOCIAL_BASE_URL=${baseUrl} +MYSEARCH_XAI_SOCIAL_SEARCH_PATH=/social/search +MYSEARCH_XAI_API_KEY=YOUR_MYSEARCH_PROXY_TOKEN`; +} + +function buildMySearchEnv(mysearch, social) { + const baseUrl = location.origin; + const token = mysearch?.tokens?.[0]?.token || 'YOUR_MYSEARCH_PROXY_TOKEN'; + const routeCards = getQuickstartProviderCards(latestServices, social || {}); + const readyProviders = routeCards.filter((card) => card.tone === 'ok').map((card) => card.label); + const pendingProviders = routeCards.filter((card) => card.tone !== 'ok').map((card) => `${card.label}: ${card.title}`); + const socialReady = social?.admin_connected || social?.upstream_key_configured; + return `# 最省事的接法:只填这两项,MySearch 会默认走当前 proxy +MYSEARCH_PROXY_BASE_URL=${baseUrl} +MYSEARCH_PROXY_API_KEY=${token} + +# 当前路由状态: +${routeCards.map((card) => `# - ${card.label}: ${card.title}${card.desc ? 
` · ${card.desc}` : ''}`).join('\n')} + +# 说明: +# - 当前已就绪 provider:${readyProviders.length ? readyProviders.join(' / ') : '暂无,先在控制台接线'} +# - Social / X ${socialReady ? '当前已接通,会默认复用同一个 token' : '当前还没完全接通,后续接好后也会自动复用同一个 token'} +# - ${pendingProviders.length ? `仍需关注:${pendingProviders.join(';')}` : '当前统一 token 已可覆盖控制台里的所有路由'} + +# 可选:如果你想把 MCP 额外暴露成远程 HTTP,再补这一段 +# MYSEARCH_MCP_HOST=0.0.0.0 +# MYSEARCH_MCP_PORT=8000 +# MYSEARCH_MCP_STREAMABLE_HTTP_PATH=/mcp`; +} + +function buildMySearchInstall() { + return `git clone https://github.com/skernelx/MySearch-Proxy.git +cd MySearch-Proxy +cp mysearch/.env.example mysearch/.env + +# 把上面的 MYSEARCH_PROXY_* 粘进去后执行 +./install.sh + +# 如果你想作为远程 MCP 提供给别的客户端: +./venv/bin/python -m mysearch \\ + --transport streamable-http \\ + --host 0.0.0.0 \\ + --port 8000 \\ + --streamable-http-path /mcp`; +} + +function renderSocialBoard(social) { + const stats = social?.stats || {}; + const socialState = getSocialUpstreamState(social || {}); + const mode = socialModeLabel(social?.mode || 'manual'); + const statusText = socialStatusLabel(social); + const tokenSource = socialTokenSourceLabel(social?.token_source || ''); + const authText = social?.client_auth_configured ? '已允许客户端调用 /social/search' : '还没有设置客户端 token'; + const videoValue = stats.video_remaining === null + ? '
无法统计
' + : `
${fmtNum(stats.video_remaining)}
`; + let foot = '现在还没有连上 Social / X 上游。补齐 grok2api 后台地址和 app key,或者手动填写上游 key 后,这里会开始显示完整状态。'; + if (social?.admin_connected) { + foot = '当前通过 grok2api 后台自动同步 token 状态和剩余额度。对外仍然统一提供 MySearch 的 /social/search 结果结构。'; + } else if (social?.upstream_key_configured) { + foot = '当前已经能转发 Social 搜索,但还没有连上后台统计。补上 grok2api app key 后,这里会显示完整 token 状态。'; + } + const errorLine = social?.error + ? `
最近错误:${escapeHtml(social.error)}
` + : ''; + + if (socialState.level !== 'full') { + document.getElementById('social-board').innerHTML = ` +
+
+ + +
+
+
+ 当前还没有拿到完整后台 token 面板,所以这里只展示基础接线状态,而不是 token 池运行统计。 +
+
+ + + + +
+
+ + + + +
+
+ ${statusText} ${socialState.detail || foot} +
+ ${errorLine} + `; + return; + } + + document.getElementById('social-board').innerHTML = ` +
+
+ + +
+
+
+ 这里看的是 MySearch 的 Social / X 路由运行面。底层可以接 grok2api,也可以兼容别的 xAI-compatible 上游,但对外始终是一套统一结果结构。 +
+
+ + + + + + + + +
+
+ + + + +
+
+ ${statusText} ${foot} +
+ ${errorLine} + `; +} + +function renderSocialIntegration(social) { + const socialState = getSocialUpstreamState(social || {}); + const mode = socialModeLabel(social?.mode || 'manual'); + const source = socialTokenSourceLabel(social?.token_source || ''); + const proxyConfigured = Boolean(social?.upstream_key_configured); + const clientConfigured = Boolean(social?.client_auth_configured); + const authLabel = clientConfigured ? '已就绪' : '未配置'; + const statusLabel = socialStatusLabel(social); + const upstreamBase = social?.upstream_base_url || 'https://media.example.com/v1'; + const adminBase = social?.admin_base_url || '未设置'; + let note = '推荐只填写 grok2api 后台地址和 app key,让 proxy 自动继承上游密钥和 token 池。'; + if (social?.admin_connected) { + note = '当前已经走后台自动继承,后面通常不需要再手动维护上游 key 和客户端 token。'; + } else if (social?.upstream_key_configured) { + note = '当前已经可以调用,但还没有接上后台 token 面板;如果你想看到完整统计,补上 grok2api app key 即可。'; + } + const noteClass = social?.error ? 'integration-note is-error' : 'integration-note'; + + document.getElementById('social-integration').innerHTML = ` +
+
+
Compatibility Layer
+

Social / X 接入

+
+ 摘要优先 +
+

推荐优先接 grok2api 后台。这样只要填后台地址和 app key,proxy 就能自动继承上游密钥与 token 池,不需要再把配置拆成很多手动变量。

+
+
+
当前状态
+
${escapeHtml(statusLabel)}
+
+
+
工作模式
+
${escapeHtml(mode)}
+
+
+
Token 来源
+
${escapeHtml(source)}
+
+
+
客户端鉴权
+
${escapeHtml(authLabel)}
+
+
+
+ 接线详情 +
+
+
上游接口
+
${escapeHtml(upstreamBase)}
+
+
+
后台地址
+
${escapeHtml(adminBase)}
+
+
+
接入结果
+
${proxyConfigured ? '已拿到可用上游 key' : '尚未拿到上游 key'}
+
+
+
上游 Key 数
+
${fmtNum(socialState.upstreamApiKeyCount)}
+
+
+
客户端 Token 数
+
${fmtNum(socialState.acceptedTokenCount)}
+
+
+
兼容形态
+
X Search Router
+
compatible route
+
+
+
+
+
+ ${escapeHtml(statusLabel)} + ${escapeHtml(note)} + ${social?.error ? `最近错误:${escapeHtml(social.error)}` : ''} +
+
+
+
Proxy 端环境变量。通常只要复制这一段,再补你自己的 grok2api app key。
+ +
+

+    
+
MySearch / MCP / Skill 端环境变量。现在更推荐直接使用 MySearch 通用 token,一次接上全部路由。
+ +
+

+  `;
+
+  document.getElementById('social-proxy-env').textContent = buildSocialProxyEnv(social || {});
+  document.getElementById('social-mysearch-env').textContent = buildSocialMySearchEnv(social || {});
+}
+
+function renderMySearchQuickstart(mysearch, social) {
+  const root = document.getElementById('mysearch-quickstart');
+  if (!root) return;
+
+  const tokens = mysearch?.tokens || [];
+  const tokenCount = tokens.length;
+  const todayCount = mysearch?.overview?.today_count || 0;
+  const monthCount = mysearch?.overview?.month_count || 0;
+  const routeCards = getQuickstartProviderCards(latestServices, social || {});
+  const readyProviders = routeCards.filter((card) => card.tone === 'ok');
+  const pendingProviders = routeCards.filter((card) => card.tone !== 'ok');
+  const installHint = getQuickstartInstallHint(tokenCount, routeCards);
+  const noteClass = tokenCount > 0 ? 'integration-note' : 'integration-note is-error';
+  const note = tokenCount > 0
+    ? pendingProviders.length
+      ? `这里创建的是 MySearch 通用 token。它已经能被当前已接通的 provider 直接复用;${pendingProviders.map((card) => card.label).join(' / ')} 还没完全接通,后续补齐后也会自动纳入统一入口。`
+      : '这里创建的是 MySearch 通用 token。它会同时被 Tavily / Firecrawl / Exa / Social 路由接受。'
+    : '先创建一个 MySearch 通用 token。创建后控制台会自动生成可直接复制的 .env 配置。';
+  const noteAction = tokenCount > 0
+    ? ''
+    : '';
+
+  root.innerHTML = `
+    
+
+ Unified Client Entry +

统一接入配置

+

这块不是 provider 池,而是给 Codex / Claude Code / 其他 MCP 客户端准备的统一接入层。目标是把连接方式、安装命令和通用 token 收成一条稳定入口,减少手动区分底层服务。

+
+ Route + 统一入口: MYSEARCH_PROXY_BASE_URL + MYSEARCH_PROXY_API_KEY +
+
+
+
Client Ready · ${fmtNum(readyProviders.length)}/${fmtNum(routeCards.length)}
+
推荐直接复制下面的 MYSEARCH_PROXY_* 配置,不再手写一堆 provider 地址。
+
当前已就绪:${escapeHtml(readyProviders.map((card) => card.label).join(' / ') || '暂无')}
+
通用 Token 前缀:${escapeHtml(mysearch?.token_prefix || 'mysp-')}
+
+
+
+
+
+
+
+
Access Config
+

一键配置

+

推荐把 MySearch 统一接到当前 proxy。这样客户端只认一个 base URL 和一个通用 token,底层 Tavily / Firecrawl / Exa / Social 都由 proxy 负责收口。

+
+ 统一入口 +
+
+
+
+
Provider Readiness
+ ${fmtNum(readyProviders.length)} / ${fmtNum(routeCards.length)} 已接通 + ${pendingProviders.length ? `待补:${pendingProviders.map((card) => card.label).join(' / ')}` : '四条路由都已接通,可直接复制统一接入配置。'} +
+
+ ${routeCards.map((card) => ` +
+
${escapeHtml(card.label)}
+ ${escapeHtml(card.title)} + ${escapeHtml(card.desc)} +
+ `).join('')} +
+
+
+
Proxy Base URL
+
${escapeHtml(location.origin)}
+
+
+
通用 Token
+
${fmtNum(tokenCount)}
+
+
+
今日 / 本月调用
+
${fmtNum(todayCount)} / ${fmtNum(monthCount)}
+
+
+
Provider Ready
+
${fmtNum(readyProviders.length)} / ${fmtNum(routeCards.length)}
+
+
+
+
+
+
+ ${tokenCount > 0 ? '可以直接复制配置了' : '还差一个通用 token'} + ${escapeHtml(note)} +
+ ${noteAction} +
+
+
复制到 mysearch/.env 就能用。默认已经包含统一 proxy 接法。
+ +
+

+            
+
+
+
+
+
+
Install Path
+

安装路径

+

把接入配置和安装命令分开看。先确认 token 与 .env,再决定走本机安装还是远程 streamable-http。

+
+ Shortest Path +
+
+
+
最短安装路径
+ ${escapeHtml(installHint.title)} + ${escapeHtml(installHint.detail)} +
+
+
默认形态
+ stdio +
+
+
远程备选
+ streamable-http +
+
+
+ 1. ${tokenCount > 0 ? '已具备通用 token' : '创建通用 token'} + 2. 复制 .env + 3. 执行 ./install.sh +
+
+ +
+
+
+
+
+
本地安装 / 远程启动命令,按仓库默认流程直接走。
+ +
+

+              
+
+
+
+
+
+
+

MySearch 通用 Token

+

这个 token 专门给上层 MCP / Skill 用。和 Tavily / Firecrawl / Exa 各自的服务 token 分开管理,但调用时会被三条 provider 路由一起接受。

+
+ 统一入口 +
+
+
+ + +
+
+
表格只保留摘要。点击任一行,会在右侧抽屉里查看完整 token、调用统计和维护动作。
+ ${renderTableLegend('token')} +
+
+ +
+
+
+ + + + +
+
+
+
+ + + + + + + + + + +
Token名称运行摘要操作
+
+
+
+
+
+ `; + + document.getElementById('mysearch-proxy-env').textContent = buildMySearchEnv(mysearch || {}, social || {}); + document.getElementById('mysearch-install-cmd').textContent = buildMySearchInstall(); + renderTokens('mysearch', tokens); + renderPoolGlance('mysearch', mysearch || {}); +} + +async function createMySearchBootstrapToken(button) { + await runWithBusyButton(button, { + busyLabel: '创建中...', + successLabel: '已创建', + errorLabel: '创建失败', + minBusyMs: 560, + }, async () => { + await api('POST', '/api/tokens', { + service: 'mysearch', + name: 'MySearch General Token', + }); + }); + await sleep(180); + showToast('已创建 MySearch 通用 token,下面的 .env 已自动更新。', 'success'); + await refresh({ force: true, scope: getRefreshScopeForService('mysearch') }); + const envBlock = document.getElementById('mysearch-proxy-env'); + if (envBlock) { + envBlock.scrollIntoView({ behavior: 'smooth', block: 'center' }); + } +} + +function collectTavilySettingsForm() { + const body = { + mode: document.getElementById('settings-tavily-mode').value, + upstream_base_url: document.getElementById('settings-tavily-upstream-base-url').value.trim(), + upstream_search_path: document.getElementById('settings-tavily-upstream-search-path').value.trim(), + upstream_extract_path: document.getElementById('settings-tavily-upstream-extract-path').value.trim(), + }; + const upstreamApiKey = document.getElementById('settings-tavily-upstream-api-key').value.trim(); + if (upstreamApiKey) body.upstream_api_key = upstreamApiKey; + return body; +} + +function collectSocialSettingsForm() { + const body = { + upstream_base_url: document.getElementById('settings-social-upstream-base-url').value.trim(), + upstream_responses_path: document.getElementById('settings-social-upstream-responses-path').value.trim(), + admin_base_url: document.getElementById('settings-social-admin-base-url').value.trim(), + admin_verify_path: document.getElementById('settings-social-admin-verify-path').value.trim(), + admin_config_path: 
document.getElementById('settings-social-admin-config-path').value.trim(), + admin_tokens_path: document.getElementById('settings-social-admin-tokens-path').value.trim(), + model: document.getElementById('settings-social-model').value.trim(), + fallback_model: document.getElementById('settings-social-fallback-model').value.trim(), + cache_ttl_seconds: document.getElementById('settings-social-cache-ttl-seconds').value.trim(), + fallback_min_results: document.getElementById('settings-social-fallback-min-results').value.trim(), + }; + + const adminAppKey = document.getElementById('settings-social-admin-app-key').value.trim(); + const upstreamApiKey = document.getElementById('settings-social-upstream-api-key').value.trim(); + const gatewayToken = document.getElementById('settings-social-gateway-token').value.trim(); + + if (adminAppKey) body.admin_app_key = adminAppKey; + if (upstreamApiKey) body.upstream_api_key = upstreamApiKey; + if (gatewayToken) body.gateway_token = gatewayToken; + return body; +} + +function renderSettingsProbeMessage(kind, payload = {}) { + if (kind === 'tavily') { + const mode = payload.effective_mode === 'upstream' ? '上游 Gateway' : 'API Key 池'; + const detail = payload.detail || payload.summary || '诊断已完成。'; + return `Tavily ${payload.ok ? '接线正常' : '接线失败'}:当前实际 ${mode},${detail}`; + } + const detail = payload.detail || payload.token_source || '诊断已完成。'; + return `Social / X ${payload.ok ? '接线正常' : '接线失败'}:${detail}`; +} + +function clearSettingsProbe(kind) { + const shell = document.getElementById(`settings-${kind}-probe`); + if (!shell) return; + shell.innerHTML = ''; + shell.classList.add('hidden'); + shell.classList.remove('is-error'); +} + +function getSettingsProbeMeta(kind, payload = {}) { + if (kind === 'tavily') { + const mode = payload.effective_mode === 'upstream' ? 
'上游 Gateway' : 'API Key 池'; + const requestTarget = payload.request_target || payload.probe_url || '未配置'; + const authSource = payload.auth_source || (payload.effective_mode === 'upstream' + ? '上游 API key / Gateway token' + : `本地 API Key 池(活跃 ${fmtNum(payload.local_key_count || 0)})`); + const returnStatus = payload.status_label || (payload.status_code ? `HTTP ${payload.status_code}` : '未执行 live probe'); + const failureReason = payload.ok ? '无' : (payload.failure_reason || payload.error || payload.detail || '未通过诊断'); + let recommendation = payload.recommendation || ''; + if (!recommendation) { + if (payload.ok) { + recommendation = payload.effective_mode === 'upstream' + ? '当前链路可用;如果想固定行为,可以保持 upstream,或者切回 auto 让控制台自动识别。' + : '当前本地 API Key 池可用;如果想统一上游维护,可以继续配置 Tavily Gateway。'; + } else if ((payload.local_key_count || 0) <= 0 && payload.effective_mode !== 'upstream') { + recommendation = '导入至少一个 Tavily API Key,或者改成上游 Gateway 并补上凭证。'; + } else if (payload.effective_mode === 'upstream') { + recommendation = '检查上游 Base URL、Search Path 和上游 token 是否有效。'; + } else { + recommendation = '检查本地 API Key 是否可用,必要时切到上游 Gateway 做统一接线。'; + } + } + return { + tone: payload.ok ? 'ok' : 'error', + title: `Tavily ${payload.ok ? '测试通过' : '测试失败'}`, + eyebrow: 'Latest Probe', + pills: [mode, tavilyModeSourceLabel(payload.mode_source || 'auto_pending')], + items: [ + { label: '请求目标', value: requestTarget, mono: true }, + { label: '鉴权来源', value: authSource }, + { label: '返回状态', value: returnStatus }, + { label: '失败原因', value: failureReason }, + { label: '建议动作', value: recommendation, wide: true }, + ], + }; + } + + const requestTarget = payload.request_target || `${payload.upstream_base_url || '未配置'}${payload.upstream_responses_path || '/responses'}`; + const authSource = payload.auth_source || payload.token_source || '未解析到可用鉴权'; + const returnStatus = payload.status_label || (payload.admin_connected ? '后台已连通' : (payload.ok ? '已解析到可用凭证' : '诊断失败')); + const failureReason = payload.ok ? 
'无' : (payload.failure_reason || payload.error || payload.detail || '未通过诊断'); + let recommendation = payload.recommendation || ''; + if (!recommendation) { + if (payload.ok && payload.admin_connected) { + recommendation = '当前后台自动继承正常,可以直接下发 MySearch 通用 token 给客户端。'; + } else if (payload.ok) { + recommendation = '当前已能转发 Social / X 搜索;如果要更完整的 token 元数据,继续补 grok2api 后台。'; + } else { + recommendation = '优先检查 grok2api 后台地址与 app key;如果没有后台,再补手动上游 key 和客户端 token。'; + } + } + return { + tone: payload.ok ? 'ok' : 'error', + title: `Social / X ${payload.ok ? '测试通过' : '测试失败'}`, + eyebrow: 'Latest Probe', + pills: [socialModeLabel(payload.mode || 'manual'), socialTokenSourceLabel(payload.token_source || '')], + items: [ + { label: '请求目标', value: requestTarget, mono: true }, + { label: '鉴权来源', value: authSource }, + { label: '返回状态', value: returnStatus }, + { label: '失败原因', value: failureReason }, + { label: '建议动作', value: recommendation, wide: true }, + ], + }; +} + +function renderSettingsProbe(kind, payload = {}) { + const shell = document.getElementById(`settings-${kind}-probe`); + if (!shell) return; + const meta = getSettingsProbeMeta(kind, payload); + shell.classList.remove('hidden'); + shell.classList.toggle('is-error', meta.tone === 'error'); + shell.innerHTML = ` +
+
+
${escapeHtml(meta.eyebrow)}
+ ${escapeHtml(meta.title)} +
+
+ ${meta.pills.map((pill) => `${escapeHtml(pill)}`).join('')} +
+
+
+ ${meta.items.map((item) => ` +
+
${escapeHtml(item.label)}
+
${escapeHtml(item.value)}
+
+ `).join('')} +
+ `; +} + +async function testTavilySettings(button) { + setStatus('settings-tavily-status', ''); + clearSettingsProbe('tavily'); + try { + await runWithBusyButton(button, { + busyLabel: '测试中...', + successLabel: '测试通过', + errorLabel: '测试失败', + minBusyMs: 640, + }, async () => { + const payload = await api('POST', '/api/settings/test/tavily', collectTavilySettingsForm()); + renderSettingsProbe('tavily', payload); + setStatus('settings-tavily-status', renderSettingsProbeMessage('tavily', payload), !payload.ok); + showToast(payload.ok ? 'Tavily 测试通过' : 'Tavily 测试失败', payload.ok ? 'success' : 'warn'); + }); + } catch (error) { + clearSettingsProbe('tavily'); + setStatus('settings-tavily-status', `Tavily 测试失败:${error.message}`, true); + } +} + +async function testSocialSettings(button) { + setStatus('settings-social-status', ''); + clearSettingsProbe('social'); + try { + await runWithBusyButton(button, { + busyLabel: '测试中...', + successLabel: '测试通过', + errorLabel: '测试失败', + minBusyMs: 640, + }, async () => { + const payload = await api('POST', '/api/settings/test/social', collectSocialSettingsForm()); + renderSettingsProbe('social', payload); + setStatus('settings-social-status', renderSettingsProbeMessage('social', payload), !payload.ok); + showToast(payload.ok ? 'Social / X 测试通过' : 'Social / X 测试失败', payload.ok ? 'success' : 'warn'); + }); + } catch (error) { + clearSettingsProbe('social'); + setStatus('settings-social-status', `Social / X 测试失败:${error.message}`, true); + } +} + +function headers() { + const base = { + 'Content-Type': 'application/json', + }; + if (PWD) { + base['X-Admin-Password'] = PWD; + } + return base; +} + +async function api(method, path, body) { + const options = { method, headers: headers(), credentials: 'same-origin' }; + if (body !== undefined) { + options.body = JSON.stringify(body); + } + + const response = await fetch(API + path, options); + const text = await response.text(); + let payload = {}; + try { + payload = text ? 
JSON.parse(text) : {}; + } catch { + payload = text ? { detail: text } : {}; + } + + if (response.status === 401) { + logout(); + throw new Error('Unauthorized'); + } + + if (!response.ok) { + throw new Error(payload.detail || `HTTP ${response.status}`); + } + + return payload; +} + +function setStatus(id, message, isError = false) { + const el = document.getElementById(id); + if (!el) return; + if (!message) { + el.textContent = ''; + el.classList.add('hidden'); + el.classList.remove('is-error'); + return; + } + el.textContent = message; + el.classList.remove('hidden'); + el.classList.toggle('is-error', Boolean(isError)); +} + +function describeConfiguredSecret(masked, configured) { + if (!configured) return '当前未配置。'; + return `当前已配置 ${masked || 'secret'},留空表示保持不变。`; +} + +function setTavilyMode(mode) { + const nextMode = ['auto', 'pool', 'upstream'].includes(mode) ? mode : 'auto'; + const input = document.getElementById('settings-tavily-mode'); + if (input) { + input.value = nextMode; + } + document.querySelectorAll('.mode-switch-btn[data-tavily-mode]').forEach((button) => { + const active = button.dataset.tavilyMode === nextMode; + button.classList.toggle('is-active', active); + button.setAttribute('aria-selected', active ? 'true' : 'false'); + button.setAttribute('tabindex', active ? '0' : '-1'); + }); + const tavily = latestSettings?.tavily || {}; + const runtimeMode = nextMode === 'auto' + ? (tavily.effective_mode || (tavily.upstream_api_key_configured ? 'upstream' : ((tavily.local_key_count || 0) > 0 ? 'pool' : 'auto'))) + : nextMode; + const runtimeSource = nextMode === 'auto' + ? (tavily.mode_source || (tavily.upstream_api_key_configured ? 'auto_upstream' : ((tavily.local_key_count || 0) > 0 ? 'auto_pool' : 'auto_pending'))) + : (nextMode === 'upstream' ? 
'manual_upstream' : 'manual_pool'); + const hint = document.getElementById('settings-tavily-mode-hint'); + if (hint) { + if (nextMode === 'upstream') { + hint.textContent = '手动固定到上游 Gateway,请求不再消耗本地 API Key 池。'; + } else if (nextMode === 'pool') { + hint.textContent = '手动固定到 API Key 池,请求会从导入的 Tavily keys 中轮询。'; + } else { + hint.textContent = '自动模式会先检测上游凭证;如果你只是导入 Tavily key,就会默认回到 API Key 池。'; + } + } + const runtimeStrip = document.getElementById('settings-tavily-runtime-strip'); + if (runtimeStrip) { + runtimeStrip.textContent = `当前实际:${tavilyModeLabel(runtimeMode)} · ${tavilyModeSourceLabel(runtimeSource)}`; + } + document.querySelectorAll('[data-tavily-upstream-field]').forEach((field) => { + field.classList.toggle('is-muted', nextMode === 'pool'); + field.classList.toggle('is-emphasis', nextMode !== 'pool'); + }); +} + +function fillSettingsForm(settings) { + const tavily = settings?.tavily || {}; + const social = settings?.social || {}; + setTavilyMode(tavily.mode || 'auto'); + document.getElementById('settings-tavily-upstream-base-url').value = tavily.upstream_base_url || ''; + document.getElementById('settings-tavily-upstream-search-path').value = tavily.upstream_search_path || '/search'; + document.getElementById('settings-tavily-upstream-extract-path').value = tavily.upstream_extract_path || '/extract'; + document.getElementById('settings-tavily-upstream-api-key').value = ''; + document.getElementById('settings-tavily-upstream-api-key-hint').textContent = + describeConfiguredSecret(tavily.upstream_api_key_masked, tavily.upstream_api_key_configured); + document.getElementById('settings-tavily-meta').textContent = [ + `配置模式:${tavilyModeLabel(tavily.mode || 'auto')}`, + `当前实际:${tavilyModeLabel(tavily.effective_mode || tavily.mode || 'auto')}`, + `来源:${tavilyModeSourceLabel(tavily.mode_source || 'auto_pending')}`, + tavily.upstream_base_url ? `Base URL:${tavily.upstream_base_url}` : '', + tavily.upstream_api_key_configured ? 
'已配置上游凭证' : `本地活跃 Key ${fmtNum(tavily.local_key_count || 0)}`, + ].filter(Boolean).join(' · '); + + document.getElementById('settings-social-upstream-base-url').value = social.upstream_base_url || ''; + document.getElementById('settings-social-upstream-responses-path').value = social.upstream_responses_path || '/responses'; + document.getElementById('settings-social-admin-base-url').value = social.admin_base_url || ''; + document.getElementById('settings-social-admin-verify-path').value = social.admin_verify_path || '/v1/admin/verify'; + document.getElementById('settings-social-admin-config-path').value = social.admin_config_path || '/v1/admin/config'; + document.getElementById('settings-social-admin-tokens-path').value = social.admin_tokens_path || '/v1/admin/tokens'; + document.getElementById('settings-social-model').value = social.model || 'grok-4.1-fast'; + document.getElementById('settings-social-fallback-model').value = social.fallback_model || 'grok-4.1-fast'; + document.getElementById('settings-social-cache-ttl-seconds').value = String(social.cache_ttl_seconds || 60); + document.getElementById('settings-social-fallback-min-results').value = String(social.fallback_min_results || 3); + + document.getElementById('settings-social-admin-app-key').value = ''; + document.getElementById('settings-social-upstream-api-key').value = ''; + document.getElementById('settings-social-gateway-token').value = ''; + + document.getElementById('settings-social-admin-app-key-hint').textContent = + describeConfiguredSecret(social.admin_app_key_masked, social.admin_app_key_configured); + document.getElementById('settings-social-upstream-api-key-hint').textContent = + describeConfiguredSecret(social.upstream_api_key_masked, social.upstream_api_key_configured); + document.getElementById('settings-social-gateway-token-hint').textContent = + describeConfiguredSecret(social.gateway_token_masked, social.gateway_token_configured); + + const bits = [ + `当前模式:${socialModeLabel(social.mode 
|| 'manual')}`, + social.model ? `主模型:${social.model}` : '', + social.fallback_model ? `Fallback:${social.fallback_model} (< ${social.fallback_min_results || 3})` : '', + social.token_source ? `Token 来源:${social.token_source}` : '', + social.admin_connected ? '后台连通正常' : '', + ].filter(Boolean); + if (social.error) { + bits.push(`最近错误:${social.error}`); + } + document.getElementById('settings-social-meta').textContent = bits.join(' · '); + renderSettingsSummaries(settings); +} + +async function loadSettings() { + const payload = await api('GET', '/api/settings'); + latestSettings = payload || {}; + fillSettingsForm(latestSettings); + setStatus('settings-password-status', ''); + setStatus('settings-tavily-status', ''); + setStatus('settings-social-status', ''); + clearSettingsProbe('tavily'); + clearSettingsProbe('social'); +} + +async function openSettingsModal() { + rememberOverlayFocus('settings-modal'); + document.getElementById('settings-modal').classList.remove('hidden'); + setActiveSettingsTab(document.querySelector('.settings-tab.is-active')?.dataset.settingsTab || 'console'); + syncOverlayState(); + focusOverlay('settings-modal'); + try { + await loadSettings(); + } catch (error) { + showAlertDialog({ + title: '读取设置失败', + message: `控制台没能读取当前配置:${error.message}`, + tone: 'danger', + kicker: 'Settings Error', + }); + } +} + +function closeSettingsModal() { + document.getElementById('settings-modal').classList.add('hidden'); + syncOverlayState(); + restoreOverlayFocus('settings-modal'); +} + +function logoutFromSettings() { + closeSettingsModal(); + logout(); +} + +function renderServiceShells() { + const servicesRoot = document.getElementById('services-root'); + if (!servicesRoot) return; + const providerHtml = Object.keys(SERVICE_META).map((service) => { + const meta = SERVICE_META[service]; + return ` +
+
+
+ ${meta.label} +

${meta.label} 工作台

+

${meta.panelIntro} 账号前缀 ${meta.emailPrefix},代理 Token 前缀 ${meta.tokenPrefix}。${meta.quotaSource}。

+
+ Route + ${meta.routeHint} +
+
+
+
Live Status
+
等待同步状态...
+ +
+
+
+
+ +
+
+

调用方式

+

${meta.routeHint}

+
+ Base URL: + 代理 Token 前缀: ${meta.tokenPrefix} +
+
+
${meta.quotaSource}
+ +
+

+            
+ +
+
+

接线摘要

+ 摘要优先 +
+

先在这里看当前 provider 的可用状态,再决定是否下钻到 Token、Key 和批量导入细节。

+
+
+ 代理 Token 前缀 + ${meta.tokenPrefix} +
+
+ 账号前缀 + ${meta.emailPrefix} +
+
+ 额度来源 + ${meta.quotaSource} +
+
+ 控制面行为 + ${meta.switcherFoot} +
+
+
+
+ +
+
+
+
+

Token 池

+

${meta.tokenPoolDesc}

+
+ 创建与分发 +
+
+
+ + +
+
+
+ +
+ + + + +
+
+
表格只保留摘要。点击任一行,在右侧抽屉查看完整 token、配额策略和使用统计。
+ ${renderTableLegend('token')} +
+ + + + + + + + + + +
Token备注运行摘要操作
+
+
+
+ +
+
+
+

API Key 池

+

${meta.keyPoolDesc}

+
+ 导入与维护 +
+
+
+ + + +
+ +
+
+ +
+
+ + + + + +
+
+ + + + +
+
+
+
主表只保留同步状态和代理摘要。点击行可在右侧查看额度、账户层级信息和维护动作。
+ ${renderTableLegend('key')} +
+ + + + + + + + + + + + + +
IDKey邮箱同步 / 状态代理摘要状态操作
+
+
+
+
+
+
+ `; + }).join(''); + + const socialHtml = ` +
+
+
+ Social / X +

Social / X 工作台

+

这里收口的是 X / Social 搜索路由,不再把底层实现名字放成主标题。你看到的是 MySearch 的 Social 工作台,底层可以复用 grok2api 后台,也可以兼容别的 xAI-compatible 上游。

+
+ Route + 代理端点: POST /social/search +
+
+
+
Live Status
+
等待 Social 状态...
+
用于查看 token 池、剩余额度、调用次数和客户端接线方式。
+
+
+
+
+ +
+
+ `; + + servicesRoot.innerHTML = providerHtml + socialHtml; + renderSocialBoard({}); + renderSocialIntegration({}); + renderSocialWorkspace({}); + renderServiceSwitcher({}, {}); + applyActiveService(); +} + +function renderServiceSwitcher(services, social) { + const root = document.getElementById('service-switcher'); + if (!root) return; + const html = Object.entries(WORKSPACE_META).map(([service, meta]) => { + const isSocial = service === 'social'; + const snapshot = getWorkspaceSnapshot(service, services, social); + const signal = workspaceSignal(service, services, social); + const foot = meta.switcherFoot || (isSocial ? '统一 Social 路由 + 统一输出结构' : '独立 Key 池 + 独立额度同步'); + const metricOneLabel = snapshot.primaryMetricLabel || (isSocial ? '可用 Token' : '活跃 Key'); + const metricTwoLabel = isSocial ? '今日调用' : 'Token'; + const metricTwoValue = isSocial ? fmtNum(snapshot.todayCount) : fmtNum(snapshot.tokensCount); + const badge = (meta.switcherBadges || [isSocial ? 'X Search' : '池状态'])[0]; + const footnote = snapshot.remaining !== null && snapshot.remaining !== undefined + ? `${snapshot.remainingLabel} ${fmtNum(snapshot.remaining)}` + : foot; + + return ` + + `; + }).join(''); + + root.innerHTML = html; +} + +function applyActiveService() { + if (!WORKSPACE_META[activeService]) { + activeService = 'tavily'; + } + + for (const service of Object.keys(WORKSPACE_META)) { + const panel = document.querySelector(`.service-panel[data-service="${service}"]`); + if (!panel) continue; + panel.classList.toggle('is-inactive', service !== activeService); + } + + document.querySelectorAll('.service-toggle').forEach((item) => { + const isActive = item.dataset.service === activeService; + item.classList.toggle('is-active', isActive); + item.setAttribute('aria-pressed', isActive ? 
'true' : 'false'); + const status = item.querySelector('.service-toggle-status'); + if (status) { + const label = status.querySelector('span:last-child'); + if (label) { + const signal = workspaceSignal(item.dataset.service, latestServices, latestSocial); + label.textContent = signal.label; + } + } + const flag = item.querySelector('.service-toggle-flag'); + if (flag) { + flag.classList.toggle('hidden', !isActive); + } + }); + + const switcherNote = document.getElementById('switcher-note'); + if (switcherNote) { + switcherNote.textContent = `当前工作台:${WORKSPACE_META[activeService].label} · 已记住你的切换偏好`; + } +} + +function animateWorkspacePanel(service) { + const main = document.querySelector('.services-root'); + const panel = document.querySelector(`.service-panel[data-service="${service}"]`); + if (main) { + main.classList.remove('is-switching'); + void main.offsetWidth; + main.classList.add('is-switching'); + setTimeout(() => { + main.classList.remove('is-switching'); + }, 320); + } + if (panel) { + panel.classList.remove('is-activating'); + void panel.offsetWidth; + panel.classList.add('is-activating'); + setTimeout(() => { + panel.classList.remove('is-activating'); + }, 320); + } +} + +function setActiveService(service) { + if (!WORKSPACE_META[service]) return; + activeService = service; + localStorage.setItem(ACTIVE_SERVICE_KEY, service); + applyActiveService(); + animateWorkspacePanel(service); + renderHeroFocus(latestServices, latestSocial); + renderGlobalSummary(latestServices, latestSocial); + renderSettingsSummaries(); + const panel = document.querySelector(`.service-panel[data-service="${service}"]`); + if (panel) { + panel.scrollIntoView({ behavior: 'smooth', block: 'start' }); + } +} + +function doLogin(event) { + event?.preventDefault?.(); + const input = document.getElementById('pwd-input'); + const password = input.value.trim(); + if (!password) { + document.getElementById('login-err').textContent = '请输入管理密码。'; + 
document.getElementById('login-err').classList.remove('hidden'); + return; + } + setLoginBusy(true); + loginWithPassword(password) + .then(async () => { + PWD = ''; + clearStoredPasswords(); + showDashboard({ animate: true }); + await refresh(); + }) + .catch((error) => { + document.getElementById('login-err').textContent = error.message === 'Unauthorized' + ? '密码错误。' + : '登录失败,请检查管理 API 是否可用。'; + document.getElementById('login-err').classList.remove('hidden'); + }) + .finally(() => { + setLoginBusy(false); + }); +} + +function logout() { + PWD = ''; + clearStoredPasswords(); + showLogin(); + closeSettingsModal(); + closeDetailDrawer(); + closeAppDialog(false); + fetch(API + '/api/session/logout', { + method: 'POST', + credentials: 'same-origin', + }).catch(() => {}); +} + +function fmtNum(value) { + if (value === null || value === undefined || value === '') { + return '--'; + } + const numeric = Number(value); + return Number.isFinite(numeric) ? numeric.toLocaleString() : String(value); +} + +function escapeHtml(value) { + return String(value ?? '') + .replaceAll('&', '&') + .replaceAll('<', '<') + .replaceAll('>', '>') + .replaceAll('"', '"') + .replaceAll("'", '''); +} + +function formatTime(iso) { + if (!iso) return '未同步'; + const date = new Date(iso); + if (Number.isNaN(date.getTime())) return '未同步'; + return date.toLocaleString(); +} + +function quotaBar(used, limit) { + const safeLimit = Number(limit || 0); + const safeUsed = Number(used || 0); + if (!safeLimit) return ''; + const pct = Math.min(100, (safeUsed / safeLimit) * 100); + const cls = pct >= 90 ? 'danger' : pct >= 70 ? 'warn' : ''; + return ` +
+
+
+ `; +} + +function buildCurlExample(service, tokenValue) { + const baseUrl = location.origin; + const token = tokenValue || 'YOUR_PROXY_TOKEN'; + if (service === 'firecrawl') { + return `# Firecrawl Scrape +curl -X POST ${baseUrl}/firecrawl/v2/scrape \\ + -H "Content-Type: application/json" \\ + -H "Authorization: Bearer ${token}" \\ + -d '{"url":"https://example.com","formats":["markdown"]}' + +# Firecrawl 额度查询 +curl -X GET ${baseUrl}/firecrawl/v2/team/credit-usage \\ + -H "Authorization: Bearer ${token}" + +# 也支持 body 里传 api_key +curl -X POST ${baseUrl}/firecrawl/v2/scrape \\ + -H "Content-Type: application/json" \\ + -d '{"api_key":"${token}","url":"https://example.com"}'`; + } + + if (service === 'exa') { + return `# Exa Search +curl -X POST ${baseUrl}/exa/search \\ + -H "Content-Type: application/json" \\ + -H "Authorization: Bearer ${token}" \\ + -d '{"query":"OpenAI latest model","numResults":3,"contents":{"text":true}}' + +# 也支持 body 里传 api_key +curl -X POST ${baseUrl}/exa/search \\ + -H "Content-Type: application/json" \\ + -d '{"api_key":"${token}","query":"OpenAI latest model","numResults":3}'`; + } + + return `# Tavily Search +curl -X POST ${baseUrl}/api/search \\ + -H "Content-Type: application/json" \\ + -H "Authorization: Bearer ${token}" \\ + -d '{"query":"hello world","max_results":1}' + +# Tavily Extract +curl -X POST ${baseUrl}/api/extract \\ + -H "Content-Type: application/json" \\ + -H "Authorization: Bearer ${token}" \\ + -d '{"urls":["https://example.com"]}' + +# 也支持 body 里传 api_key +curl -X POST ${baseUrl}/api/search \\ + -H "Content-Type: application/json" \\ + -d '{"api_key":"${token}","query":"hello world"}'`; +} + +function renderGlobalSummary(services, social) { + const root = document.getElementById('global-summary'); + if (!root) return; + const list = Object.values(services || {}); + const todayCount = list.reduce((sum, item) => sum + Number(item.overview?.today_count || 0), 0); + const monthCount = list.reduce((sum, item) => sum + 
Number(item.overview?.month_count || 0), 0); + const activeSignal = workspaceSignal(activeService, services, social); + const activeMeta = WORKSPACE_META[activeService] || {}; + const routeCards = getQuickstartProviderCards(services, social); + const totalWorkspaces = routeCards.length; + const connectedWorkspaces = routeCards.filter((card) => card.tone !== 'danger').length; + const tavilyPayload = services?.tavily || {}; + const tavilyState = getTavilyRuntimeState(tavilyPayload); + const tavilyUsesUpstream = tavilyState.effectiveMode === 'upstream'; + const socialUsesUpstream = isSocialUpstreamManaged(social || {}); + const localProviderTokenSources = [ + ...(!tavilyUsesUpstream ? [{ label: 'Tavily', count: Number((tavilyPayload.tokens || []).length) }] : []), + { label: 'Exa', count: Number((services?.exa?.tokens || []).length) }, + { label: 'Firecrawl', count: Number((services?.firecrawl?.tokens || []).length) }, + ...(!socialUsesUpstream ? [{ label: 'Social / X', count: Number(getSocialUpstreamState(social || {}).acceptedTokenCount || 0) }] : []), + ]; + const localProviderTokenTotal = localProviderTokenSources.reduce((sum, item) => sum + Number(item.count || 0), 0); + const localProviderTokenLabels = localProviderTokenSources.map((item) => item.label); + + root.innerHTML = ` +
+
当前工作台
+
${escapeHtml(activeMeta.label || '未知')}
+
${escapeHtml(activeSignal.label)} · ${escapeHtml(activeSignal.snapshot.modeLabel)}
+
+
+
已接通工作台
+
${fmtNum(connectedWorkspaces)} / ${fmtNum(totalWorkspaces)}
+
按当前接线状态自动统计全部工作台
+
+
+
Provider 代理 Token
+
${fmtNum(localProviderTokenTotal)}
+
${localProviderTokenLabels.length ? `${escapeHtml(localProviderTokenLabels.join(' / '))} 当前走本地代理池` : '当前没有启用本地 provider 代理池'}
+
+
+
今日调用
+
${fmtNum(todayCount)}
+
来自本地 usage_logs 聚合
+
+
+
本月调用
+
${fmtNum(monthCount)}
+
来自本地 usage_logs 聚合,不含上游后台自己的历史请求总量
+
+
+
MySearch Token
+
${fmtNum(latestMySearch?.token_count || 0)}
+
给 MCP / Skill / 客户端统一接入
+
+ `; +} + +function renderSyncMeta(service, payload) { + if (service === 'exa') { + const detail = payload.usage_sync?.detail || 'Exa 实时额度暂时无法查询'; + document.getElementById(`sync-meta-${service}`).textContent = [ + `已导入 ${fmtNum(payload.keys_total || 0)} 个 Key`, + `已签发 ${fmtNum((payload.tokens || []).length)} 个 Token`, + detail, + ].join(' · '); + return; + } + + const quota = payload.real_quota || {}; + const usageSync = payload.usage_sync || {}; + const parts = []; + const tavilyUpstream = service === 'tavily' ? getTavilyUpstreamSummary(payload) : null; + + if (service === 'tavily' && payload.routing?.effective_mode) { + parts.push(`配置 ${tavilyModeLabel(payload.routing.mode || 'auto')}`); + parts.push(`当前走 ${tavilyModeLabel(payload.routing.effective_mode)}`); + parts.push(tavilyModeSourceLabel(payload.routing.mode_source || 'auto_pending')); + if (payload.routing.effective_mode === 'upstream') { + if (tavilyUpstream?.available) { + parts.push(`上游活跃 ${fmtNum(tavilyUpstream.activeKeys)} / 总 ${fmtNum(tavilyUpstream.totalKeys)}`); + parts.push(`上游剩余 ${fmtNum(tavilyUpstream.totalQuotaRemaining)}`); + } else if (tavilyUpstream?.detail) { + parts.push(tavilyUpstream.detail); + } + } + } + + parts.push(`已同步 ${fmtNum(quota.synced_keys || 0)} / ${fmtNum(quota.total_keys || 0)} 个 Key`); + if ((quota.key_level_count || 0) > 0) { + parts.push(`Key 级额度 ${fmtNum(quota.key_level_count)}`); + } + if ((quota.account_fallback_count || 0) > 0) { + parts.push(`账户正常数量:${fmtNum(quota.account_fallback_count)}`); + } + if (quota.last_synced_at) { + parts.push(`最近同步 ${formatTime(quota.last_synced_at)}`); + } + if ((quota.error_keys || 0) > 0) { + parts.push(`错误 ${fmtNum(quota.error_keys)}`); + } + if ((usageSync.synced || 0) > 0 || (usageSync.errors || 0) > 0) { + parts.push(`本轮同步 ${fmtNum(usageSync.synced || 0)} 成功 / ${fmtNum(usageSync.errors || 0)} 失败`); + } + + document.getElementById(`sync-meta-${service}`).textContent = parts.join(' · '); +} + +function renderOverview(service, 
payload) { + const overview = payload.overview || {}; + const quota = payload.real_quota || {}; + const tavilyUpstream = service === 'tavily' ? getTavilyUpstreamSummary(payload) : null; + + if (service === 'exa') { + const todayCount = Number(overview.today_count || 0); + const todaySuccess = Number(overview.today_success || 0); + const successRate = todayCount ? `${Math.round((todaySuccess / todayCount) * 100)}%` : '暂无'; + + document.getElementById(`overview-${service}`).innerHTML = ` +
+
实时额度
+
暂时无法查询
+
控制台当前只统计 Exa 代理调用
+
+
+
Key 池状态
+
${fmtNum(payload.keys_active || 0)} / ${fmtNum(payload.keys_total || 0)}
+
活跃 / 总数
+
+
+
Token 池状态
+
${fmtNum((payload.tokens || []).length)}
+
Exa 独立代理 Token 池
+
+
+
今日代理调用
+
${fmtNum(overview.today_count || 0)}
+
成功 ${fmtNum(overview.today_success || 0)} / 失败 ${fmtNum(overview.today_failed || 0)}
+
+
+
本月代理调用
+
${fmtNum(overview.month_count || 0)}
+
本月成功 ${fmtNum(overview.month_success || 0)}
+
+
+
今日成功率
+
${successRate}
+
${payload.usage_sync?.detail || 'Exa 实时额度暂时无法查询,后续如果接入官方读取会补充显示。'}
+
+ `; + return; + } + + if (service === 'tavily' && payload.routing?.effective_mode === 'upstream') { + const upstreamAvailable = Boolean(tavilyUpstream?.available); + const upstreamRemainStyle = upstreamAvailable && tavilyUpstream.totalQuotaLimit > 0 + ? (tavilyUpstream.totalQuotaRemaining / tavilyUpstream.totalQuotaLimit <= 0.1 + ? 'color: var(--danger)' + : tavilyUpstream.totalQuotaRemaining / tavilyUpstream.totalQuotaLimit <= 0.3 + ? 'color: var(--warn)' + : 'color: var(--ok)') + : ''; + document.getElementById(`overview-${service}`).innerHTML = ` +
+
上游 Key 状态
+
${upstreamAvailable ? `${fmtNum(tavilyUpstream.activeKeys)} / ${fmtNum(tavilyUpstream.totalKeys)}` : '未读取到'}
+
${upstreamAvailable ? `耗尽 ${fmtNum(tavilyUpstream.exhaustedKeys)} · 隔离 ${fmtNum(tavilyUpstream.quarantinedKeys)}` : escapeHtml(tavilyUpstream?.detail || '当前上游没有提供公开摘要接口。')}
+
+
+
上游剩余额度
+
${upstreamAvailable ? fmtNum(tavilyUpstream.totalQuotaRemaining) : '未读取到'}
+
${upstreamAvailable ? `上限 ${fmtNum(tavilyUpstream.totalQuotaLimit)} · 来自 Hikari 公共摘要` : '当前仍可继续使用本地池作为回退库存。'}
+
+
+
上游累计请求
+
${upstreamAvailable ? fmtNum(tavilyUpstream.totalRequests) : '未读取到'}
+
${upstreamAvailable ? `成功 ${fmtNum(tavilyUpstream.successCount)} · 错误 ${fmtNum(tavilyUpstream.errorCount)} · 配额耗尽 ${fmtNum(tavilyUpstream.quotaExhaustedCount)}` : '当前只确认了 Gateway 可转发,未拿到上游请求统计。'}
+
+
+
本地回退 Key
+
${fmtNum(payload.keys_active || 0)} / ${fmtNum(payload.keys_total || 0)}
+
这些 Key 在 Tavily upstream 模式下不参与转发,只作为回退库存保留。
+
+
+
今日代理调用
+
${fmtNum(overview.today_count || 0)}
+
成功 ${fmtNum(overview.today_success || 0)} / 失败 ${fmtNum(overview.today_failed || 0)}
+
+
+
工作模式
+
${escapeHtml(tavilyModeLabel(payload.routing?.effective_mode || 'upstream'))}
+
${escapeHtml(tavilyModeSourceLabel(payload.routing?.mode_source || 'auto_pending'))}
+
+ `; + return; + } + + const totalLimit = Number(quota.total_limit || 0); + const totalUsed = Number(quota.total_used || 0); + const totalRemaining = Number(quota.total_remaining || 0); + const remainStyle = totalLimit && totalUsed / totalLimit >= 0.9 + ? 'color: var(--danger)' + : totalLimit && totalUsed / totalLimit >= 0.7 + ? 'color: var(--warn)' + : 'color: var(--ok)'; + + document.getElementById(`overview-${service}`).innerHTML = ` +
+
真实总额度
+
${fmtNum(totalLimit)}
+
${SERVICE_META[service].quotaSource}
+
+
+
真实已用
+
${fmtNum(totalUsed)}
+
按已同步 Key 汇总
+
+
+
真实剩余
+
${fmtNum(totalRemaining)}
+
${quotaBar(totalUsed, totalLimit) || '尚未获得完整上限信息'}
+
+
+
Key 池状态
+
${fmtNum(payload.keys_active || 0)} / ${fmtNum(payload.keys_total || 0)}
+
活跃 / 总数
+
+
+
今日代理调用
+
${fmtNum(overview.today_count || 0)}
+
成功 ${fmtNum(overview.today_success || 0)} / 失败 ${fmtNum(overview.today_failed || 0)}
+
+
+
本月代理调用
+
${fmtNum(overview.month_count || 0)}
+
成功 ${fmtNum(overview.month_success || 0)}
+
+ `; +} + +function renderSocialWorkspace(social) { + const stats = social?.stats || {}; + const socialState = getSocialUpstreamState(social || {}); + const mode = socialModeLabel(social?.mode || 'manual'); + const source = socialTokenSourceLabel(social?.token_source || ''); + const syncLine = socialState.level === 'full' + ? [ + mode, + `Token ${fmtNum(stats.token_total || 0)}`, + `Chat ${fmtNum(stats.chat_remaining || 0)}`, + `总调用 ${fmtNum(stats.total_calls || 0)}`, + ].join(' · ') + : [ + mode, + `上游 key ${fmtNum(socialState.upstreamApiKeyCount)}`, + `客户端 token ${fmtNum(socialState.acceptedTokenCount)}`, + socialState.canProxySearch ? '已可转发搜索' : '待补鉴权', + ].join(' · '); + + const syncMeta = document.getElementById('sync-meta-social'); + if (syncMeta) { + syncMeta.textContent = social?.error ? `${syncLine} · 最近错误 ${social.error}` : syncLine; + } + + if (socialState.level !== 'full') { + document.getElementById('overview-social').innerHTML = ` +
+
工作模式
+
${escapeHtml(mode)}
+
当前 Social / X 工作台的路由状态
+
+
+
上游 Key 数
+
${fmtNum(socialState.upstreamApiKeyCount)}
+
当前已解析到的上游 API key 数量
+
+
+
客户端 Token 数
+
${fmtNum(socialState.acceptedTokenCount)}
+
可被 /social/search 接受的客户端 token 数量
+
+
+
Token 来源
+
${escapeHtml(source)}
+
${escapeHtml(socialState.detail || '当前只有基础接线可视化,完整 token 统计需要后台 tokens 面板。')}
+
+ `; + return; + } + + document.getElementById('overview-social').innerHTML = ` +
+
工作模式
+
${escapeHtml(mode)}
+
当前 Social / X 工作台的路由状态
+
+
+
Token 正常 / 总数
+
${fmtNum(stats.token_normal || 0)} / ${fmtNum(stats.token_total || 0)}
+
兼容上游 token 池实时汇总
+
+
+
Chat 剩余
+
${fmtNum(stats.chat_remaining || 0)}
+
Image ${fmtNum(stats.image_remaining || 0)} · Video ${stats.video_remaining === null ? '无法统计' : fmtNum(stats.video_remaining)}
+
+
+
Token 来源
+
${escapeHtml(source)}
+
${social?.admin_connected ? '当前已接入后台自动继承' : '当前为手动或混合模式'}
+
+ `; +} + +function renderApiExample(service, tokens) { + const firstToken = tokens && tokens.length ? tokens[0].token : 'YOUR_PROXY_TOKEN'; + document.getElementById(`base-url-${service}`).textContent = location.origin; + document.getElementById(`curl-example-${service}`).textContent = buildCurlExample(service, firstToken); +} + +function renderTokenQuota(token) { + return '
无限制
已关闭小时 / 日 / 月限流
'; +} + +function renderGlanceCard(label, value, hint = '') { + return ` +
+
${escapeHtml(label)}
+
${escapeHtml(value)}
+
${escapeHtml(hint)}
+
+ `; +} + +function renderTableLegend(kind = 'token') { + const items = kind === 'key' + ? [ + ['danger', '同步异常'], + ['warn', '额度偏低 / 失败偏多'], + ['busy', '调用偏高'], + ['off', '已停用'], + ] + : [ + ['danger', '失败偏多'], + ['warn', '近期有失败'], + ['busy', '调用偏高'], + ]; + return ` +
+ ${items.map(([tone, label]) => ` + + + ${escapeHtml(label)} + + `).join('')} +
+ `; +} + +function renderDrawerActionGroup(title, body, tone = 'neutral') { + return ` +
+
${escapeHtml(title)}
+
${body}
+
+ `; +} + +function renderPoolGlance(service, payload = {}) { + const tokenRoot = document.getElementById(`token-glance-${service}`); + const tokens = payload?.tokens || []; + if (tokenRoot) { + const tokenStats = tokens.reduce((acc, token) => { + const stats = token.stats || {}; + acc.today += Number(stats.today_success || 0) + Number(stats.today_failed || 0); + acc.month += Number(stats.month_success || 0) + Number(stats.month_failed || 0); + acc.hour += Number(stats.hour_count || 0); + return acc; + }, { today: 0, month: 0, hour: 0 }); + tokenRoot.innerHTML = [ + renderGlanceCard('Token 总数', fmtNum(tokens.length), `${getServiceDisplayLabel(service)} 当前可发放的访问凭证`), + renderGlanceCard('今日调用', fmtNum(tokenStats.today), `小时 ${fmtNum(tokenStats.hour)} · 本月 ${fmtNum(tokenStats.month)}`), + renderGlanceCard('默认策略', '无限制', '当前保持开放限流策略'), + ].join(''); + } + + const keyRoot = document.getElementById(`key-glance-${service}`); + if (keyRoot) { + const keys = payload?.keys || []; + const activeKeys = keys.filter((key) => Number(key.active) === 1).length; + const syncedKeys = keys.filter((key) => Boolean(key.usage_synced_at)).length; + const erroredKeys = keys.filter((key) => Boolean(key.usage_sync_error)).length; + keyRoot.innerHTML = [ + renderGlanceCard('活跃 Key', fmtNum(activeKeys), `总数 ${fmtNum(keys.length)}`), + renderGlanceCard('已同步', fmtNum(syncedKeys), '已有官方或账户级额度信息'), + renderGlanceCard('同步异常', fmtNum(erroredKeys), erroredKeys ? '建议点击行检查失败原因' : '当前没有同步异常'), + ].join(''); + } +} + +function renderTokenSummary(token) { + const stats = token.stats || {}; + return ` +
今日 ${fmtNum(Number(stats.today_success || 0) + Number(stats.today_failed || 0))}
+
本月 ${fmtNum(Number(stats.month_success || 0) + Number(stats.month_failed || 0))}
+
小时 ${fmtNum(stats.hour_count || 0)}
+ `; +} + +function renderKeyStatusSummary(service, key) { + const active = Number(key.active) === 1; + const remain = key.usage_key_remaining ?? key.usage_account_remaining; + const remainLabel = remain === null || remain === undefined ? '剩余待同步' : `剩余 ${fmtNum(remain)}`; + return ` +
${active ? '正常' : '禁用'}
+
${remainLabel}
+
${key.usage_synced_at ? `同步 ${formatTime(key.usage_synced_at)}` : (service === 'exa' ? '实时额度暂不可查' : '尚未同步')}
+ `; +} + +function renderKeyUsageSummary(key) { + return ` +
成功 ${fmtNum(key.total_used || 0)}
+
失败 ${fmtNum(key.total_failed || 0)}
+
最近 ${formatTime(key.last_used_at)}
+ `; +} + +function syncTokenToolbar(service) { + const state = getTokenTableState(service); + const search = document.getElementById(`token-search-${service}`); + if (search && search.value !== state.search) { + search.value = state.search; + } + document.querySelectorAll(`[data-token-sort][data-service="${service}"]`).forEach((button) => { + const active = button.dataset.tokenSort === state.sort; + button.classList.toggle('is-active', active); + button.setAttribute('aria-pressed', active ? 'true' : 'false'); + button.setAttribute('tabindex', active ? '0' : '-1'); + }); +} + +function syncKeyToolbar(service) { + const state = getKeyTableState(service); + const search = document.getElementById(`key-search-${service}`); + if (search && search.value !== state.search) { + search.value = state.search; + } + document.querySelectorAll(`[data-key-filter][data-service="${service}"]`).forEach((button) => { + const active = button.dataset.keyFilter === state.filter; + button.classList.toggle('is-active', active); + button.setAttribute('aria-pressed', active ? 'true' : 'false'); + button.setAttribute('tabindex', active ? '0' : '-1'); + }); + document.querySelectorAll(`[data-key-sort][data-service="${service}"]`).forEach((button) => { + const active = button.dataset.keySort === state.sort; + button.classList.toggle('is-active', active); + button.setAttribute('aria-pressed', active ? 'true' : 'false'); + button.setAttribute('tabindex', active ? 
'0' : '-1'); + }); +} + +function setTokenSearch(service, value) { + getTokenTableState(service).search = value || ''; + renderTokens(service, getServicePayload(service).tokens || []); +} + +function setTokenSort(service, value) { + getTokenTableState(service).sort = value || 'risk'; + renderTokens(service, getServicePayload(service).tokens || []); +} + +function setKeySearch(service, value) { + getKeyTableState(service).search = value || ''; + renderKeys(service, getServicePayload(service).keys || []); +} + +function setKeyFilter(service, value) { + getKeyTableState(service).filter = value || 'all'; + renderKeys(service, getServicePayload(service).keys || []); +} + +function setKeySort(service, value) { + getKeyTableState(service).sort = value || 'risk'; + renderKeys(service, getServicePayload(service).keys || []); +} + +function renderTokens(service, tokens) { + const tbody = document.getElementById(`tokens-body-${service}`); + syncTokenToolbar(service); + if (!tokens || tokens.length === 0) { + tbody.innerHTML = '当前还没有 Token,先创建一个给下游使用。'; + return; + } + + const filtered = getFilteredTokens(service, tokens); + if (!filtered.length) { + tbody.innerHTML = '没有符合当前筛选条件的 Token。'; + return; + } + + tbody.innerHTML = filtered.map((token) => { + const rowClass = getTokenRowClass(token); + return ` + + + ${maskToken(token.token)} +
点击查看详情
+ + ${escapeHtml(token.name || '-')} + ${renderTokenSummary(token)} + +
+ + +
+ + + `; + }).join(''); +} + +function renderKeyQuota(service, key) { + if (service === 'exa') { + return ` +
Exa 实时额度暂时无法查询。
+
当前只展示代理层调用统计。
+ ${key.usage_sync_error ? `
最近错误: ${escapeHtml(key.usage_sync_error)}
` : ''} + `; + } + + if (key.usage_key_limit !== null && key.usage_key_used !== null) { + const remain = key.usage_key_remaining ?? Math.max(0, key.usage_key_limit - key.usage_key_used); + return ` +
已用 ${fmtNum(key.usage_key_used)} / ${fmtNum(key.usage_key_limit)}
+
剩余 ${fmtNum(remain)}
+ ${quotaBar(key.usage_key_used, key.usage_key_limit)} +
同步 ${formatTime(key.usage_synced_at)}
+ ${key.usage_sync_error ? `
最近错误: ${escapeHtml(key.usage_sync_error)}
` : ''} + `; + } + + if (service === 'firecrawl' && key.usage_synced_at) { + return ` +
Firecrawl 当前主要返回账户级 credits。
+
单 Key 独立限额请看右侧账户额度。
+ ${key.usage_sync_error ? `
最近错误: ${escapeHtml(key.usage_sync_error)}
` : ''} + `; + } + + if (key.usage_sync_error) { + return `
同步失败:${escapeHtml(key.usage_sync_error)}
`; + } + + return '未同步'; +} + +function renderAccountQuota(service, key) { + if (service === 'exa') { + return 'Exa 账户实时额度暂时无法查询'; + } + + if (key.usage_account_limit !== null && key.usage_account_used !== null) { + const remain = key.usage_account_remaining ?? Math.max(0, key.usage_account_limit - key.usage_account_used); + const plan = key.usage_account_plan || (service === 'firecrawl' ? 'Firecrawl Credits' : '未知计划'); + return ` +
${escapeHtml(plan)}
+
已用 ${fmtNum(key.usage_account_used)} / ${fmtNum(key.usage_account_limit)}
+
剩余 ${fmtNum(remain)}
+ ${quotaBar(key.usage_account_used, key.usage_account_limit)} + `; + } + return '未返回'; +} + +function renderKeys(service, keys) { + const tbody = document.getElementById(`keys-body-${service}`); + syncKeyToolbar(service); + if (!keys || keys.length === 0) { + tbody.innerHTML = '当前服务还没有导入 Key。'; + return; + } + + const filtered = getFilteredKeys(service, keys); + if (!filtered.length) { + tbody.innerHTML = '没有符合当前筛选条件的 Key。'; + return; + } + + tbody.innerHTML = filtered.map((key) => { + const active = Number(key.active) === 1; + const rowClass = getKeyRowClass(service, key); + return ` + + ${fmtNum(key.id)} + + ${escapeHtml(key.key_masked || key.key)} +
点击查看详情
+ + ${escapeHtml(key.email || '-')} + ${renderKeyStatusSummary(service, key)} + ${renderKeyUsageSummary(key)} + ${active ? '正常' : '禁用'} + +
+ + +
+ + + `; + }).join(''); +} + +function openTokenDetail(service, tokenId) { + const payload = getServicePayload(service); + const token = (payload?.tokens || []).find((item) => Number(item.id) === Number(tokenId)); + if (!token) { + showToast('没有找到这个 token 的最新数据。', 'warn'); + return; + } + const stats = token.stats || {}; + const label = getServiceDisplayLabel(service); + openDetailDrawer({ + kicker: `${label} Token`, + title: token.name || `${label} Token #${token.id}`, + subtitle: `ID ${fmtNum(token.id)} · 给客户端分发的统一访问凭证`, + tone: service === 'mysearch' ? 'info' : 'ok', + summaryHtml: [ + drawerMetric('今日成功', fmtNum(stats.today_success || 0), `失败 ${fmtNum(stats.today_failed || 0)}`), + drawerMetric('本月成功', fmtNum(stats.month_success || 0), `失败 ${fmtNum(stats.month_failed || 0)}`), + drawerMetric('小时调用', fmtNum(stats.hour_count || 0), '当前 token 的近一小时请求量'), + ].join(''), + bodyHtml: [ + drawerSection('完整 Token', `
${escapeHtml(token.token)}
`), + drawerSection('配额策略', renderTokenQuota(token)), + drawerSection('代理统计', ` +
+
今日总调用${fmtNum(Number(stats.today_success || 0) + Number(stats.today_failed || 0))}
+
本月总调用${fmtNum(Number(stats.month_success || 0) + Number(stats.month_failed || 0))}
+
+ `), + ].join(''), + actionsHtml: [ + renderDrawerActionGroup('维护动作', ` + + `), + renderDrawerActionGroup('危险动作', ` + + `, 'danger'), + ].join(''), + }); +} + +function openKeyDetail(service, keyId) { + const payload = getServicePayload(service); + const key = (payload?.keys || []).find((item) => Number(item.id) === Number(keyId)); + if (!key) { + showToast('没有找到这个 Key 的最新数据。', 'warn'); + return; + } + const active = Number(key.active) === 1; + const label = getServiceDisplayLabel(service); + openDetailDrawer({ + kicker: `${label} Key`, + title: key.email || `${label} Key #${key.id}`, + subtitle: `${escapeHtml(key.key_masked || key.key)} · ${active ? '当前正常' : '当前禁用'}`, + tone: active ? 'ok' : 'danger', + summaryHtml: [ + drawerMetric('Key 状态', active ? '正常' : '禁用', `ID ${fmtNum(key.id)}`), + drawerMetric('成功调用', fmtNum(key.total_used || 0), `失败 ${fmtNum(key.total_failed || 0)}`), + drawerMetric('最近使用', formatTime(key.last_used_at), key.usage_sync_error ? '存在同步异常' : '统计正常'), + ].join(''), + bodyHtml: [ + drawerSection('Key 配额', renderKeyQuota(service, key)), + drawerSection('账户额度', renderAccountQuota(service, key)), + drawerSection('代理统计', ` +
+
成功${fmtNum(key.total_used || 0)}
+
失败${fmtNum(key.total_failed || 0)}
+
最近使用${escapeHtml(formatTime(key.last_used_at))}
+
+ ${key.usage_sync_error ? `
同步异常:${escapeHtml(key.usage_sync_error)}
` : ''} + `), + ].join(''), + actionsHtml: [ + renderDrawerActionGroup('维护动作', ` + + `), + renderDrawerActionGroup('危险动作', ` + + `, 'danger'), + ].join(''), + }); +} + +function renderProviderWorkspace(service, servicePayload) { + const payload = servicePayload || getBlankServicePayload(); + const meta = SERVICE_META[service]; + renderSyncMeta(service, payload); + renderOverview(service, payload); + renderApiExample(service, payload.tokens || []); + renderTokens(service, payload.tokens || []); + renderKeys(service, payload.keys || []); + renderPoolGlance(service, payload); + const syncButton = document.getElementById(`sync-btn-${service}`); + if (syncButton) { + const syncSupported = payload.usage_sync?.supported !== false && meta.syncSupported !== false; + syncButton.textContent = syncSupported ? meta.syncButton : '暂不支持同步'; + syncButton.disabled = !syncSupported; + syncButton.title = syncSupported ? '' : (payload.usage_sync?.detail || meta.quotaSource); + } +} + +function renderDashboardScope(scope) { + const nextScope = normalizeRefreshScope(scope); + if (nextScope.core) { + if (PAGE_KIND === 'console') { + renderGlobalSummary(latestServices, latestSocial); + renderHeroFocus(latestServices, latestSocial); + renderServiceSwitcher(latestServices, latestSocial); + } + renderSettingsSummaries(); + } + if (nextScope.mysearch) { + renderMySearchQuickstart(latestMySearch, latestSocial); + } + if (PAGE_KIND === 'console' && nextScope.social) { + renderSocialBoard(latestSocial); + renderSocialIntegration(latestSocial); + renderSocialWorkspace(latestSocial); + } + if (PAGE_KIND === 'console') { + nextScope.services.forEach((service) => { + renderProviderWorkspace(service, latestServices[service] || getBlankServicePayload()); + }); + applyActiveService(); + } +} + +async function refresh(options = {}) { + const force = options.force ? 
'?force=1' : ''; + const payload = await api('GET', `/api/stats${force}`); + const services = payload.services || {}; + const social = payload.social || {}; + const mysearch = payload.mysearch || {}; + latestStatsMeta = payload.meta || {}; + latestServices = services; + latestSocial = social; + latestMySearch = mysearch; + renderDashboardScope(options.scope); +} + +function toggleImport(service) { + document.getElementById(`import-wrap-${service}`).classList.toggle('hidden'); +} + +async function createToken(service, button) { + const input = document.getElementById(`token-name-${service}`); + const tokenName = input.value.trim(); + await runWithBusyButton(button, { + busyLabel: '创建中...', + successLabel: '已创建', + errorLabel: '创建失败', + minBusyMs: service === 'mysearch' ? 560 : BUTTON_MIN_BUSY_MS, + }, async () => { + await api('POST', '/api/tokens', { + service, + name: tokenName, + }); + input.value = ''; + }); + await sleep(service === 'mysearch' ? 180 : 80); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); +} + +async function delToken(service, id) { + const confirmed = await showConfirmDialog({ + title: '删除 Token', + message: '删除后这个 token 会立即失效,下游客户端会立刻无法继续调用。', + confirmText: '确认删除', + cancelText: '取消', + tone: 'danger', + kicker: 'Danger Zone', + }); + if (!confirmed) return; + await api('DELETE', `/api/tokens/${id}`); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); +} + +async function addSingleKey(service, button) { + const input = document.getElementById(`single-key-${service}`); + const key = input.value.trim(); + if (!key) return; + await runWithBusyButton(button, { + busyLabel: '添加中...', + successLabel: '已添加', + errorLabel: '添加失败', + }, async () => { + await api('POST', '/api/keys', { service, key }); + input.value = ''; + }); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); +} + +async function importKeys(service, button) { + const textarea = 
document.getElementById(`import-text-${service}`); + const text = textarea.value.trim(); + if (!text) return; + await runWithBusyButton(button, { + busyLabel: '导入中...', + successLabel: '已导入', + errorLabel: '导入失败', + }, async () => { + const result = await api('POST', '/api/keys', { service, file: text }); + textarea.value = ''; + document.getElementById(`import-wrap-${service}`).classList.add('hidden'); + showToast(`已导入 ${result.imported || 0} 个 ${SERVICE_META[service].label} Key`, 'success'); + }); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); +} + +async function delKey(service, id) { + const confirmed = await showConfirmDialog({ + title: '删除 API Key', + message: '删除后这个上游 Key 会从当前服务池中移除,额度同步和代理调用都会停止使用它。', + confirmText: '确认删除', + cancelText: '取消', + tone: 'danger', + kicker: 'Danger Zone', + }); + if (!confirmed) return; + await api('DELETE', `/api/keys/${id}`); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); +} + +async function toggleKey(service, id, active) { + await api('PUT', `/api/keys/${id}/toggle`, { active }); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); +} + +async function syncUsage(service, force, button) { + if (SERVICE_META[service]?.syncSupported === false) { + showToast(SERVICE_META[service].quotaSource, 'warn'); + return; + } + const actionButton = button || document.getElementById(`sync-btn-${service}`); + try { + await runWithBusyButton(actionButton, { + busyLabel: '同步中...', + successLabel: '已同步', + errorLabel: '同步失败', + }, async () => { + await api('POST', '/api/usage/sync', { service, force }); + }); + await refresh({ force: true, scope: getRefreshScopeForService(service) }); + } catch (error) { + showToast(`同步 ${SERVICE_META[service].label} 额度失败: ${error.message}`, 'error'); + } +} + +async function changePwd(event) { + event?.preventDefault?.(); + const button = event?.submitter; + const input = document.getElementById('settings-new-pwd'); + const 
password = input.value.trim(); + if (password.length < 4) { + setStatus('settings-password-status', '密码至少 4 位。', true); + return; + } + try { + await runWithBusyButton(button, { + busyLabel: '保存中...', + successLabel: '已保存', + errorLabel: '保存失败', + }, async () => { + await api('PUT', '/api/password', { password }); + PWD = password; + localStorage.setItem(STORAGE_KEY, password); + localStorage.removeItem(LEGACY_STORAGE_KEY); + input.value = ''; + setStatus('settings-password-status', '密码已更新,当前会话也已同步。'); + }); + } catch (error) { + setStatus('settings-password-status', `保存密码失败:${error.message}`, true); + } +} + +async function saveSocialSettings(event) { + event?.preventDefault?.(); + const button = event?.submitter; + const body = collectSocialSettingsForm(); + clearSettingsProbe('social'); + + try { + await runWithBusyButton(button, { + busyLabel: '保存中...', + successLabel: '已保存', + errorLabel: '保存失败', + }, async () => { + const payload = await api('PUT', '/api/settings/social', body); + latestSettings = payload || {}; + fillSettingsForm(latestSettings); + setStatus('settings-social-status', 'Social / X 设置已保存,当前控制台状态已刷新。'); + }); + await refresh({ force: true, scope: getRefreshScopeForService('social') }); + } catch (error) { + setStatus('settings-social-status', `保存 Social / X 设置失败:${error.message}`, true); + } +} + +function flashButtonLabel(button, label) { + flashButtonState(button, label); +} + +function sleep(ms) { + return new Promise((resolve) => setTimeout(resolve, Math.max(0, ms || 0))); +} + +function ensureButtonLabel(button) { + if (!button) return ''; + if (!button.dataset.originalLabel) { + button.dataset.originalLabel = button.textContent.trim(); + } + return button.dataset.originalLabel; +} + +function resetButtonState(button, label = '') { + if (!button) return; + const original = ensureButtonLabel(button); + button.disabled = false; + button.removeAttribute('aria-busy'); + button.classList.remove('is-busy', 'is-success', 'is-error'); + 
button.textContent = label || original; +} + +function flashButtonState(button, label, state = 'success', duration = 1400) { + if (!button) return; + const original = ensureButtonLabel(button); + button.disabled = true; + button.classList.remove('is-busy', 'is-success', 'is-error'); + button.classList.add(`is-${state}`); + button.textContent = label; + setTimeout(() => { + resetButtonState(button, original); + }, duration); +} + +async function runWithBusyButton(button, { + busyLabel = '处理中...', + successLabel = '已完成', + errorLabel = '失败', + minBusyMs = BUTTON_MIN_BUSY_MS, +} = {}, task) { + if (!button) { + return task(); + } + const original = ensureButtonLabel(button); + const startedAt = Date.now(); + button.disabled = true; + button.setAttribute('aria-busy', 'true'); + button.classList.remove('is-success', 'is-error'); + button.classList.add('is-busy'); + button.textContent = busyLabel; + try { + const result = await task(); + const remaining = minBusyMs - (Date.now() - startedAt); + if (remaining > 0) { + await sleep(remaining); + } + if (button.isConnected) { + flashButtonState(button, successLabel, 'success'); + } + return result; + } catch (error) { + const remaining = minBusyMs - (Date.now() - startedAt); + if (remaining > 0) { + await sleep(remaining); + } + if (button.isConnected) { + flashButtonState(button, errorLabel, 'error'); + } + throw error; + } finally { + button.removeAttribute('aria-busy'); + if (button.classList.contains('is-busy')) { + resetButtonState(button, original); + } + } +} + +async function writeClipboardText(text) { + const value = String(text ?? 
''); + + if (navigator.clipboard?.writeText && window.isSecureContext) { + try { + await navigator.clipboard.writeText(value); + return true; + } catch (error) { + console.warn('Clipboard API failed, falling back to execCommand copy.', error); + } + } + + const textarea = document.createElement('textarea'); + textarea.value = value; + textarea.setAttribute('readonly', ''); + textarea.style.position = 'fixed'; + textarea.style.top = '0'; + textarea.style.left = '-9999px'; + textarea.style.opacity = '0'; + textarea.style.pointerEvents = 'none'; + + document.body.appendChild(textarea); + + const selection = document.getSelection(); + const ranges = []; + if (selection) { + for (let index = 0; index < selection.rangeCount; index += 1) { + ranges.push(selection.getRangeAt(index).cloneRange()); + } + } + + textarea.focus({ preventScroll: true }); + textarea.select(); + textarea.setSelectionRange(0, textarea.value.length); + + let copied = false; + try { + copied = document.execCommand('copy'); + } finally { + textarea.remove(); + if (selection) { + selection.removeAllRanges(); + ranges.forEach((range) => selection.addRange(range)); + } + } + + if (!copied) { + throw new Error('Clipboard copy command was rejected'); + } + + return true; +} + +async function copyCode(elementId, button) { + const source = document.getElementById(elementId); + if (!source) { + flashButtonState(button, '未找到', 'error'); + return; + } + + try { + await writeClipboardText(source.textContent); + flashButtonState(button, '已复制', 'success'); + } catch (error) { + console.error(`Copy failed for #${elementId}`, error); + flashButtonState(button, '复制失败', 'error'); + } +} + +async function copyEnvAndRevealInstall(button) { + const envBlock = document.getElementById('mysearch-proxy-env'); + const installBlock = document.getElementById('mysearch-install-cmd'); + if (!envBlock) { + flashButtonState(button, '未找到 .env', 'error'); + return; + } + try { + await writeClipboardText(envBlock.textContent); + if 
(installBlock) { + installBlock.scrollIntoView({ behavior: 'smooth', block: 'center' }); + } + flashButtonState(button, '已复制并定位', 'success'); + } catch (error) { + console.error('Copy-and-scroll failed for MySearch quickstart', error); + flashButtonState(button, '操作失败', 'error'); + } +} + +async function copyText(value, button) { + try { + await writeClipboardText(value); + flashButtonState(button, '已复制', 'success'); + } catch (error) { + console.error('Copy failed for inline value', error); + flashButtonState(button, '复制失败', 'error'); + } +} + +document.addEventListener('keydown', (event) => { + if (handleSegmentedControlKey(event)) { + return; + } + if (trapOverlayFocus(event)) { + return; + } + if (event.key !== 'Escape') return; + if (isShellVisible('app-dialog')) { + closeAppDialog(false); + return; + } + if (isShellVisible('detail-drawer')) { + closeDetailDrawer(); + return; + } + if (isShellVisible('settings-modal')) { + closeSettingsModal(); + } +}); + +window.addEventListener('focus', () => { + refreshAutoThemeFromClock(); +}); + +document.addEventListener('visibilitychange', () => { + if (!document.hidden) { + refreshAutoThemeFromClock(); + } +}); + +applyTheme(activeTheme); +renderServiceShells(); + +async function initConsole() { + if (INITIAL_AUTHENTICATED) { + showDashboard(); + try { + await refresh(); + } catch (error) { + if (error.message === 'Unauthorized') { + showLogin(); + return; + } + document.getElementById('login-err').textContent = `控制台加载失败:${error.message}`; + document.getElementById('login-err').classList.remove('hidden'); + } + return; + } + + const migrated = await migrateStoredPasswordIfNeeded(); + const hasSession = migrated || await hasServerSession(); + if (!hasSession) { + showLogin(); + return; + } + showDashboard(); + try { + await refresh(); + } catch (error) { + if (error.message === 'Unauthorized') { + showLogin(); + return; + } + document.getElementById('login-err').textContent = `控制台加载失败:${error.message}`; + 
document.getElementById('login-err').classList.remove('hidden'); + } +} + +initConsole(); + +function setActiveSettingsTab(tabName) { + document.querySelectorAll('.settings-tab').forEach(btn => { + btn.classList.toggle('is-active', btn.dataset.settingsTab === tabName); + btn.setAttribute('aria-selected', btn.dataset.settingsTab === tabName ? 'true' : 'false'); + btn.setAttribute('tabindex', btn.dataset.settingsTab === tabName ? '0' : '-1'); + }); + document.querySelectorAll('.settings-tab-panel').forEach(panel => { + panel.classList.toggle('hidden', panel.dataset.settingsPanel !== tabName); + panel.classList.toggle('is-active', panel.dataset.settingsPanel === tabName); + }); +} + + +function maskToken(token) { + if (!token) return ''; + if (token.length <= 12) return '****'; + return token.slice(0, 5) + '****' + token.slice(-4); +} + + +async function saveTavilySettings(event) { + event?.preventDefault?.(); + const button = event?.submitter; + const body = collectTavilySettingsForm(); + clearSettingsProbe('tavily'); + + try { + await runWithBusyButton(button, { + busyLabel: '保存中...', + successLabel: '已保存', + errorLabel: '保存失败', + }, async () => { + const payload = await api('PUT', '/api/settings/tavily', body); + latestSettings = payload || {}; + fillSettingsForm(latestSettings); + setStatus('settings-tavily-status', 'Tavily 设置已保存。'); + }); + await refresh({ force: true, scope: getRefreshScopeForService('tavily') }); + } catch (error) { + setStatus('settings-tavily-status', `保存失败:${error.message}`, true); + } +} diff --git a/proxy/templates/components/_hero.html b/proxy/templates/components/_hero.html new file mode 100644 index 0000000..29c40d9 --- /dev/null +++ b/proxy/templates/components/_hero.html @@ -0,0 +1,100 @@ +
+
+
+
+ +
+
+
+ Search Operations Desk +
+ MySearch Proxy +
+
+ + +
+
+ +
+
+
+ + + 统一搜索入口 + + + + 官方 / Gateway 双接入 + + + + MCP + Proxy + Skill + +
+ +
+
Infrastructure-grade console
+

+ 把搜索控制台做成 + 真正可交付的基础设施 +

+
+

MySearch 把 Tavily、Exa、Firecrawl 和 Social / X 统一成同一块运维工作台。你在这里管理的不是零散 provider,而是一套可直接交给 Codex、Claude Code 和团队 Agent 使用的搜索控制面。

+ +
+ + +
+ +
+
+ 统一运维 + 把 Key 池、Token、额度同步和团队共享入口放进同一套控制面,不再靠 README 记忆操作。 +
+
+ 双模式接入 + 官方 API、上游 Gateway 和 compatible social router 都可以在同一个界面里切换和核对。 +
+
+ 摘要优先 + 先看当前工作台状态和剩余额度,再下钻到 Token、Key、设置和调用示例,避免首屏变成长表格。 +
+
+ 直接交付 + 控制台生成的配置信息可以直接喂给 MySearch MCP、OpenClaw 或其他客户端,不需要手动拼接变量。 +
+
+
+
+ +
+
+ Discovery + Tavily 做网页发现 +

适合新闻、快速 answer 和基础搜索入口,也支持切到上游 Tavily Gateway。

+
+
+ Expansion + Exa 做补充搜索 +

当你需要额外的网页搜索入口时,这里用独立 Key 池、Token 池和代理端点承接。

+
+
+ Extraction + Firecrawl 做正文抓取 +

正文页、文档站、PDF 和结构化抽取放在这里统一管理 credits 与抓取调用。

+
+
+ Signal + Social / X 做舆情路由 +

兼容 grok2api 和 xAI-compatible 搜索,对外统一成同一条 `/social/search` 链路。

+
+
+
+
diff --git a/proxy/templates/components/_settings_modal.html b/proxy/templates/components/_settings_modal.html new file mode 100644 index 0000000..3ef605c --- /dev/null +++ b/proxy/templates/components/_settings_modal.html @@ -0,0 +1,225 @@ + diff --git a/proxy/templates/console.html b/proxy/templates/console.html index 83ae295..669edcc 100644 --- a/proxy/templates/console.html +++ b/proxy/templates/console.html @@ -5,4094 +5,131 @@ MySearch Proxy Console - - - - -
-
MySearch Proxy
-

Search Infrastructure Console

-

把 Tavily、Exa、Firecrawl 和 Social / X 接入放进同一块工作台里,适合你自己用,也适合直接公开给团队或社区部署。

-
- - - -
- -
- -
-
-
-
-
-
- 统一搜索入口 - 官方 / 兼容双接入 - MCP + Proxy + Skill -
-
- -
-
- -
-
-

把搜索基础设施做成真正可交付的工作台

-

MySearch 把 Tavily、Exa、Firecrawl 和 Social / X 统一成一套可被 Codex、Claude Code 和团队 Agent 直接调用的搜索入口。使用者拿到后,不需要自己维护多套 provider、兼容网关和结果结构,也不需要每次手动判断这一轮该用网页搜索、正文抓取还是社交舆情。

- -
-
- 统一入口 - 网页发现、正文抓取、社交舆情走同一套接线方式。 -
-
- Exa 工作区 - 把 Exa 独立成可发 Token、可管 Key、可直接代理调用的新栏目。 -
-
- 统一运维 - Key 池、Token、额度同步和团队共享入口放在同一个控制台里。 -
-
- 统一兼容 - 官方 API 和自定义 compatible 聚合服务都能接,不锁死单一后端。 -
-
-
-
- -
-
- Router - Tavily 做发现 -

适合新闻、快速 answer、网页线索收集,放在 MySearch 的第一层路由里。

-
-
- Search - Exa 做网页发现 -

适合补 Tavily 之外的网页搜索入口,这里收成独立 Key 池、Token 池和代理端点。

-
-
- Depth - Firecrawl 做抓取 -

文档站、GitHub、PDF、pricing 和 changelog 这类正文内容,交给 Firecrawl 更稳。

-
-
- Social - Social / X 做舆情 -

兼容 grok2api 和 xAI-compatible 搜索,把 X 结果统一整理成 MySearch 可直接消费的结构。

-
-
-
-
- -
- -
-
- Powered by grok2api - Social / X 工作台当前默认调用 grok2api -

这部分能力基于 grok2api 提供的兼容接口和后台 token 状态完成接线。MySearch 的 Social / X 路由在这里向 grok2api 项目致谢,并保留它作为默认兼容实现来源。

- - github.com/chenyme/grok2api - -
-
-
-
当前用途
-
X Search Compatible
-
-
-
感谢项目
-
grok2api
-
-
-
- -
- -
-
-
- Workspace Switcher -

一屏切换不同搜索引擎工作台

-

选择要查看的工作台,下面会展开对应服务的 Key、Token、额度和接入配置。

-
-
已记住上次打开的工作台
-
-
-
- -
-
- -