diff --git a/.dockerignore b/.dockerignore index d95d287d..fc71438c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1,2 @@ -mariadb/ \ No newline at end of file +mariadb/ +docker-compose.yml diff --git a/.github/workflows/deploy-amd64.yaml b/.github/workflows/deploy-amd64.yaml new file mode 100644 index 00000000..96428eef --- /dev/null +++ b/.github/workflows/deploy-amd64.yaml @@ -0,0 +1,50 @@ +name: Build and Push AMD64 +on: + workflow_dispatch: # Only triggered manually +jobs: + build-and-push-amd64: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 # Fetch all history for proper versioning + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Calculate version + id: version + run: | + # Get the latest git tag + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") + echo "Latest tag: $LATEST_TAG" + + # Get current commit hash + COMMIT_HASH=$(git rev-parse --short HEAD) + echo "Commit hash: $COMMIT_HASH" + + # Create version string + VERSION="${LATEST_TAG}-${COMMIT_HASH}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest_tag=${LATEST_TAG}" >> $GITHUB_OUTPUT + echo "commit_hash=${COMMIT_HASH}" >> $GITHUB_OUTPUT + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push AMD64 Docker image + uses: docker/build-push-action@v4 + with: + context: . 
+ platforms: linux/amd64 + push: true + provenance: false + tags: | + ghcr.io/agessaman/meshinfo-lite:latest-amd64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }}-amd64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }}-amd64 \ No newline at end of file diff --git a/.github/workflows/deploy-arm64.yaml b/.github/workflows/deploy-arm64.yaml new file mode 100644 index 00000000..f949fb66 --- /dev/null +++ b/.github/workflows/deploy-arm64.yaml @@ -0,0 +1,60 @@ +name: Build and Push ARM64 +on: + workflow_dispatch: # Only triggered manually +jobs: + build-and-push-arm64: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 # Fetch all history for proper versioning + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Cache Docker layers + uses: actions/cache@v4 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-arm64-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-arm64- + + - name: Calculate version + id: version + run: | + # Get the latest git tag + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") + echo "Latest tag: $LATEST_TAG" + + # Get current commit hash + COMMIT_HASH=$(git rev-parse --short HEAD) + echo "Commit hash: $COMMIT_HASH" + + # Create version string + VERSION="${LATEST_TAG}-${COMMIT_HASH}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest_tag=${LATEST_TAG}" >> $GITHUB_OUTPUT + echo "commit_hash=${COMMIT_HASH}" >> $GITHUB_OUTPUT + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push ARM64 Docker image + uses: docker/build-push-action@v4 + with: + context: . 
+ platforms: linux/arm64 + push: true + provenance: false + tags: | + ghcr.io/agessaman/meshinfo-lite:latest-arm64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }}-arm64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }}-arm64 + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache,mode=max \ No newline at end of file diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 6f197e24..27432a62 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -1,28 +1,139 @@ -name: Manual Build and Push +name: Build, Push, and Publish Multi-Arch Manifest + on: - workflow_dispatch: # Only triggered manually + workflow_dispatch: + jobs: - build-and-push: + build-and-push-amd64: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v3 - + with: + fetch-depth: 0 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Cache Docker layers + uses: actions/cache@v4 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-amd64-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-amd64- + - name: Calculate version + id: version + run: | + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") + echo "Latest tag: $LATEST_TAG" + COMMIT_HASH=$(git rev-parse --short HEAD) + echo "Commit hash: $COMMIT_HASH" + VERSION="${LATEST_TAG}-${COMMIT_HASH}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest_tag=${LATEST_TAG}" >> $GITHUB_OUTPUT + echo "commit_hash=${COMMIT_HASH}" >> $GITHUB_OUTPUT + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push AMD64 Docker image + uses: docker/build-push-action@v4 + with: + context: . 
+ platforms: linux/amd64 + push: true + provenance: false + tags: | + ghcr.io/agessaman/meshinfo-lite:latest-amd64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }}-amd64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }}-amd64 + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache,mode=max + + build-and-push-arm64: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - + - name: Cache Docker layers + uses: actions/cache@v4 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-arm64-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-arm64- + - name: Calculate version + id: version + run: | + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") + echo "Latest tag: $LATEST_TAG" + COMMIT_HASH=$(git rev-parse --short HEAD) + echo "Commit hash: $COMMIT_HASH" + VERSION="${LATEST_TAG}-${COMMIT_HASH}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest_tag=${LATEST_TAG}" >> $GITHUB_OUTPUT + echo "commit_hash=${COMMIT_HASH}" >> $GITHUB_OUTPUT - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push Docker image + - name: Build and push ARM64 Docker image uses: docker/build-push-action@v4 with: context: . 
+ platforms: linux/arm64 push: true + provenance: false tags: | - ghcr.io/agessaman/meshinfo-lite:latest - ghcr.io/agessaman/meshinfo-lite:${{ github.sha }} + ghcr.io/agessaman/meshinfo-lite:latest-arm64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }}-arm64 + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }}-arm64 + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache,mode=max + + create-manifest: + runs-on: ubuntu-latest + needs: [build-and-push-amd64, build-and-push-arm64] + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Calculate version + id: version + run: | + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") + echo "Latest tag: $LATEST_TAG" + COMMIT_HASH=$(git rev-parse --short HEAD) + echo "Commit hash: $COMMIT_HASH" + VERSION="${LATEST_TAG}-${COMMIT_HASH}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "latest_tag=${LATEST_TAG}" >> $GITHUB_OUTPUT + echo "commit_hash=${COMMIT_HASH}" >> $GITHUB_OUTPUT + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Create and push multi-arch manifest + run: | + docker manifest create ghcr.io/agessaman/meshinfo-lite:latest \ + ghcr.io/agessaman/meshinfo-lite:latest-amd64 \ + ghcr.io/agessaman/meshinfo-lite:latest-arm64 + docker manifest create ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }} \ + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }}-amd64 \ + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }}-arm64 + docker manifest create ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }} \ + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }}-amd64 \ + ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }}-arm64 + 
docker manifest push ghcr.io/agessaman/meshinfo-lite:latest + docker manifest push ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.version }} + docker manifest push ghcr.io/agessaman/meshinfo-lite:${{ steps.version.outputs.latest_tag }} diff --git a/.gitignore b/.gitignore index 49ca2c8e..9a7ef5df 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,8 @@ node_modules/ .github_pat .env .venv +.cursorrules +.cursor/ backups/* caddy @@ -19,6 +21,7 @@ postgres/data/* config.json docker-compose.yml spa +venv/ .pnp.* .yarn/* diff --git a/Dockerfile b/Dockerfile index 6c13aa8a..dc36db62 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,32 +1,89 @@ # trunk-ignore-all(checkov/CKV_DOCKER_3) -FROM python:3.13-slim +FROM python:3.13.3-slim-bookworm -LABEL org.opencontainers.image.source=https://github.com/dadecoza/meshinfo-lite +LABEL org.opencontainers.image.source=https://github.com/agessaman/meshinfo-lite LABEL org.opencontainers.image.description="Realtime web UI to run against a Meshtastic regional or private mesh network." 
ENV MQTT_TLS=false -ENV PYTHONUNBUFFERED=1 +ENV PYTHONUNBUFFERED=1 \ + # Set standard locations + PATH="/app/.local/bin:${PATH}" \ + # Consistent port for the app + APP_PORT=8000 \ + # Optimize pip for faster builds + PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 + +RUN groupadd --system app && \ + useradd --system --gid app --home-dir /app --create-home app # Set the working directory in the container -RUN mkdir /app WORKDIR /app -RUN apt-get update && apt-get -y install \ - libexpat1 libexpat1-dev +# Install system dependencies +ARG TARGETPLATFORM +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + libexpat1 \ + libcairo2 \ + pkg-config \ + fonts-symbola \ + fontconfig \ + freetype2-demos \ + libgdal-dev \ + gdal-bin \ + libgeos-dev \ + libproj-dev \ + proj-bin \ + default-mysql-client \ + $([ "$TARGETPLATFORM" = "linux/arm64" ] && echo "curl") \ + && apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +RUN fc-cache -fv + +# Architecture-specific rasterio installation +ENV GDAL_CONFIG=/usr/bin/gdal-config + +# For ARM64: Install conda/mamba and use conda-forge for rasterio +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + curl -L -O https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-aarch64.sh && \ + bash Miniforge3-Linux-aarch64.sh -b -p /opt/conda && \ + rm Miniforge3-Linux-aarch64.sh && \ + /opt/conda/bin/mamba install -c conda-forge rasterio -y; \ + fi + +# Update PATH to include conda if installed +ENV PATH="/opt/conda/bin:${PATH}" COPY requirements.txt banner run.sh ./ -COPY *.py ./ -COPY www ./www -COPY templates ./templates -COPY migrations ./migrations -RUN pip install --upgrade pip -RUN pip install --no-cache-dir -r requirements.txt +# Upgrade pip and install packages +RUN pip install --upgrade pip setuptools wheel -HEALTHCHECK NONE +# Install requirements, excluding rasterio for ARM64 (already installed via conda) +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + grep -v 
"^rasterio" requirements.txt > requirements_filtered.txt || echo "" > requirements_filtered.txt; \ + else \ + cp requirements.txt requirements_filtered.txt; \ + fi -EXPOSE 8080 +RUN su app -c "pip install --no-cache-dir --user -r requirements_filtered.txt" + +COPY --chown=app:app banner run.sh ./ +COPY --chown=app:app *.py ./ +COPY --chown=app:app *.sh ./ +COPY --chown=app:app www ./www +COPY --chown=app:app templates ./templates +COPY --chown=app:app migrations ./migrations + +HEALTHCHECK NONE RUN chmod +x run.sh +RUN chmod +x *.sh + +USER app + +EXPOSE ${APP_PORT} -CMD ["./run.sh"] +CMD ["./run.sh"] \ No newline at end of file diff --git a/README.md b/README.md index d13d9c66..bc3f785d 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ If you're interested in aeronautical (ADS-B/ACARS/VDL/HFDL/SATCOM) or ship track - text - traceroute - mapreport +- routing ## Features @@ -49,6 +50,7 @@ If you're interested in aeronautical (ADS-B/ACARS/VDL/HFDL/SATCOM) or ship track - MQTT Messages - Telemetry - Traceroutes +- Routing Messages ### Upcoming @@ -59,16 +61,27 @@ If you're interested in aeronautical (ADS-B/ACARS/VDL/HFDL/SATCOM) or ship track If you're using this and have questions, or perhaps you want to join in on the dev effort and want to interact collaboratively, come chat with us on [#meshinfo on Meshtastic ZA Discord](https://discord.gg/cmFCKBxY). +## Documentation + +📚 **Complete documentation is available in the [docs/](docs/) directory:** + +- **[Setup Guide](docs/SETUP_DOCKER.md)** - Docker Compose installation (recommended) +- **[Manual Setup](docs/SETUP.md)** - Traditional installation guide +- **[Caching & Performance](docs/CACHING.md)** - Memory management and optimization +- **[Contributing](docs/CONTRIBUTING.md)** - How to contribute to the project + ## Running ### Docker Compose (preferred for 24/7 servers) -#### Setup +For detailed Docker setup instructions, see **[docs/SETUP_DOCKER.md](docs/SETUP_DOCKER.md)**. 
+ +#### Quick Setup ##### Clone the repo ```sh -git clone https://github.com/dadecoza/meshinfo-lite.git +git clone https://github.com/agessaman/meshinfo-lite.git cd meshinfo-lite ``` @@ -123,7 +136,7 @@ python main.py Clone the repository. ```sh -git clone https://github.com/dadecoza/meshinfo-lite.git +git clone https://github.com/agessaman/meshinfo-lite.git ``` If already existing, be sure to pull updates. @@ -150,9 +163,7 @@ the templates. ## Contributing -We happily accept Pull Requests! - -TODO: Need to rewrite this section. +We happily accept Pull Requests! Please see **[docs/CONTRIBUTING.md](docs/CONTRIBUTING.md)** for detailed guidelines and **[docs/CODE_OF_CONDUCT.md](docs/CODE_OF_CONDUCT.md)** for our community standards. ## Meshtastic node settings diff --git a/cache_monitor.py b/cache_monitor.py new file mode 100644 index 00000000..f982fed0 --- /dev/null +++ b/cache_monitor.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +""" +Cache monitoring script for meshinfo-lite +Monitors database query cache and application cache performance +""" + +import requests +import json +import time +import psutil +import logging +from datetime import datetime + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s' +) + +def get_cache_stats(base_url="http://localhost:8001"): + """Get cache statistics from the application.""" + try: + # Get application cache stats + response = requests.get(f"{base_url}/api/debug/cache", timeout=5) + if response.status_code == 200: + return response.json() + except Exception as e: + logging.error(f"Error getting cache stats: {e}") + return None + +def get_database_cache_stats(base_url="http://localhost:8001"): + """Get database cache statistics.""" + try: + response = requests.get(f"{base_url}/api/debug/database-cache", timeout=5) + if response.status_code == 200: + return response.json() + except Exception as e: + logging.error(f"Error getting database cache stats: {e}") + return 
None + +def get_memory_usage(): + """Get current memory usage.""" + process = psutil.Process() + memory_info = process.memory_info() + return { + 'rss_mb': memory_info.rss / 1024 / 1024, + 'vms_mb': memory_info.vms / 1024 / 1024, + 'percent': process.memory_percent() + } + +def monitor_cache_performance(duration_minutes=10, interval_seconds=30): + """Monitor cache performance for a specified duration.""" + logging.info(f"Starting cache monitoring for {duration_minutes} minutes") + + start_time = time.time() + end_time = start_time + (duration_minutes * 60) + + while time.time() < end_time: + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + # Get memory usage + memory = get_memory_usage() + + # Get cache stats + cache_stats = get_cache_stats() + db_cache_stats = get_database_cache_stats() + + # Log results + logging.info(f"[{timestamp}] Memory: {memory['rss_mb']:.1f}MB RSS, {memory['percent']:.1f}%") + + if db_cache_stats and 'database_cache_stats' in db_cache_stats: + db_stats = db_cache_stats['database_cache_stats'] + logging.info(f"[{timestamp}] DB Cache: {db_stats.get('hits', 0)} hits, {db_stats.get('misses', 0)} misses") + + if cache_stats: + logging.info(f"[{timestamp}] App Cache: {cache_stats.get('message', 'No stats available')}") + + time.sleep(interval_seconds) + + logging.info("Cache monitoring completed") + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Monitor cache performance") + parser.add_argument("--duration", type=int, default=10, help="Monitoring duration in minutes") + parser.add_argument("--interval", type=int, default=30, help="Check interval in seconds") + parser.add_argument("--url", default="http://localhost:8001", help="Application base URL") + + args = parser.parse_args() + + monitor_cache_performance(args.duration, args.interval) \ No newline at end of file diff --git a/config.ini.sample b/config.ini.sample index 10cc3e26..17c31678 100644 --- a/config.ini.sample +++ 
b/config.ini.sample @@ -1,16 +1,22 @@ [mesh] -name=ZA Mesh -description=Serving Meshtastic to South Africa. -contact=dade@dade.co.za -url=https://mesh.zr1rf.za.net -latitude=-33.918861 -longitude=18.423300 +name=Your Mesh Name +short_name=YourMesh +region=Your Mesh Region +description=Serving Meshtastic to your region. +contact=https://yourmesh.org +url=https://yourmesh.org +# Optional: link to your mesh's configuration guide +config_url=https://yourmesh.org/config-instructions +# Optional: link to your mesh's Discord or community chat +discord_url=https://discord.gg/yourdiscord +latitude=0.0 +longitude=0.0 channel_key=1PG7OiApB1nwvP+rz05pAQ== [mqtt] -broker=mqtt.zr1rf.za.net +broker=mqtt.yourmesh.org port=1883 -topic=msh/ZA/# +topic=msh/US/# username=meshdev password=large4cats @@ -25,10 +31,17 @@ port=8000 [server] node_activity_prune_threshold=7200 +node_location_prune_threshold=86400 render_interval=15 debug=true timezone=America/Los_Angeles zero_hop_timeout=43200 +telemetry_retention_days = 30 +metrics_average_interval=7200 + +[channels] +# Comma-separated list of channel names to ignore +#ignored_channels=PKI [geocoding] enabled=false @@ -36,10 +49,65 @@ apikey=YOUR_KEY_HERE [registrations] enabled=true -jwt_secret=6219c1c2364499639ce9d16c33863c4f6fedb7a4a1a0f29524c20d95cb00e5f5 +jwt_secret=YOUR_JWT_SECRET [smtp] email=YOUR_EMAIL_ADDRESS password=SMTP_PASSWORD server=SMTP_SERVER -port=SMTP_PORT \ No newline at end of file +port=SMTP_PORT + +[los] +enabled=false +max_distance=10000 +cache_duration=43200 + +[tools] +meshmap=https://meshmap.net/ +meshmap_label=MeshMap +meshmap_link="https://meshmap.net/#{{ node.id }}" +pugetmesh_map=https://mqtt.davekeogh.com/?lat=47.60292227835496&lng=237.49420166015628&zoom=10 +pugetmesh_map_label=PugetMesh Map +pugetmesh_map_link="https://meshtastic.pugetmesh.org/?node_id={{ node.id }}" +meshsense_label=MeshSense Map +meshsense=https://meshsense.affirmatech.com/ + +[theme] +# Header background color (navbar) 
+header_color=#9fdef9 +# Header brand/title color +header_brand_color=#000 +# Header navigation link color +header_link_color=#555 +# Header active navigation link color +header_link_active_color=#000 +# Accent color (login/logout button, etc.) +accent_color=#17a2b8 +# Page background color +page_background_color=#ffffff +# Table header background color +table_header_color=#D7F9FF +# Table subheader background color +table_subheader_color=#e1ebef +# Table border color +table_border_color=#dee2e6 +# Table alternating row background color +table_alternating_row_color=#f0f0f0 +# Link color +link_color=#007bff +# Link hover color +link_color_hover=#0056b3 +# Control color (buttons, form controls) +control_color=#17a2b8 +# Control hover color +control_color_hover=#1396a5 +# Chat box background color +chat_box_background_color=#f0f0f0 +# Chat box border color (leave empty for no border) +chat_box_border_color= +# Banner background color (e.g. for newest node welcome in nodes page) +banner_background_color=#F9F9D7 +# Favicon background color override (optional, falls back to accent_color if not set) +favicon_background_color= +# Favicon logo (stroke) color override (optional, falls back to auto-contrast if not set) +favicon_logo_color= \ No newline at end of file diff --git a/custom.cnf b/custom.cnf new file mode 100644 index 00000000..7a5e364a --- /dev/null +++ b/custom.cnf @@ -0,0 +1,43 @@ +[mysqld] +# Memory allocation - ultra-conservative settings for small-scale project (3K nodes, 350 active) +aria_pagecache_buffer_size = 8M +aria_sort_buffer_size = 4M +innodb_buffer_pool_size = 16M +key_buffer_size = 8M +myisam_sort_buffer_size = 4M + +# Query cache configuration - enable globally +query_cache_type = 1 +query_cache_size = 8M +query_cache_limit = 512K +query_cache_min_res_unit = 2048 +query_cache_strip_comments = 1 +query_cache_wlock_invalidate = 0 + +# Connection and performance settings - reduced for small scale +max_connections = 20 +thread_cache_size = 4 
+table_open_cache = 200 +table_definition_cache = 100 + +# InnoDB settings - minimal for small dataset +innodb_log_file_size = 8M +innodb_log_buffer_size = 4M +innodb_flush_log_at_trx_commit = 2 +innodb_file_per_table = 1 + +# Query optimization - minimal buffers +tmp_table_size = 8M +max_heap_table_size = 8M +sort_buffer_size = 1M +read_buffer_size = 512K +read_rnd_buffer_size = 2M + +# Slow query log for optimization +slow_query_log = 1 +slow_query_log_file = /var/log/mysql/slow.log +long_query_time = 2 + +# Character set +character_set_server = utf8mb4 +collation_server = utf8mb4_unicode_ci diff --git a/custom.cnf.sample b/custom.cnf.sample new file mode 100644 index 00000000..7a5e364a --- /dev/null +++ b/custom.cnf.sample @@ -0,0 +1,43 @@ +[mysqld] +# Memory allocation - ultra-conservative settings for small-scale project (3K nodes, 350 active) +aria_pagecache_buffer_size = 8M +aria_sort_buffer_size = 4M +innodb_buffer_pool_size = 16M +key_buffer_size = 8M +myisam_sort_buffer_size = 4M + +# Query cache configuration - enable globally +query_cache_type = 1 +query_cache_size = 8M +query_cache_limit = 512K +query_cache_min_res_unit = 2048 +query_cache_strip_comments = 1 +query_cache_wlock_invalidate = 0 + +# Connection and performance settings - reduced for small scale +max_connections = 20 +thread_cache_size = 4 +table_open_cache = 200 +table_definition_cache = 100 + +# InnoDB settings - minimal for small dataset +innodb_log_file_size = 8M +innodb_log_buffer_size = 4M +innodb_flush_log_at_trx_commit = 2 +innodb_file_per_table = 1 + +# Query optimization - minimal buffers +tmp_table_size = 8M +max_heap_table_size = 8M +sort_buffer_size = 1M +read_buffer_size = 512K +read_rnd_buffer_size = 2M + +# Slow query log for optimization +slow_query_log = 1 +slow_query_log_file = /var/log/mysql/slow.log +long_query_time = 2 + +# Character set +character_set_server = utf8mb4 +collation_server = utf8mb4_unicode_ci diff --git a/database_cache.py b/database_cache.py new file 
mode 100644 index 00000000..d1d501a5 --- /dev/null +++ b/database_cache.py @@ -0,0 +1,218 @@ +""" +Database Cache Module + +This module provides database connection pooling and query caching functionality +for the MeshInfo-Lite application. It manages database connections per thread +and provides methods for clearing the MariaDB query cache. + +Key Features: +- Thread-safe connection pooling +- Automatic connection validation and cleanup +- Query cache statistics tracking +- Flask app context independent cache clearing +""" + +import mysql.connector +import logging +import threading +import time +from datetime import datetime + + +class DatabaseCache: + """Database connection pooling and query caching.""" + + def __init__(self, config): + self.config = config + self._connection_pool = {} + self._cache_lock = threading.Lock() + self._query_cache_stats = { + 'hits': 0, + 'misses': 0, + 'last_reset': time.time() + } + + def get_connection(self): + """Get a database connection from the pool or create a new one.""" + thread_id = threading.get_ident() + + with self._cache_lock: + if thread_id in self._connection_pool: + conn = self._connection_pool[thread_id] + try: + # Test if connection is still valid + if conn.is_connected(): + return conn + else: + # Remove stale connection + del self._connection_pool[thread_id] + except Exception: + # Remove invalid connection + del self._connection_pool[thread_id] + + # Create new connection + try: + conn = mysql.connector.connect( + host=self.config["database"]["host"], + user=self.config["database"]["username"], + password=self.config["database"]["password"], + database=self.config["database"]["database"], + charset="utf8mb4", + connection_timeout=10 + ) + + with self._cache_lock: + self._connection_pool[thread_id] = conn + + logging.debug(f"Created new database connection for thread {thread_id}") + return conn + + except Exception as e: + logging.error(f"Failed to create database connection: {e}") + raise + + def 
close_connection(self): + """Close the database connection for the current thread.""" + thread_id = threading.get_ident() + + with self._cache_lock: + if thread_id in self._connection_pool: + conn = self._connection_pool[thread_id] + try: + if conn.is_connected(): + conn.close() + logging.debug(f"Closed database connection for thread {thread_id}") + except Exception as e: + logging.warning(f"Error closing database connection: {e}") + finally: + del self._connection_pool[thread_id] + + def execute_cached_query(self, sql, params=None, cache_key=None, timeout=None): + """Execute a query with database-level caching.""" + conn = self.get_connection() + cursor = conn.cursor(dictionary=True) + + try: + # Execute query - global query cache will handle caching automatically + cursor.execute(sql, params or ()) + results = cursor.fetchall() + + with self._cache_lock: + self._query_cache_stats['hits'] += 1 + + return results + + except Exception as e: + with self._cache_lock: + self._query_cache_stats['misses'] += 1 + logging.error(f"Database query error: {e}") + raise + finally: + cursor.close() + + def get_cache_stats(self): + """Get query cache statistics.""" + with self._cache_lock: + return self._query_cache_stats.copy() + + def clear_query_cache(self): + """Clear the database query cache.""" + try: + # Create a direct connection to avoid Flask app context issues + import mysql.connector + conn = mysql.connector.connect( + host=self.config["database"]["host"], + user=self.config["database"]["username"], + password=self.config["database"]["password"], + database=self.config["database"]["database"], + charset="utf8mb4", + connection_timeout=10 + ) + cursor = conn.cursor() + try: + # Try to clear query cache, but handle permission errors gracefully + try: + cursor.execute("FLUSH QUERY CACHE") + logging.info("FLUSH QUERY CACHE executed successfully") + except mysql.connector.Error as e: + if e.errno == 1227: # Access denied for RELOAD privilege + logging.warning("Cannot FLUSH 
QUERY CACHE - insufficient privileges (RELOAD required)") + else: + logging.error(f"Error executing FLUSH QUERY CACHE: {e}") + + try: + cursor.execute("RESET QUERY CACHE") + logging.info("RESET QUERY CACHE executed successfully") + except mysql.connector.Error as e: + if e.errno == 1227: # Access denied for RELOAD privilege + logging.warning("Cannot RESET QUERY CACHE - insufficient privileges (RELOAD required)") + else: + logging.error(f"Error executing RESET QUERY CACHE: {e}") + + # If we get here, at least one command succeeded or we handled the errors gracefully + logging.info("Database query cache clear operation completed") + + except Exception as e: + logging.error(f"Error during query cache clear operations: {e}") + finally: + cursor.close() + conn.close() + except Exception as e: + logging.error(f"Error creating connection for cache clear: {e}") + + def close_all_connections(self): + """Close all database connections in the pool.""" + with self._cache_lock: + for thread_id, conn in list(self._connection_pool.items()): + try: + if conn.is_connected(): + conn.close() + logging.debug(f"Closed database connection for thread {thread_id}") + except Exception as e: + logging.warning(f"Error closing database connection: {e}") + self._connection_pool.clear() + + def check_privileges(self): + """Check if the database user has required privileges for cache operations.""" + try: + import mysql.connector + conn = mysql.connector.connect( + host=self.config["database"]["host"], + user=self.config["database"]["username"], + password=self.config["database"]["password"], + database=self.config["database"]["database"], + charset="utf8mb4", + connection_timeout=10 + ) + cursor = conn.cursor() + + privileges = { + 'reload': False, + 'query_cache': False + } + + try: + # Check RELOAD privilege by attempting a simple FLUSH command + cursor.execute("FLUSH QUERY CACHE") + privileges['reload'] = True + privileges['query_cache'] = True + logging.info("Database user has RELOAD 
privilege") + except mysql.connector.Error as e: + if e.errno == 1227: # Access denied for RELOAD privilege + logging.warning("Database user lacks RELOAD privilege - query cache operations will be limited") + privileges['reload'] = False + privileges['query_cache'] = False + else: + logging.error(f"Error checking RELOAD privilege: {e}") + + cursor.close() + conn.close() + return privileges + + except Exception as e: + logging.error(f"Error checking database privileges: {e}") + return {'reload': False, 'query_cache': False} + + def __del__(self): + """Cleanup when the object is destroyed.""" + self.close_all_connections() \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 2de80d13..c969cabf 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,8 +9,11 @@ services: MYSQL_PASSWORD: passw0rd volumes: - ./mysql_data:/var/lib/mysql + - ./custom.cnf:/etc/mysql/conf.d/custom.cnf networks: - backend + labels: + - "com.centurylinklabs.watchtower.enable=false" meshinfo: build: context: . @@ -19,6 +22,7 @@ services: - mariadb volumes: - ./config.ini:/app/config.ini + - ./srtm_data:/app/srtm_data environment: - PYTHONUNBUFFERED=1 ports: @@ -26,7 +30,8 @@ services: networks: - backend restart: always - + labels: + - "com.centurylinklabs.watchtower.enable=false" networks: backend: driver: bridge diff --git a/docker-compose.yml.sample b/docker-compose.yml.sample new file mode 100644 index 00000000..b0e09df7 --- /dev/null +++ b/docker-compose.yml.sample @@ -0,0 +1,34 @@ +services: + mariadb: + image: mariadb + restart: always + environment: + MYSQL_ROOT_PASSWORD: passw0rd + MYSQL_DATABASE: meshdata + MYSQL_USER: meshdata + MYSQL_PASSWORD: passw0rd + volumes: + - ./mysql_data:/var/lib/mysql + - ./custom.cnf:/etc/mysql/conf.d/custom.cnf + networks: + - backend + meshinfo: + build: + context: . 
+ dockerfile: Dockerfile + depends_on: + - mariadb + volumes: + - ./config.ini:/app/config.ini + - ./srtm_data:/app/srtm_data + # - ./logo.webp:/app/www/images/logos/logo.webp + environment: + - PYTHONUNBUFFERED=1 + ports: + - 8001:8000 + networks: + - backend + restart: always +networks: + backend: + driver: bridge \ No newline at end of file diff --git a/docker_setup.sh b/docker_setup.sh new file mode 100755 index 00000000..b7641c50 --- /dev/null +++ b/docker_setup.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Docker Setup Script for MeshInfo-Lite +# This script can be run inside the Docker container to set up database privileges + +set -e + +echo "=== MeshInfo-Lite Docker Database Setup ===" + +# Wait for database to be ready +echo "Waiting for database to become available..." +for i in {1..30}; do + if mysql -h mariadb -u root -ppassw0rd -e "SELECT 1" >/dev/null 2>&1; then + echo "✓ Database is available" + break + fi + if [ $i -eq 30 ]; then + echo "✗ Database failed to become available" + exit 1 + fi + echo "Database not ready yet (attempt $i/30)..." + sleep 2 +done + +# Grant privileges +echo "Setting up database privileges..." + +mysql -h mariadb -u root -ppassw0rd << EOF +-- Grant RELOAD privilege for query cache operations +GRANT RELOAD ON *.* TO 'meshdata'@'%'; + +-- Grant PROCESS privilege for monitoring +GRANT PROCESS ON *.* TO 'meshdata'@'%'; + +-- Apply changes +FLUSH PRIVILEGES; +EOF + +echo "✓ Granted RELOAD privilege for query cache operations" +echo "✓ Granted PROCESS privilege for monitoring" +echo "✓ Privileges flushed" + +# Test privileges +echo "Testing privileges..." 
+if mysql -h mariadb -u meshdata -ppassw0rd meshdata -e "FLUSH QUERY CACHE" >/dev/null 2>&1; then + echo "✓ RELOAD privilege verified" +else + echo "⚠ RELOAD privilege test failed" +fi + +if mysql -h mariadb -u meshdata -ppassw0rd meshdata -e "SHOW PROCESSLIST" >/dev/null 2>&1; then + echo "✓ PROCESS privilege verified" +else + echo "⚠ PROCESS privilege test failed" +fi + +echo "=== Docker Setup Complete ===" +echo "The MeshInfo-Lite application should now have full functionality" \ No newline at end of file diff --git a/docs/CACHING.md b/docs/CACHING.md new file mode 100644 index 00000000..f612db5c --- /dev/null +++ b/docs/CACHING.md @@ -0,0 +1,154 @@ +# Caching and Memory Management in MeshInfo-Lite + +## Overview + +MeshInfo-Lite uses a multi-layered caching system to optimize performance while managing memory usage effectively. The system combines database-level caching, application-level caching, and Flask-Caching to provide fast data access while preventing memory leaks. + +## Architecture + +### Database Layer +- **MariaDB Query Cache**: Enabled globally with conservative memory settings (~128MB) +- **DatabaseCache Class**: Connection pooling and query result caching +- **Application-Level Cache**: In-memory caching of frequently accessed data + +### Application Layer +- **Flask-Caching**: FileSystemCache with configurable timeouts and thresholds +- **MeshData.get_nodes_cached()**: Centralized nodes dictionary caching (60-second TTL) +- **Conditional Feature Loading**: LOS profiles and other heavy features loaded only when needed + +### Memory Management +- **Explicit Cleanup**: Manual cleanup of large objects and temporary data +- **Garbage Collection**: Forced GC after memory-intensive operations +- **Memory Monitoring**: Real-time tracking with automatic cleanup triggers + +## Key Changes Made + +### 1. 
Database Configuration +- Reduced MariaDB memory allocation from ~512MB to ~128MB +- Enabled query cache globally with appropriate timeout settings +- Removed manual SQL_CACHE hints that caused errors + +### 2. Application Caching +- Centralized nodes dictionary caching in `MeshData.get_nodes_cached()` +- Implemented cache timeout configuration from `config.ini` +- Added cache clearing functions for manual cleanup + +### 3. Memory Leak Prevention +- Modified `get_node_page_data()` to accept nodes as parameter +- Pass minimal node subsets to `LOSProfile` only when LOS is enabled +- Explicit cleanup of `LOSProfile` instances and temporary data +- Forced garbage collection after node page rendering + +### 4. Route Optimization +- Optimized routes to avoid multiple calls to `get_cached_nodes()` +- Created simplified nodes dictionaries with only needed data +- Implemented conditional loading of heavy features + +### 5. Monitoring and Debugging +- Added detailed memory usage tracking and analysis +- Implemented cache statistics monitoring +- Created debug endpoints for manual cleanup and analysis +- Added memory watchdog with automatic cleanup triggers + +## Configuration + +### MariaDB Settings (`custom.cnf`) +```ini +[mysqld] +query_cache_type = 1 +query_cache_size = 64M +query_cache_limit = 2M +query_cache_min_res_unit = 4K +``` + +### Flask-Caching Settings (`config.ini`) +```ini +[server] +app_cache_timeout_seconds = 60 +app_cache_max_entries = 100 +zero_hop_timeout = 43200 +``` + +## Best Practices + +1. **Conservative Memory Settings**: Use minimal memory allocation suitable for small-scale projects +2. **Explicit Cleanup**: Always clean up large objects and temporary data +3. **Conditional Loading**: Load heavy features only when needed +4. **Regular Monitoring**: Monitor memory usage and cache statistics +5. 
**Graceful Degradation**: Handle cache failures gracefully with fallbacks + +## Troubleshooting + +### Memory Issues +- Check memory usage with `/api/debug/memory` +- Clear caches with `/api/debug/cleanup` +- Monitor cache statistics with `/api/debug/cache` + +### Performance Issues +- Verify database query cache is enabled +- Check application cache hit rates +- Review cache timeout settings + +### Database Issues +- Ensure MariaDB is properly configured +- Check connection pool settings +- Verify query cache is working + +### Database Privileges +- **RELOAD Privilege Required**: The database user needs the `RELOAD` privilege to clear the query cache +- **Check Privileges**: Use `/api/debug/database-cache` to check current privileges +- **Grant Privileges**: If needed, grant RELOAD privilege to the database user: + ```sql + GRANT RELOAD ON *.* TO 'username'@'host'; + FLUSH PRIVILEGES; + ``` +- **Graceful Degradation**: The system will continue to work without RELOAD privilege, but query cache clearing will be limited + +## Installation and Setup + +### New Installations + +For new installations, use the database setup script to create the database and user with proper privileges: + +```bash +python setup_database.py +``` + +This script will: +- Create the database with proper character encoding +- Create the application user +- Grant all necessary privileges including RELOAD for query cache operations +- Test the connection and privileges +- Provide detailed feedback on the setup process + +### Configuration Requirements + +Add the following to your `config.ini`: + +```ini +[database] +host = localhost +username = meshdata +password = your_password +database = meshdata +root_password = your_root_password # For setup script only +``` + +### Manual Database Setup + +If you prefer to set up the database manually, ensure the user has these privileges: + +```sql +-- Connect as root +GRANT ALL PRIVILEGES ON meshdata.* TO 'meshdata'@'%'; +GRANT RELOAD ON *.* TO 
'meshdata'@'%'; +GRANT PROCESS ON *.* TO 'meshdata'@'%'; +FLUSH PRIVILEGES; +``` + +## Maintenance + +- Cache cleanup runs automatically every 15 minutes +- Memory watchdog monitors usage and triggers cleanup when needed +- Debug endpoints available for manual intervention +- Logs provide detailed information about cache and memory usage \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md similarity index 100% rename from CODE_OF_CONDUCT.md rename to docs/CODE_OF_CONDUCT.md diff --git a/CONTRIBUTING.md b/docs/CONTRIBUTING.md similarity index 100% rename from CONTRIBUTING.md rename to docs/CONTRIBUTING.md diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..49d4c224 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,42 @@ +# MeshInfo-Lite Documentation + +This directory contains comprehensive documentation for MeshInfo-Lite, a real-time web UI for Meshtastic regional or private mesh networks. + +## 📚 Documentation Index + +### Setup Guides +- **[SETUP.md](SETUP.md)** - Complete manual installation and setup guide +- **[SETUP_DOCKER.md](SETUP_DOCKER.md)** - Docker Compose installation and setup guide + +### Technical Documentation +- **[CACHING.md](CACHING.md)** - Detailed explanation of caching architecture and memory management + +### Community & Contributing +- **[CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md)** - Community guidelines and code of conduct +- **[CONTRIBUTING.md](CONTRIBUTING.md)** - How to contribute to the project + +### Screenshots +The following screenshots demonstrate various features of MeshInfo-Lite: +- `meshinfo1.png` - Main dashboard view +- `meshinfo2.png` - Node details page +- `meshinfo3.png` - Network graph visualization +- `meshinfo4.png` - Message chat interface +- `meshinfo5.png` - Telemetry monitoring +- `meshinfo6.png` - Map view with node positions +- `meshinfo7.png` - Traceroute visualization + +## 🚀 Quick Start + +For new users, we recommend starting with: +1. 
**[SETUP_DOCKER.md](SETUP_DOCKER.md)** - If you prefer Docker (recommended) +2. **[SETUP.md](SETUP.md)** - If you prefer manual installation + +## 🔧 Advanced Topics + +For advanced users and developers: +- **[CACHING.md](CACHING.md)** - Understanding the caching system and memory optimization +- **[CONTRIBUTING.md](CONTRIBUTING.md)** - How to contribute to the project + +## 📖 Main README + +The main project README is located at the root of the repository: [../README.md](../README.md) \ No newline at end of file diff --git a/docs/README_CLEANUP.md b/docs/README_CLEANUP.md new file mode 100644 index 00000000..0cd41697 --- /dev/null +++ b/docs/README_CLEANUP.md @@ -0,0 +1,275 @@ +# MeshInfo Code Cleanup Summary + +## ✅ **Completed Cleanup & Refactor** + +### **1. Created Modular Structure** + +#### **`meshinfo_api.py`** - API Endpoints +- All `/api/*` routes moved here +- Flask Blueprint with `url_prefix='/api'` +- Self-contained functions to avoid circular imports +- Clean separation of API logic from web routes +- **Enhanced with comprehensive telemetry and utilization endpoints** + +#### **`meshinfo_utils.py`** - Shared Utilities +- Common functions used across modules +- Database connection management +- Cache management functions +- Memory monitoring utilities +- Time formatting helpers +- **Node distance calculations and relay matching logic** + +#### **`meshinfo_web.py`** - Main Web Application +- Only web routes (HTML pages) remain +- API blueprint registered +- Cleaner, more focused structure +- **All 28 page routes properly restored** +- **Proper imports from utils module** + +#### **Removed `app.py`** +- Duplicate content eliminated +- No more conflicting routes + +### **2. 
Enhanced Architecture** + +#### **Blueprint-based API Design** +- **Clean separation** of API and web routes +- **Self-contained modules** with proper imports +- **Scalable structure** for adding new endpoints +- **Consistent error handling** across all endpoints + +#### **Optimized Database Integration** +- **Efficient JOIN queries** for complex data relationships +- **Enhanced time-based aggregation** with proper SQL functions +- **Improved caching** to reduce database load +- **Robust connection management** with proper cleanup + +#### **Enhanced Caching Strategy** +- **Blueprint-aware caching** to avoid context issues +- **Proper cache timeouts** based on data volatility +- **Memory-efficient caching** with cleanup mechanisms +- **Fallback strategies** when cache fails + +### **3. API Endpoints Enhanced** + +#### **`/api/utilization-data`** ✅ +- **Complex telemetry calculations** with contact distance analysis +- **Advanced database queries** joining telemetry with node position data +- **Moving averages** and proper time bucketing for smooth data visualization +- **Channel filtering** support for focused analysis +- **Enhanced data aggregation** with comprehensive metrics + +#### **`/api/telemetry/`** ✅ +- **Simplified to use MeshData methods** for consistency and reliability +- **Enhanced error handling** and data validation +- **Optimized data retrieval** with proper caching +- **Comprehensive telemetry data** including environmental metrics + +#### **`/api/environmental-telemetry/`** ✅ +- **Updated method signature** to match MeshData implementation +- **Consistent with telemetry endpoint** approach +- **Environmental data integration** with proper validation +- **Enhanced data formatting** for frontend consumption + +#### **`/api/geocode`** ✅ +- **Reverse geocoding implementation** (coordinates to address) +- **Enhanced coordinate validation** with proper error handling +- **Fallback handling** for geocoding service failures +- **Improved parameter 
handling** with lat/lon support + +#### **`/api/metrics`** ✅ +- **Complete time range support**: day, week, month, year, all +- **Dynamic bucket sizing** based on time range for optimal data granularity +- **Enhanced time formatting** for different granularities +- **Improved data aggregation** with comprehensive SQL queries +- **Moving average calculations** for smooth trend visualization + +#### **`/api/node-positions`** ✅ +- **Optimized caching strategy** for better performance +- **Enhanced error handling** for missing nodes +- **Efficient batch processing** for multiple node requests +- **Proper coordinate validation** and formatting + +#### **`/api/hardware-models`** ✅ +- **Comprehensive model statistics** with detailed analytics +- **Sample node names** for each hardware model +- **Enhanced icon generation** for hardware models +- **Improved data organization** with proper categorization + +### **4. Page Routes Completely Restored** + +#### **All 28 Page Routes Verified** ✅ +1. `/message_map.html` - Message map visualization +2. `/traceroute_map.html` - Traceroute map visualization +3. `/graph.html` - Network graph visualization +4. `/graph2.html` - Alternative graph view +5. `/graph3.html` - Alternative graph view +6. `/graph4.html` - Alternative graph view +7. `/utilization-heatmap.html` - Utilization heatmap +8. `/utilization-hexmap.html` - Utilization hexmap +9. `/map.html` - Main map view +10. `/neighbors.html` - Neighbors view +11. `/telemetry.html` - Telemetry data +12. `/traceroutes.html` - Traceroutes list +13. `/logs.html` - System logs +14. `/monday.html` - Meshtastic Monday +15. `/mynodes.html` - User's nodes +16. `/linknode.html` - Node linking +17. `/register.html` - User registration +18. `/login.html` - User login +19. `/logout.html` - User logout +20. `/verify` - Account verification +21. `/` - Static files and dynamic node pages +22. `/metrics.html` - Metrics dashboard +23. `/chat-classic.html` - Classic chat interface +24. 
`/chat.html` - Modern chat interface +25. `/` - Index/home page +26. `/nodes.html` - Active nodes list +27. `/allnodes.html` - All nodes list +28. `/message-paths.html` - Message path analysis + +#### **Helper Functions Restored** ✅ +- `get_cached_nodes()` - Cached node data +- `get_cached_active_nodes()` - Cached active nodes +- `get_cached_latest_node()` - Cached latest node +- `get_cached_message_map_data()` - Cached message map data +- `get_cached_graph_data()` - Cached graph data +- `get_cached_neighbors_data()` - Cached neighbors data +- `get_cached_chat_data()` - Cached chat data (imported from utils) +- `get_cached_hardware_models()` - Cached hardware models +- `calculate_node_distance()` - Node distance calculation (imported from utils) +- `get_node_page_data()` - Node page data (imported from utils) + +### **5. Best Practices Implemented** + +#### **Blueprint-based Architecture** +```python +# meshinfo_api.py +from flask import Blueprint +api = Blueprint('api', __name__, url_prefix='/api') + +# meshinfo_web.py +from meshinfo_api import api +app.register_blueprint(api) +``` + +#### **Separation of Concerns** +- **`meshinfo_web.py`**: Web routes (HTML pages, templates) +- **`meshinfo_api.py`**: API routes (JSON endpoints) +- **`meshinfo_utils.py`**: Shared utilities and helpers + +#### **Clean Import Patterns** +```python +# Avoid circular imports +from meshinfo_utils import get_meshdata, get_cache_timeout, auth, calculate_node_distance +``` + +#### **Enhanced Error Handling** +- **Database connection failures** properly handled +- **Missing data scenarios** gracefully managed +- **Invalid parameters** validated and rejected +- **Cache failures** fallback to direct database queries + +### **6. 
File Structure** + +``` +meshinfo-lite/ +├── meshinfo_web.py # Main Flask app + web routes (28 routes) +├── meshinfo_api.py # API endpoints (/api/*) - 10+ endpoints +├── meshinfo_utils.py # Shared utilities + helper functions +├── meshinfo_web_backup.py # Backup of original file (reference) +├── templates/ # HTML templates +├── static/ # Static files +└── config.ini # Configuration file +``` + +### **7. Benefits Achieved** + +#### **Maintainability** +- Each file has a single responsibility +- Easy to locate and modify specific functionality +- Clear separation of concerns +- **All functions properly organized and documented** + +#### **Scalability** +- Easy to add new API endpoints in `meshinfo_api.py` +- Easy to add new web pages in `meshinfo_web.py` +- Shared utilities available to all modules +- **Enhanced caching strategy** for better performance + +#### **Testability** +- Each module can be tested independently +- Clear dependencies and imports +- No circular import issues +- **Proper error handling** for testing edge cases + +#### **Team Development** +- Multiple developers can work on different modules +- Clear ownership of different parts of the application +- Reduced merge conflicts +- **Comprehensive backup** for reference + +### **8. 
Technical Improvements** + +#### **Database Query Optimization** +- **Enhanced JOIN queries** for complex data relationships +- **Improved time-based aggregation** with proper SQL functions +- **Optimized caching** to reduce database load +- **Better connection management** with proper cleanup + +#### **API Response Enhancement** +- **Consistent JSON structure** across all endpoints +- **Proper HTTP status codes** for different scenarios +- **Enhanced error messages** for debugging +- **Improved data validation** and sanitization + +#### **Caching Strategy** +- **Blueprint-aware caching** to avoid context issues +- **Proper cache timeouts** based on data volatility +- **Memory-efficient caching** with cleanup mechanisms +- **Fallback strategies** when cache fails + +### **9. Verification** + +All imports work correctly: +```bash +✅ python -c "from meshinfo_web import app; print('Main app imported successfully')" +✅ python -c "from meshinfo_api import api; print('API blueprint imported successfully')" +✅ python -c "from meshinfo_utils import get_meshdata, get_cache_timeout, auth, calculate_node_distance; print('Utilities imported successfully')" +``` + +All endpoints tested: +```bash +✅ /api/metrics - All time ranges working (day, week, month, year, all) +✅ /api/utilization-data - Complex telemetry calculations restored +✅ /api/telemetry/ - Simplified to use MeshData methods +✅ /api/environmental-telemetry/ - Updated method signature +✅ /api/geocode - Reverse geocoding implemented +✅ /api/node-positions - Caching issues fixed +✅ /api/hardware-models - Enhanced with comprehensive statistics +``` + +### **10. Flask Best Practices Followed** + +1. **Blueprint Usage**: Proper separation of route groups +2. **Configuration Management**: Centralized config handling +3. **Error Handling**: Consistent error responses across all endpoints +4. **Documentation**: Clear docstrings and comments +5. **Import Organization**: Clean, logical import structure +6. 
**Separation of Concerns**: Each module has a specific purpose +7. **Caching Strategy**: Proper cache management and cleanup +8. **Database Optimization**: Efficient queries and connection management + +## 🎉 **Cleanup & Refactor Complete!** + +The codebase is now properly organized with: +- ✅ **No duplicate routes** +- ✅ **No circular imports** +- ✅ **Clear separation of concerns** +- ✅ **Modular, maintainable structure** +- ✅ **Follows Flask best practices** +- ✅ **All API endpoints restored and enhanced** +- ✅ **All page routes properly implemented** +- ✅ **Comprehensive error handling** +- ✅ **Optimized database queries** +- ✅ **Enhanced caching strategy** \ No newline at end of file diff --git a/docs/SETUP.md b/docs/SETUP.md new file mode 100644 index 00000000..41d5849a --- /dev/null +++ b/docs/SETUP.md @@ -0,0 +1,151 @@ +# MeshInfo-Lite Setup Guide + +This guide will help you set up MeshInfo-Lite with proper database privileges for optimal performance. + +## Setup Options + +- **[Docker Compose Setup](SETUP_DOCKER.md)** - Recommended for most users +- **Manual Setup** - For traditional installations (see below) + +## Prerequisites + +- Python 3.7 or higher +- MariaDB/MySQL server +- Root access to the database server + +## Quick Setup + +### 1. Configure Database Settings + +Edit your `config.ini` file and add the database section: + +```ini +[database] +host = localhost +username = meshdata +password = your_secure_password +database = meshdata +root_password = your_root_password +``` + +### 2. Run Database Setup + +Execute the database setup script: + +```bash +python setup_database.py +``` + +This script will: +- ✅ Create the database with proper character encoding +- ✅ Create the application user +- ✅ Grant all necessary privileges +- ✅ Test the connection and privileges +- ✅ Provide detailed feedback + +### 3. 
Start the Application + +Once setup is complete, start the application: + +```bash +python main.py +``` + +## Manual Setup (Alternative) + +If you prefer to set up the database manually: + +### 1. Connect to MariaDB as Root + +```bash +mysql -u root -p +``` + +### 2. Create Database and User + +```sql +-- Create database +CREATE DATABASE meshdata CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; + +-- Create user +CREATE USER 'meshdata'@'%' IDENTIFIED BY 'your_secure_password'; + +-- Grant privileges +GRANT ALL PRIVILEGES ON meshdata.* TO 'meshdata'@'%'; +GRANT RELOAD ON *.* TO 'meshdata'@'%'; +GRANT PROCESS ON *.* TO 'meshdata'@'%'; + +-- Apply changes +FLUSH PRIVILEGES; +``` + +## Privileges Explained + +### Required Privileges + +- **ALL PRIVILEGES ON meshdata.\*** - Full access to the application database +- **RELOAD ON \*.\*** - Required for query cache operations +- **PROCESS ON \*.\*** - Required for monitoring and debugging + +### Why These Privileges? + +- **RELOAD**: Allows the application to clear the MariaDB query cache for optimal performance +- **PROCESS**: Enables monitoring of database connections and processes +- **ALL PRIVILEGES**: Standard database operations (SELECT, INSERT, UPDATE, DELETE, etc.) + +## Verification + +After setup, you can verify everything is working: + +### 1. Test Database Connection + +```bash +mysql -u meshdata -p meshdata +``` + +### 2. Test Privileges + +```sql +-- Test RELOAD privilege +FLUSH QUERY CACHE; + +-- Test PROCESS privilege +SHOW PROCESSLIST; +``` + +### 3. Check Application + +Visit the debug endpoint to verify privileges: +``` +http://your-server:port/api/debug/database-cache +``` + +## Troubleshooting + +### Common Issues + +1. **"Access denied for user"** + - Check username and password in config.ini + - Verify user exists and has correct privileges + +2. **"Access denied; you need RELOAD privilege"** + - Run the setup script again + - Or manually grant RELOAD privilege + +3. 
**"Cannot connect to database"** + - Check if MariaDB/MySQL is running + - Verify host and port settings + - Check firewall settings + +### Getting Help + +- Check the logs for detailed error messages +- Use the debug endpoints to diagnose issues +- Review the CACHING.md document for advanced configuration + +## Security Notes + +- Use strong passwords for both root and application users +- Consider using a dedicated database user for the application +- Restrict network access to the database server when possible +- Regularly update MariaDB/MySQL for security patches \ No newline at end of file diff --git a/docs/SETUP_DOCKER.md b/docs/SETUP_DOCKER.md new file mode 100644 index 00000000..fc234235 --- /dev/null +++ b/docs/SETUP_DOCKER.md @@ -0,0 +1,255 @@ +# MeshInfo-Lite Docker Setup Guide + +This guide covers setting up MeshInfo-Lite with Docker Compose, including proper database privileges. + +## Prerequisites + +- Docker and Docker Compose installed +- Git (to clone the repository) + +## Quick Setup + +### 1. Clone and Configure + +```bash +# Clone the repository +git clone +cd meshinfo-lite + +# Copy the sample configuration +cp docker-compose.yml.sample docker-compose.yml +cp config.ini.sample config.ini +``` + +### 2. Configure Database Settings + +Edit your `config.ini` file: + +```ini +[database] +host = mariadb +username = meshdata +password = passw0rd +database = meshdata +root_password = passw0rd +``` + +**Note**: In Docker Compose, the database host is the service name (`mariadb`), not `localhost`. + +### 3. Start Docker Services + +```bash +# Start the services +docker-compose up -d + +# Check that services are running +docker-compose ps +``` + +### 4. Set Up Database Privileges + +**Important**: The Docker Compose setup automatically creates the database, user, and tables, but **does not grant all the privileges needed for optimal performance**. 
+ +**What's Created Automatically:** +- ✅ Database (`meshdata`) +- ✅ User (`meshdata`) +- ✅ All application tables +- ✅ Basic database operations (SELECT, INSERT, UPDATE, DELETE) + +**What's Missing:** +- ❌ RELOAD privilege (needed for query cache operations) +- ❌ PROCESS privilege (needed for monitoring) + +You have two options to complete the setup: + +#### Option A: Python Setup Script (Recommended) + +```bash +# Run the Python setup script +python setup_docker.py +``` + +#### Option B: Shell Script (Alternative) + +```bash +# Run the shell script inside the container +docker-compose exec meshinfo ./docker_setup.sh +``` + +Both scripts will: +- ✅ Wait for the database to become available +- ✅ Grant RELOAD privilege for query cache operations +- ✅ Grant PROCESS privilege for monitoring +- ✅ Test the connection and privileges +- ✅ Provide detailed feedback + +### 5. Verify Setup + +Check that everything is working: + +```bash +# Check application logs +docker-compose logs meshinfo + +# Test the application +curl http://localhost:8001/ +``` + +## What the Setup Script Does + +The `setup_docker.py` script performs these operations: + +1. **Waits for Database**: Ensures MariaDB is fully started and ready +2. **Connects as Root**: Uses root credentials to grant privileges +3. **Grants RELOAD**: Allows query cache clearing operations +4. **Grants PROCESS**: Enables monitoring and debugging +5. **Tests Everything**: Verifies the setup worked correctly + +## Docker Compose Configuration + +The typical `docker-compose.yml` includes: + +```yaml +services: + mariadb: + image: mariadb + environment: + MYSQL_ROOT_PASSWORD: passw0rd + MYSQL_DATABASE: meshdata + MYSQL_USER: meshdata + MYSQL_PASSWORD: passw0rd + volumes: + - ./mysql_data:/var/lib/mysql + - ./custom.cnf:/etc/mysql/conf.d/custom.cnf + + meshinfo: + build: . 
+ depends_on: + - mariadb + volumes: + - ./config.ini:/app/config.ini + ports: + - 8001:8000 +``` + +## Manual Setup (Alternative) + +If you prefer to set up privileges manually: + +### 1. Connect to the MariaDB Container + +```bash +# Connect to the MariaDB container +docker-compose exec mariadb mysql -u root -p +# Password: passw0rd +``` + +### 2. Grant Privileges + +```sql +-- Grant RELOAD privilege for query cache operations +GRANT RELOAD ON *.* TO 'meshdata'@'%'; + +-- Grant PROCESS privilege for monitoring +GRANT PROCESS ON *.* TO 'meshdata'@'%'; + +-- Apply changes +FLUSH PRIVILEGES; +``` + +### 3. Test Privileges + +```sql +-- Test RELOAD privilege +FLUSH QUERY CACHE; + +-- Test PROCESS privilege +SHOW PROCESSLIST; +``` + +## Troubleshooting + +### Common Issues + +1. **"Database not available"** + ```bash + # Check if services are running + docker-compose ps + + # Check MariaDB logs + docker-compose logs mariadb + + # Restart services + docker-compose restart + ``` + +2. **"Access denied for user"** + - Verify the database credentials in `config.ini` + - Check that the MariaDB container is fully started + - Run the setup script again + +3. **"Access denied; you need RELOAD privilege"** + - Run `python setup_docker.py` to grant privileges + - Or manually grant privileges as shown above + +4. 
**"Cannot connect to database"** + - Check that the host is set to `mariadb` (not `localhost`) + - Verify the Docker network is working + - Check container logs: `docker-compose logs` + +### Debug Commands + +```bash +# Check container status +docker-compose ps + +# View logs +docker-compose logs meshinfo +docker-compose logs mariadb + +# Connect to database container +docker-compose exec mariadb mysql -u meshdata -p meshdata + +# Check application health +curl http://localhost:8001/api/debug/database-cache +``` + +## Production Considerations + +### Security + +- Change default passwords in production +- Use Docker secrets for sensitive data +- Restrict network access to the database container +- Consider using a managed database service + +### Performance + +- Adjust MariaDB configuration in `custom.cnf` +- Monitor memory usage and adjust container limits +- Use persistent volumes for data storage +- Consider database backups + +### Monitoring + +- Set up log aggregation +- Monitor container resource usage +- Use health checks for automated monitoring +- Set up alerts for critical issues + +## Next Steps + +After setup is complete: + +1. **Configure MQTT**: Set up your MQTT broker connection +2. **Import Data**: Import existing node data if available +3. **Customize**: Add your logo and customize the interface +4. **Monitor**: Set up monitoring and alerting +5. 
**Backup**: Configure regular database backups + +## Getting Help + +- Check the application logs: `docker-compose logs meshinfo` +- Use debug endpoints: `http://localhost:8001/api/debug/` +- Review the main documentation in `CACHING.md` +- Check the general setup guide in `SETUP.md` \ No newline at end of file diff --git a/generate_css.py b/generate_css.py new file mode 100644 index 00000000..764e90bc --- /dev/null +++ b/generate_css.py @@ -0,0 +1,44 @@ +import configparser +import os +import logging +from jinja2 import Template + +CONFIG_PATH = 'config.ini' +TEMPLATE_PATH = 'www/css/meshinfo.css.template' +OUTPUT_PATH = 'www/css/meshinfo.css' + +# Default theme values +DEFAULTS = { + 'header_color': '#9fdef9', + 'table_header_color': '#D7F9FF', + 'table_alternating_row_color': '#f0f0f0', + 'accent_color': '#17a2b8', + 'page_background_color': '#ffffff', + 'table_border_color': '#dee2e6', + 'link_color': '#007bff', + 'link_color_hover': '#0056b3', + 'control_color': '#17a2b8', + 'control_color_hover': '#1396a5', + 'header_link_color': '#555', + 'header_link_active_color': '#000', + 'header_brand_color': '#000', + 'chat_box_background_color': '#f8f9fa', + 'chat_box_border_color': '', +} + +config = configparser.ConfigParser() +config.read(CONFIG_PATH) +theme = dict(DEFAULTS) +theme.update(config['theme'] if 'theme' in config else {}) + +# Generate CSS using Jinja2 template engine +with open(TEMPLATE_PATH) as f: + template_content = f.read() + +template = Template(template_content) +css = template.render(**theme) + +with open(OUTPUT_PATH, 'w') as f: + f.write(css) + +logging.info(f"Generated {OUTPUT_PATH} from {TEMPLATE_PATH} using theme from {CONFIG_PATH}") \ No newline at end of file diff --git a/generate_favicon.py b/generate_favicon.py new file mode 100644 index 00000000..b829066a --- /dev/null +++ b/generate_favicon.py @@ -0,0 +1,107 @@ +import configparser +import os +from cairosvg import svg2png +from PIL import Image +import io + +CONFIG_PATH = 'config.ini' 
+FAVICON_OUTPUT_PATH = 'www/images/icons/favicon.ico' + +# Default theme values +DEFAULTS = { + 'accent_color': '#17a2b8', +} + +SVG_TEMPLATE = ''' + + Meshtastic Logo + + + + + + + + + + Meshtastic Logomeshtasticmeshtastikмештастикhttps://meshtastic.orgEN +''' + +SIZES = [16, 32, 48, 64] + +def is_dark(hex_color): + hex_color = hex_color.lstrip('#') + r, g, b = tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4)) + brightness = 0.299 * r + 0.587 * g + 0.114 * b + return brightness < 128 + + +def main(): + config = configparser.ConfigParser() + config.read(CONFIG_PATH) + theme = dict(DEFAULTS) + theme.update(config['theme'] if 'theme' in config else {}) + accent_color = theme['accent_color'] + + # Favicon background override + favicon_background_color = theme.get('favicon_background_color') or accent_color + # Favicon logo (stroke) color override + favicon_logo_color = theme.get('favicon_logo_color') + + if favicon_logo_color: + stroke_color = favicon_logo_color + else: + stroke_color = '#FFFFFF' if is_dark(favicon_background_color) else '#000000' + + svg_content = SVG_TEMPLATE.format(accent_color=favicon_background_color, stroke_color=stroke_color) + png_images = [] + + for size in SIZES: + png_bytes = svg2png(bytestring=svg_content.encode('utf-8'), output_width=size, output_height=size) + img = Image.open(io.BytesIO(png_bytes)).convert('RGBA') + png_images.append(img) + + # Save as multi-resolution .ico + png_images[0].save(FAVICON_OUTPUT_PATH, format='ICO', sizes=[(s, s) for s in SIZES], append_images=png_images[1:]) + print(f"Generated {FAVICON_OUTPUT_PATH} with sizes: {SIZES}") + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/main.py b/main.py index c2955c9b..0f0c14bd 100644 --- a/main.py +++ b/main.py @@ -1,6 +1,3 @@ -import meshinfo_web -import meshinfo_mqtt -from meshdata import MeshData, create_database import threading import logging import colorlog @@ -8,6 +5,8 @@ import time import sys import os +import atexit +import 
signal def setup_logger(): @@ -45,16 +44,41 @@ def setup_logger(): return logger +logger = setup_logger() + +import meshinfo_web +import meshinfo_mqtt +from meshdata import MeshData, create_database def check_pid(pid): - """ Check For the existence of a unix pid. """ + """Check if the process with the given PID is running and is our process.""" try: + # Check if process exists os.kill(pid, 0) + # Check if it's our process by reading /proc//cmdline + try: + with open(f'/proc/{pid}/cmdline', 'rb') as f: + cmdline = f.read().decode('utf-8', errors='ignore') + return 'python' in cmdline and 'main.py' in cmdline + except (IOError, PermissionError): + return False except OSError: return False - else: - return True +def cleanup_pidfile(): + """Remove the PID file on exit.""" + try: + if os.path.exists(pidfile): + os.remove(pidfile) + logger.info("Cleaned up PID file") + except Exception as e: + logger.error(f"Error cleaning up PID file: {e}") + +def handle_signal(signum, frame): + """Handle termination signals gracefully.""" + logger.info(f"Received signal {signum}, cleaning up...") + cleanup_pidfile() + sys.exit(0) def threadwrap(threadfunc): def wrapper(): @@ -67,34 +91,49 @@ def wrapper(): logger.error('exited normally, bad thread; restarting') return wrapper - pidfile = "meshinfo.pid" pid = None + +# Register signal handlers +signal.signal(signal.SIGTERM, handle_signal) +signal.signal(signal.SIGINT, handle_signal) + +# Register cleanup function +atexit.register(cleanup_pidfile) + try: - fh = open(pidfile, "r") - pid = int(fh.read()) - fh.close() + if os.path.exists(pidfile): + with open(pidfile, "r") as fh: + pid = int(fh.read().strip()) + if check_pid(pid): + logger.info("Process already running with PID %d", pid) + sys.exit(0) + else: + logger.warning("Found stale PID file, removing it") + os.remove(pidfile) except Exception as e: - pass - -if pid and check_pid(pid): - print("already running") - sys.exit(0) + logger.warning(f"Error reading PID file: {e}") -fh = 
open(pidfile, "w") -fh.write(str(os.getpid())) -fh.close() +# Write our PID +try: + with open(pidfile, "w") as fh: + fh.write(str(os.getpid())) + logger.info("Wrote PID file") +except Exception as e: + logger.error(f"Error writing PID file: {e}") + sys.exit(1) -logger = setup_logger() config_file = "config.ini" if not os.path.isfile(config_file): - print(f"Error: Configuration file '{config_file}' not found!") + logger.error(f"Error: Configuration file '{config_file}' not found!") sys.exit(1) -fh = open("banner", "r") -logger.info(fh.read()) -fh.close() +try: + with open("banner", "r") as fh: + logger.info(fh.read()) +except Exception as e: + logger.warning(f"Error reading banner file: {e}") logger.info("Setting up database") db_connected = False diff --git a/meshdata.py b/meshdata.py index 5ff0cdbb..d938666a 100644 --- a/meshdata.py +++ b/meshdata.py @@ -6,6 +6,13 @@ import utils import logging import re +from timezone_utils import time_ago # Import time_ago from timezone_utils +from meshtastic_support import get_hardware_model_name, get_modem_preset_name # Import functions from meshtastic_support +from database_cache import DatabaseCache # Import DatabaseCache from its own file +import types +from collections import defaultdict, deque +import threading +from types import SimpleNamespace class CustomJSONEncoder(json.JSONEncoder): @@ -20,14 +27,15 @@ def default(self, obj): return list(obj) # Convert set to list # Use default serialization for other types return super().default(obj) - - + class MeshData: def __init__(self): config = configparser.ConfigParser() config.read('config.ini') self.config = config self.db = None + self.db_cache = DatabaseCache(self.config) + self.debug = config.getboolean("server", "debug", fallback=False) self.connect_db() def __del__(self): @@ -54,7 +62,10 @@ def unknown(self, id): "decoded": { "json_payload": { "long_name": long_name, - "short_name": short_name + "short_name": short_name, + "role": 0, # Default to Client role + 
"firmware_version": None, # Will be updated when real nodeinfo arrives + "hw_model": None # Will be updated when real nodeinfo arrives } } } @@ -65,23 +76,77 @@ def connect_db(self): for attempt in range(max_retries): try: + # Ensure any existing connection is closed before creating a new one + if self.db and self.db.is_connected(): + try: + self.db.close() + logging.debug("Closed existing DB connection before reconnecting.") + except mysql.connector.Error as close_err: + logging.warning(f"Error closing existing DB connection: {close_err}") + self.db = mysql.connector.connect( host=self.config["database"]["host"], user=self.config["database"]["username"], password=self.config["database"]["password"], database=self.config["database"]["database"], - charset="utf8mb4" + charset="utf8mb4", + # Add connection timeout (e.g., 10 seconds) + connection_timeout=10 ) - cur = self.db.cursor() - cur.execute("SET NAMES utf8mb4;") - cur.close() - return + if self.db.is_connected(): + cur = self.db.cursor() + cur.execute("SET NAMES utf8mb4;") + cur.close() + logging.info(f"Database connection successful (Attempt {attempt + 1}).") + return + else: + # This case might not be reached if connect throws error, but good practice + raise mysql.connector.Error("Connection attempt returned but not connected.") + except mysql.connector.Error as err: + logging.warning(f"Database connection attempt {attempt + 1}/{max_retries} failed: {err}") if attempt < max_retries - 1: - logging.warning(f"Waiting for database to become ready. Attempt {attempt + 1}/{max_retries}") + logging.info(f"Retrying connection in {retry_delay} seconds...") time.sleep(retry_delay) else: - raise + logging.error("Maximum database connection retries reached. Raising error.") + raise # Re-raise the last error after all retries fail + + def ping_db(self): + """Checks connection and attempts reconnect if needed.""" + if self.db is None: + logging.warning("Database object is None. 
Attempting reconnect.") + try: + self.connect_db() + return self.db is not None and self.db.is_connected() + except Exception as e: + logging.error(f"Reconnect failed during ping: {e}") + return False + + try: + # Check if connected first, then try ping with reconnect=True + if not self.db.is_connected(): + logging.warning("DB connection reported as not connected. Attempting ping/reconnect.") + # The ping=True argument attempts to reconnect if connection is lost. + self.db.ping(reconnect=True, attempts=3, delay=2) + logging.debug("Database connection verified via ping.") + return True + except mysql.connector.Error as err: + logging.error(f"Database ping/reconnect failed: {err}") + # Attempt a full reconnect as a final measure if ping fails + try: + logging.warning("Ping failed. Attempting full database reconnect...") + self.connect_db() # Use the existing connect method + # Check connection status again after attempting connect_db + if self.db and self.db.is_connected(): + logging.info("Full database reconnect successful.") + return True + else: + logging.error("Full database reconnect attempt failed to establish connection.") + return False + except Exception as e: + logging.error(f"Full database reconnect attempt raised an exception: {e}") + return False def get_telemetry(self, id): telemetry = {} @@ -160,6 +225,40 @@ def get_position(self, id): cur.close() return position + def get_position_at_time(self, node_id, target_timestamp, cur=None): + """Retrieves the position record from positionlog for a node that is closest to, but not after, the target timestamp.""" + position = {} + close_cur = False + if cur is None: + cur = self.db.cursor(dictionary=True) + close_cur = True + try: + target_dt = datetime.datetime.fromtimestamp(target_timestamp) + sql = """SELECT latitude_i, longitude_i, ts_created + FROM positionlog + WHERE id = %s + ORDER BY ABS(TIMESTAMPDIFF(SECOND, ts_created, %s)) ASC + LIMIT 1""" + params = (node_id, target_dt) + cur.execute(sql, params) + 
row = cur.fetchone() + if row: + position = { + "latitude_i": row["latitude_i"], + "longitude_i": row["longitude_i"], + "position_time": row["ts_created"].timestamp() if isinstance(row["ts_created"], datetime.datetime) else row["ts_created"], + "latitude": row["latitude_i"] / 10000000 if row["latitude_i"] else None, + "longitude": row["longitude_i"] / 10000000 if row["longitude_i"] else None + } + except mysql.connector.Error as err: + logging.error(f"Database error fetching nearest position for {node_id}: {err}") + except Exception as e: + logging.error(f"Error fetching nearest position for {node_id}: {e}") + finally: + if close_cur: + cur.close() + return position + def get_neighbors(self, id): neighbors = [] sql = """SELECT @@ -174,7 +273,7 @@ def get_neighbors(self, id): LEFT OUTER JOIN position p1 ON p1.id = a.id LEFT OUTER JOIN position p2 ON p2.id = a.neighbor_id WHERE a.id = %s -AND a.ts_created < NOW() - INTERVAL 1 DAY +AND a.ts_created >= NOW() - INTERVAL 1 DAY """ params = (id, ) cur = self.db.cursor() @@ -209,42 +308,108 @@ def get_neighbors(self, id): return neighbors def get_traceroutes(self, page=1, per_page=25): - """Get paginated traceroutes with SNR information.""" - # Get total count first + """Get paginated traceroutes with SNR information, grouping all attempts (request and reply) together.""" + page = max(1, page) # Ensure page is at least 1 cur = self.db.cursor() - cur.execute("SELECT COUNT(*) FROM traceroute") + cur.execute("SELECT COUNT(DISTINCT request_id) FROM traceroute") total = cur.fetchone()[0] - # Get paginated results with all fields - sql = """SELECT traceroute_id, from_id, to_id, route, route_back, - snr_towards, snr_back, success, channel, hop_limit, ts_created - FROM traceroute - ORDER BY ts_created DESC - LIMIT %s OFFSET %s""" + # Get paginated request_ids + cur.execute(""" + SELECT request_id + FROM traceroute + GROUP BY request_id + ORDER BY MAX(ts_created) DESC + LIMIT %s OFFSET %s + """, (per_page, (page - 1) * per_page)) + 
request_ids = [row[0] for row in cur.fetchall()] - offset = (page - 1) * per_page - cur.execute(sql, (per_page, offset)) + # Fetch all traceroute rows for these request_ids + if not request_ids: + return { + "items": [], + "page": page, + "per_page": per_page, + "total": total, + "pages": (total + per_page - 1) // per_page, + "has_prev": page > 1, + "has_next": page * per_page < total, + "prev_num": page - 1, + "next_num": page + 1 + } + format_strings = ','.join(['%s'] * len(request_ids)) + sql = f""" + SELECT + t.request_id, + t.from_id, + t.to_id, + t.route, + t.route_back, + t.snr_towards, + t.snr_back, + t.success, + t.channel, + t.hop_limit, + t.ts_created, + t.is_reply, + t.error_reason, + t.attempt_number, + t.traceroute_id + FROM traceroute t + WHERE t.request_id IN ({format_strings}) + ORDER BY t.request_id DESC, t.ts_created ASC + """ + cur.execute(sql, tuple(request_ids)) rows = cur.fetchall() - traceroutes = [] + # Group by request_id + from collections import defaultdict + grouped = defaultdict(list) for row in rows: - traceroutes.append({ - "id": row[0], - "from_id": row[1], - "to_id": row[2], - "route": [int(a) for a in row[3].split(";")] if row[3] else [], - "route_back": [int(a) for a in row[4].split(";")] if row[4] else [], - "snr_towards": [float(s) for s in row[5].split(";")] if row[5] else [], - "snr_back": [float(s) for s in row[6].split(";")] if row[6] else [], + route = [int(a) for a in row[3].split(";")] if row[3] else [] + route_back = [int(a) for a in row[4].split(";")] if row[4] else [] + snr_towards = [float(s)/4.0 for s in row[5].split(";")] if row[5] else [] + snr_back = [float(s)/4.0 for s in row[6].split(";")] if row[6] else [] + from_id = row[1] + to_id = row[2] + # For zero-hop, do NOT set route or route_back to endpoints; leave as empty lists + grouped[row[0]].append({ + "id": row[14], # Use traceroute_id as the unique id + "from_id": from_id, + "to_id": to_id, + "route": route, + "route_back": route_back, + "snr_towards": 
snr_towards, + "snr_back": snr_back, "success": row[7], "channel": row[8], "hop_limit": row[9], - "ts_created": row[10].timestamp() + "ts_created": row[10].timestamp(), + "is_reply": row[11], + "error_reason": row[12], + "attempt_number": row[13], + "traceroute_id": row[14] }) + # Prepare items as a list of dicts, each with all attempts for a request_id + items = [] + for req_id in request_ids: + attempts = grouped[req_id] + # Group status logic: prefer success, then error, then incomplete + summary = dict(attempts[0]) + summary['attempts'] = attempts + summary['success'] = any(a['success'] for a in attempts) + summary['error_reason'] = next((a['error_reason'] for a in attempts if a['error_reason']), None) + # If any attempt is successful, set status to success + if summary['success']: + summary['status'] = 'success' + elif summary['error_reason']: + summary['status'] = 'error' + else: + summary['status'] = 'incomplete' + items.append(summary) cur.close() - return { - "items": traceroutes, + "items": items, "page": page, "per_page": per_page, "total": total, @@ -268,56 +433,276 @@ def iter_pages(current_page, total_pages, left_edge=2, left_current=2, right_cur last = num def get_nodes(self, active=False): + """ + Retrieve all nodes from the database, including their latest telemetry, position, and channel data. + + This method uses a single optimized SQL query with Common Table Expressions (CTEs) to join the latest telemetry, + position, and channel information for each node. To avoid column name collisions (especially for the 'id' field), + all joined tables alias their 'id' columns (e.g., 'telemetry_id', 'position_id', 'channel_id') and only the + primary node ID from 'nodeinfo' is selected as 'id'. + + This explicit column selection and aliasing is critical: if the joined tables' 'id' columns were not aliased or + omitted from the SELECT list, they could overwrite the real node ID in the result set with NULL for nodes that + lack telemetry or position data. 
This would cause many nodes to be skipped in the final output, leading to an + incorrect and reduced node count. By selecting only 'n.id' as 'id', we ensure all nodes from 'nodeinfo' are + included, regardless of whether they have telemetry, position, or channel data. + + Args: + active (bool): Unused, present for compatibility. + Returns: + dict: A dictionary of nodes keyed by their hex ID, with all relevant data included. + """ nodes = {} active_threshold = int( self.config["server"]["node_activity_prune_threshold"] ) - # Modified to include all nodes but still mark active status - all_sql = """SELECT n.*, u.username owner_username, - CASE WHEN n.ts_seen > FROM_UNIXTIME(%s) THEN 1 ELSE 0 END as is_active - FROM nodeinfo n - LEFT OUTER JOIN meshuser u ON n.owner = u.email""" - cur = self.db.cursor() + # Combined query to get all node data in one go + sql = """ + WITH latest_telemetry AS ( + SELECT id as telemetry_id, + air_util_tx, + battery_level, + channel_utilization, + uptime_seconds, + voltage, + temperature, + relative_humidity, + barometric_pressure, + gas_resistance, + current, + telemetry_time, + channel as telemetry_channel, + ROW_NUMBER() OVER (PARTITION BY id ORDER BY telemetry_time DESC) as rn + FROM telemetry + WHERE battery_level IS NOT NULL + ), + latest_position AS ( + SELECT id as position_id, + altitude, + ground_speed, + ground_track, + latitude_i, + longitude_i, + location_source, + precision_bits, + position_time, + geocoded, + ROW_NUMBER() OVER (PARTITION BY id ORDER BY position_time DESC) as rn + FROM position + ), + latest_channel AS ( + SELECT id as channel_id, channel, ts_created + FROM ( + SELECT id, channel, ts_created, + ROW_NUMBER() OVER (PARTITION BY id ORDER BY ts_created DESC) as rn + FROM ( + SELECT id, channel, ts_created + FROM telemetry + WHERE channel IS NOT NULL + UNION ALL + SELECT from_id as id, channel, ts_created + FROM text + WHERE channel IS NOT NULL + ) combined + WHERE id IS NOT NULL + ) ranked + WHERE rn = 1 + ) + 
SELECT + n.id, + n.long_name, + n.short_name, + n.hw_model, + n.role, + n.firmware_version, + n.has_default_channel, + n.num_online_local_nodes, + n.region, + n.modem_preset, + n.owner, + n.updated_via, + n.ts_seen, + n.ts_created, + n.ts_updated, + u.username as owner_username, + CASE WHEN n.ts_seen > FROM_UNIXTIME(%s) THEN 1 ELSE 0 END as is_active, + UNIX_TIMESTAMP(n.ts_uplink) as ts_uplink, + -- Telemetry fields + t.air_util_tx, + t.battery_level, + t.channel_utilization, + t.uptime_seconds, + t.voltage, + t.temperature, + t.relative_humidity, + t.barometric_pressure, + t.gas_resistance, + t.current, + t.telemetry_time, + t.telemetry_channel, + -- Position fields + p.altitude, + p.ground_speed, + p.ground_track, + p.latitude_i, + p.longitude_i, + p.location_source, + p.precision_bits, + p.position_time, + p.geocoded, + -- Channel + c.channel + FROM nodeinfo n + LEFT OUTER JOIN meshuser u ON n.owner = u.email + LEFT OUTER JOIN latest_telemetry t ON n.id = t.telemetry_id AND t.rn = 1 + LEFT OUTER JOIN latest_position p ON n.id = p.position_id AND p.rn = 1 + LEFT OUTER JOIN latest_channel c ON n.id = c.channel_id + WHERE n.id IS NOT NULL + """ + + # Use database-level caching for the main query timeout = time.time() - active_threshold - params = (timeout, ) - cur.execute(all_sql, params) - rows = cur.fetchall() - column_names = [desc[0] for desc in cur.description] + rows = self.db_cache.execute_cached_query(sql, (timeout,), cache_key="nodes_main", timeout=60) + # print("Fetched rows:", len(rows)) + skipped = 0 for row in rows: + if not row or row.get('id') is None: + skipped += 1 + continue + if not row or row.get('id') is None: + continue # Skip rows with no ID + record = {} - for i in range(len(row)): - if isinstance(row[i], datetime.datetime): - record[column_names[i]] = row[i].timestamp() + # Convert datetime fields to timestamps + for key, value in row.items(): + if isinstance(value, datetime.datetime): + record[key] = value.timestamp() else: - 
record[column_names[i]] = row[i] + record[key] = value - # Use the is_active field from the query - is_active = bool(record.get("is_active", 0)) - record["telemetry"] = self.get_telemetry(row[0]) - record["neighbors"] = self.get_neighbors(row[0]) - record["position"] = self.get_position(row[0]) - if record["position"]: - if record["position"]["latitude_i"]: - record["position"]["latitude"] = \ - record["position"]["latitude_i"] / 10000000 - else: - record["position"]["latitude"] = None - if record["position"]["longitude_i"]: - record["position"]["longitude"] = \ - record["position"]["longitude_i"] / 10000000 - else: - record["position"]["longitude"] = None - record["role"] = record["role"] or 0 - record["active"] = is_active - record["last_seen"] = utils.time_since(record["ts_seen"]) - node_id = utils.convert_node_id_from_int_to_hex(row[0]) - nodes[node_id] = record + # Process telemetry data + telemetry = {} # Use plain dict instead of SimpleNamespace + telemetry_fields = [ + 'air_util_tx', 'battery_level', 'channel_utilization', + 'uptime_seconds', 'voltage', 'temperature', 'relative_humidity', + 'barometric_pressure', 'gas_resistance', 'current', + 'telemetry_time', 'telemetry_channel' + ] + # Initialize all telemetry fields to None first + for field in telemetry_fields: + telemetry[field] = None + # Then set values from row if they exist + for field in telemetry_fields: + if field in row and row[field] is not None: + telemetry[field] = row[field] + record['telemetry'] = telemetry + + # Process position data + position = {} # Use plain dict instead of SimpleNamespace + position_fields = [ + 'altitude', 'ground_speed', 'ground_track', 'latitude_i', + 'longitude_i', 'location_source', 'precision_bits', + 'position_time', 'geocoded' + ] + # Initialize all position fields to None first + for field in position_fields: + position[field] = None + # Then set values from row if they exist + for field in position_fields: + if field in row and row[field] is not None: + 
position[field] = row[field] + + # Always set latitude and longitude attributes + position['latitude'] = position['latitude_i'] / 10000000 if position['latitude_i'] else None + position['longitude'] = position['longitude_i'] / 10000000 if position['longitude_i'] else None + record['position'] = position + + # Get neighbors data + # record['neighbors'] = self.get_neighbors(row['id']) + record['neighbors'] = [] # This will be populated later + + # Set other fields + record['role'] = record.get('role', 0) + record['active'] = bool(record.get('is_active', 0)) + record['last_seen'] = utils.time_since(record['ts_seen']) + record['channel'] = row.get('channel') + + try: + # Convert node ID to hex string and ensure it's properly formatted + node_id_hex = utils.convert_node_id_from_int_to_hex(row['id']) + if node_id_hex: # Only add if conversion was successful + nodes[node_id_hex] = record + except (TypeError, ValueError) as e: + logging.error(f"Error converting node ID {row['id']} to hex: {e}") + continue - cur.close() + # print("Skipped rows due to missing id:", skipped) + # print("Final nodes count:", len(nodes)) + + # --- Bulk fetch neighbors to avoid N+1 queries --- + all_node_ids = [n['id'] for n in nodes.values()] + if all_node_ids: + neighbor_sql = """ + SELECT id, neighbor_id, snr + FROM neighborinfo + WHERE id IN (%s) + """ % ','.join(['%s'] * len(all_node_ids)) + + all_neighbors = self.db_cache.execute_cached_query(neighbor_sql, all_node_ids, cache_key="nodes_neighbors", timeout=60) + + # Create a dictionary to map nodes to their neighbors + neighbors_map = {node_id: [] for node_id in all_node_ids} + for neighbor_row in all_neighbors: + neighbors_map[neighbor_row['id']].append({ + 'neighbor_id': neighbor_row['neighbor_id'], + 'snr': neighbor_row['snr'], + 'distance': None # Distance calculation is complex, defer if not essential here + }) + + # Assign neighbors to each node + for node_hex, node_data in nodes.items(): + if node_data['id'] in neighbors_map: + 
node_data['neighbors'] = neighbors_map[node_data['id']] + return nodes + def get_nodes_cached(self, active=False): + """Get nodes with application-level caching to prevent duplicates.""" + cache_key = f"nodes_cache_{active}" + + # Check if we have a cached version + if hasattr(self, '_nodes_cache') and cache_key in self._nodes_cache: + cached_data = self._nodes_cache[cache_key] + if time.time() - cached_data['timestamp'] < 60: # 60 second cache + logging.debug(f"Returning cached nodes data for {cache_key}") + return cached_data['data'] + + # Fetch fresh data + logging.info("Fetching fresh nodes data from database") + nodes_data = self.get_nodes(active) + + # Cache the result + if not hasattr(self, '_nodes_cache'): + self._nodes_cache = {} + + self._nodes_cache[cache_key] = { + 'data': nodes_data, + 'timestamp': time.time() + } + + return nodes_data + + def clear_nodes_cache(self): + """Clear the application-level nodes cache.""" + if hasattr(self, '_nodes_cache'): + cache_size = len(self._nodes_cache) + self._nodes_cache.clear() + logging.info(f"Cleared nodes cache with {cache_size} entries") + else: + logging.info("No nodes cache to clear") + def get_chat(self, page=1, per_page=50): """Get paginated chat messages with reception data.""" # Get total count first @@ -334,7 +719,7 @@ def get_chat(self, page=1, per_page=50): ) AS reception_data FROM text t LEFT JOIN message_reception r ON t.message_id = r.message_id - GROUP BY t.message_id, t.from_id, t.to_id, t.text, t.ts_created + GROUP BY t.message_id, t.from_id, t.to_id, t.text, t.ts_created, t.channel ORDER BY t.ts_created DESC LIMIT %s OFFSET %s """ @@ -583,16 +968,45 @@ def store_node(self, data): if not data: return payload = dict(data["decoded"]["json_payload"]) - expected = [ - "hw_model", - "long_name", - "short_name", - "role", - "firmware_version" - ] - for attr in expected: - if attr not in payload: - payload[attr] = None + + # Determine packet type based on available fields + is_mapreport = 
"firmware_version" in payload + is_nodeinfo = "role" in payload and "firmware_version" not in payload + + if self.debug: + logging.info(f"store_node: Processing node {data['from']} - is_mapreport={is_mapreport}, is_nodeinfo={is_nodeinfo}") + + # Set up fields based on packet type + if is_mapreport: + # Mapreport: update firmware_version, hw_model, role, and mapreport-specific fields + expected = ["hw_model", "long_name", "short_name", "firmware_version", "has_default_channel", "num_online_local_nodes", "region", "modem_preset", "role"] + for attr in expected: + if attr not in payload: + payload[attr] = None + elif is_nodeinfo: + # Nodeinfo: update role and hw_model, preserve existing firmware_version and mapreport fields + expected = ["hw_model", "long_name", "short_name", "role"] + for attr in expected: + if attr not in payload: + payload[attr] = None + # Don't touch firmware_version and mapreport fields for nodeinfo + payload["firmware_version"] = None + payload["has_default_channel"] = None + payload["num_online_local_nodes"] = None + payload["region"] = None + payload["modem_preset"] = None + else: + # Fallback: try to update all fields + expected = ["hw_model", "long_name", "short_name", "role", "firmware_version", "has_default_channel", "num_online_local_nodes", "region", "modem_preset"] + for attr in expected: + if attr not in payload: + payload[attr] = None + + # Add logging for debugging + if self.debug: + logging.info(f"store_node: Processing node {data['from']} with role={payload.get('role')}, hw_model={payload.get('hw_model')}, firmware_version={payload.get('firmware_version')}") + logging.info(f"store_node: Full json_payload: {data['decoded']['json_payload']}") + logging.info(f"store_node: Available fields in payload: {list(payload.keys())}") sql = """INSERT INTO nodeinfo ( id, @@ -601,15 +1015,41 @@ def store_node(self, data): hw_model, role, firmware_version, + has_default_channel, + num_online_local_nodes, + region, + modem_preset, ts_updated ) 
-VALUES (%s, %s, %s, %s, %s, %s, NOW()) +VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()) ON DUPLICATE KEY UPDATE long_name = VALUES(long_name), short_name = VALUES(short_name), hw_model = COALESCE(VALUES(hw_model), hw_model), -role = COALESCE(VALUES(role), role), -firmware_version = COALESCE(VALUES(firmware_version), firmware_version), +role = CASE + WHEN VALUES(role) IS NOT NULL THEN VALUES(role) + ELSE role +END, +firmware_version = CASE + WHEN VALUES(firmware_version) IS NOT NULL THEN VALUES(firmware_version) + ELSE firmware_version +END, +has_default_channel = CASE + WHEN VALUES(has_default_channel) IS NOT NULL THEN VALUES(has_default_channel) + ELSE has_default_channel +END, +num_online_local_nodes = CASE + WHEN VALUES(num_online_local_nodes) IS NOT NULL THEN VALUES(num_online_local_nodes) + ELSE num_online_local_nodes +END, +region = CASE + WHEN VALUES(region) IS NOT NULL THEN VALUES(region) + ELSE region +END, +modem_preset = CASE + WHEN VALUES(modem_preset) IS NOT NULL THEN VALUES(modem_preset) + ELSE modem_preset +END, ts_updated = VALUES(ts_updated)""" values = ( data["from"], @@ -617,11 +1057,26 @@ def store_node(self, data): payload["short_name"], payload["hw_model"], payload["role"], - payload["firmware_version"] + payload["firmware_version"], + payload["has_default_channel"], + payload["num_online_local_nodes"], + payload["region"], + payload["modem_preset"] ) - cur = self.db.cursor() - cur.execute(sql, values) - self.db.commit() + + try: + cur = self.db.cursor() + cur.execute(sql, values) + rows_affected = cur.rowcount + if self.debug: + logging.info(f"store_node: SQL executed successfully, rows affected: {rows_affected}") + self.db.commit() + if self.debug: + logging.info(f"store_node: Transaction committed successfully") + except Exception as e: + logging.error(f"store_node: Database error for node {data['from']}: {e}") + self.db.rollback() + raise def store_position(self, data, source="position"): payload = 
dict(data["decoded"]["json_payload"]) @@ -714,50 +1169,91 @@ def store_neighborinfo(self, data): self.db.commit() def store_traceroute(self, data): + import logging from_id = self.verify_node(data["from"]) to_id = self.verify_node(data["to"]) payload = dict(data["decoded"]["json_payload"]) - - # Process forward route and SNR + + # Always use the payload's request_id if present, else fallback to message id + payload_request_id = data["decoded"].get("request_id") + message_id = data.get("id") + request_id = payload_request_id or message_id + + # Determine if this is a reply and get error reason + is_reply = False + error_reason = None + + # Check if this is a routing message with error + if "error_reason" in payload: + error_reason = payload["error_reason"] + + # Check if this is a route reply (legacy Meshtastic format) + if "route_reply" in payload: + is_reply = True + payload = payload["route_reply"] + elif "route_request" in payload: + payload = payload["route_request"] + + # Robust reply detection: if payload_request_id is present and does not match message_id, treat as reply + if payload_request_id and message_id and str(payload_request_id) != str(message_id): + is_reply = True + + # Insert the traceroute row route = None snr_towards = None if "route" in payload: route = ";".join(str(r) for r in payload["route"]) if "snr_towards" in payload: - snr_towards = ";".join(str(s) for s in payload["snr_towards"]) - - # Process return route and SNR + snr_towards = ";".join(str(float(s)) for s in payload["snr_towards"]) route_back = None snr_back = None if "route_back" in payload: route_back = ";".join(str(r) for r in payload["route_back"]) if "snr_back" in payload: - snr_back = ";".join(str(s) for s in payload["snr_back"]) - - # A traceroute is successful if we have either: - # 1. A direct connection with SNR data in both directions - # 2. 
A multi-hop route with SNR data in both directions - is_direct = not bool(route and route_back) # True if no hops in either direction - - success = False - if is_direct: - # For direct connections, we just need SNR data in both directions - success = bool(snr_towards and snr_back) + snr_back = ";".join(str(float(s)) for s in payload["snr_back"]) + + # Parse as lists for logic + route_list = payload.get("route", []) + route_back_list = payload.get("route_back", []) + snr_towards_list = payload.get("snr_towards", []) + snr_back_list = payload.get("snr_back", []) + + # Only mark as success if: + # For zero-hop direct connection: + if not route_list and not route_back_list: + # Must have SNR values in either direction + success = bool(snr_towards_list or snr_back_list) + # For multi-hop: + elif route_list and route_back_list: + # Must have SNR values in both directions + success = bool(snr_towards_list and snr_back_list) else: - # For multi-hop routes, we need both routes and their SNR data - success = bool(route and route_back and snr_towards and snr_back) + success = False + + # Now join for DB storage + route = None + snr_towards = None + if "route" in payload: + route = ";".join(str(r) for r in payload["route"]) + if "snr_towards" in payload: + snr_towards = ";".join(str(float(s)) for s in payload["snr_towards"]) + route_back = None + snr_back = None + if "route_back" in payload: + route_back = ";".join(str(r) for r in payload["route_back"]) + if "snr_back" in payload: + snr_back = ";".join(str(float(s)) for s in payload["snr_back"]) + + # --- FIX: Define attempt_number before use --- + attempt_number = data.get("attempt_number") or payload.get("attempt_number") - # Extract additional metadata channel = data.get("channel", None) hop_limit = data.get("hop_limit", None) - request_id = data.get("id", None) traceroute_time = payload.get("time", None) - sql = """INSERT INTO traceroute (from_id, to_id, channel, hop_limit, success, request_id, route, route_back, - 
snr_towards, snr_back, time, ts_created) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FROM_UNIXTIME(%s), NOW())""" - + snr_towards, snr_back, time, ts_created, is_reply, error_reason, attempt_number) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FROM_UNIXTIME(%s), NOW(), %s, %s, %s)""" params = ( from_id, to_id, @@ -769,11 +1265,38 @@ def store_traceroute(self, data): route_back, snr_towards, snr_back, - traceroute_time + traceroute_time, + is_reply, + error_reason, + attempt_number ) self.db.cursor().execute(sql, params) self.db.commit() + # --- Robust consolidation logic: ensure all related rows use the same request_id --- + cur = self.db.cursor() + ids_to_check = set() + if message_id: + ids_to_check.add(message_id) + if payload_request_id: + ids_to_check.add(payload_request_id) + if request_id: + ids_to_check.add(request_id) + # Find all request_ids for these ids (as traceroute_id or request_id) + all_request_ids = set() + for idval in ids_to_check: + if idval: + cur.execute("SELECT request_id FROM traceroute WHERE traceroute_id = %s OR request_id = %s", (idval, idval)) + all_request_ids.update([row[0] for row in cur.fetchall() if row[0]]) + if all_request_ids: + canonical_request_id = min(all_request_ids) + cur.execute(f"UPDATE traceroute SET request_id = %s WHERE request_id IN ({','.join(['%s']*len(all_request_ids))})", (canonical_request_id, *all_request_ids)) + self.db.commit() + logging.info(f"Consolidated traceroute rows: set request_id={canonical_request_id} for ids {all_request_ids}") + else: + logging.warning(f"No related traceroute rows found for ids: {ids_to_check}") + cur.close() + def get_successful_traceroutes(self): sql = """ SELECT @@ -782,7 +1305,7 @@ def get_successful_traceroutes(self): t.to_id, t.route, t.route_back, - t.snr, + t.snr_towards, t.snr_back, t.ts_created, n1.short_name as from_name, @@ -790,8 +1313,7 @@ def get_successful_traceroutes(self): FROM traceroute t JOIN nodeinfo n1 ON t.from_id = n1.id JOIN nodeinfo n2 ON 
t.to_id = n2.id - WHERE t.route_back IS NOT NULL - AND t.route_back != '' + WHERE t.success = TRUE ORDER BY t.ts_created DESC """ cur = self.db.cursor() @@ -804,24 +1326,128 @@ def get_successful_traceroutes(self): # Convert timestamp to Unix timestamp if needed if isinstance(result['ts_created'], datetime.datetime): result['ts_created'] = result['ts_created'].timestamp() + # Parse route and SNR data + if result['route']: + result['route'] = [int(a) for a in result['route'].split(";")] + if result['route_back']: + result['route_back'] = [int(a) for a in result['route_back'].split(";")] + if result['snr_towards']: + result['snr_towards'] = [float(s) for s in result['snr_towards'].split(";")] + if result['snr_back']: + result['snr_back'] = [float(s) for s in result['snr_back'].split(";")] results.append(result) cur.close() return results def store_telemetry(self, data): - cur = self.db.cursor() - cur.execute(f"SELECT COUNT(*) FROM telemetry") - count = cur.fetchone()[0] - if count >= 20000: - cur.execute(f"""DELETE FROM telemetry -ORDER BY ts_created ASC LIMIT 1""") - cur.close() - self.db.commit() + # Use class-level config that's already loaded + retention_days = self.config.getint("server", "telemetry_retention_days", fallback=None) if self.config else None + + # Use a simple counter to reduce cleanup frequency while still honoring retention + if not hasattr(self, '_telemetry_insert_count'): + self._telemetry_insert_count = 0 + self._telemetry_insert_count += 1 + + # Check for cleanup - run retention policy every 50 inserts or if retention_days is set + should_cleanup = (retention_days is not None) or (self._telemetry_insert_count % 50 == 0) + + if should_cleanup: + cur = self.db.cursor() + if retention_days: + # Always honor retention_days policy + cur.execute("""DELETE FROM telemetry + WHERE ts_created < DATE_SUB(NOW(), INTERVAL %s DAY)""", + (retention_days,)) + else: + # Fallback to count-based cleanup only if no retention_days set + cur.execute("SELECT 
COUNT(*) FROM telemetry WHERE ts_created > DATE_SUB(NOW(), INTERVAL 1 HOUR)") + recent_count = cur.fetchone()[0] + if recent_count > 200: # Only run count-based cleanup if we have enough recent activity + cur.execute("""DELETE FROM telemetry WHERE ts_created <= + (SELECT ts_created FROM + (SELECT ts_created FROM telemetry ORDER BY ts_created DESC LIMIT 1 OFFSET 20000) t)""") + cur.close() + self.db.commit() node_id = self.verify_node(data["from"]) payload = dict(data["decoded"]["json_payload"]) + # Extract channel from the root of the telemetry data + channel = data.get("channel") + # Extract packet_id from the incoming data (should be present as 'id' in the root of the message) + packet_id = data.get("id") + if packet_id is None: + # Fallback: try to get from payload if not present at root + packet_id = payload.get("id") + if packet_id is None: + # If still not found, skip storing (cannot deduplicate) + import logging + logging.warning("Telemetry packet missing unique packet_id, skipping store.") + return + + def validate_telemetry_value(value, field_name=None): + """Validate telemetry values, converting invalid values to None.""" + if value is None: + return None + try: + # Convert to float and check if it's a valid number + float_val = float(value) + if float_val == float('inf') or float_val == float('-inf') or str(float_val).lower() == 'nan': + return None + + # Apply field-specific validation + if field_name: + if field_name == 'battery_level': + # Battery level should be positive and an integer + # Allow values > 100% due to calibration issues + if float_val < 0 or not float_val.is_integer(): + return None + return int(float_val) # Convert to int since DB expects INT + elif field_name == 'air_util_tx' or field_name == 'channel_utilization': + # Utilization values should be positive + # Allow > 100% due to network conditions + if float_val < 0: + return None + elif field_name == 'uptime_seconds': + # Uptime should be positive + if float_val < 0: + return None 
+ elif field_name == 'voltage': + # Voltage should be positive and reasonable + # Allow up to 100V to accommodate various power systems + if float_val <= 0 or float_val > 100: + return None + elif field_name == 'temperature': + # Temperature should be within reasonable sensor range + # Allow -100°C to 150°C to accommodate various sensors and conditions + if not -100 <= float_val <= 150: + return None + elif field_name == 'relative_humidity': + # Humidity should be reasonable + # Allow slight overshoot due to calibration + if not -5 <= float_val <= 105: + return None + elif field_name == 'barometric_pressure': + # Pressure should be positive and reasonable + # Allow wider range for different altitudes and conditions + if float_val < 0 or float_val > 2000: + return None + elif field_name == 'gas_resistance': + # Gas resistance should be positive + # Allow up to 1e308 (near DOUBLE_MAX) for various sensors + if float_val <= 0 or float_val > 1e308: + return None + elif field_name == 'current': + # Current should be reasonable + # Allow wider range for various power monitoring setups + if not -50 <= float_val <= 50: + return None + + return float_val + except (ValueError, TypeError): + return None + data = { "air_util_tx": None, "battery_level": None, @@ -832,7 +1458,8 @@ def store_telemetry(self, data): "relative_humidity": None, "barometric_pressure": None, "gas_resistance": None, - "current": None + "current": None, + "channel": channel } metrics = [ @@ -844,16 +1471,30 @@ def store_telemetry(self, data): continue for key in data: if key in payload[metric]: - data[key] = payload[metric][key] + data[key] = validate_telemetry_value(payload[metric][key], key) sql = """INSERT INTO telemetry -(id, air_util_tx, battery_level, channel_utilization, +(id, packet_id, air_util_tx, battery_level, channel_utilization, uptime_seconds, voltage, temperature, relative_humidity, -barometric_pressure, gas_resistance, current, telemetry_time) -VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, 
%s, %s, FROM_UNIXTIME(%s)) +barometric_pressure, gas_resistance, current, telemetry_time, channel) +VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FROM_UNIXTIME(%s), %s) +ON DUPLICATE KEY UPDATE + air_util_tx = VALUES(air_util_tx), + battery_level = VALUES(battery_level), + channel_utilization = VALUES(channel_utilization), + uptime_seconds = VALUES(uptime_seconds), + voltage = VALUES(voltage), + temperature = VALUES(temperature), + relative_humidity = VALUES(relative_humidity), + barometric_pressure = VALUES(barometric_pressure), + gas_resistance = VALUES(gas_resistance), + current = VALUES(current), + telemetry_time = VALUES(telemetry_time), + channel = VALUES(channel) """ params = ( node_id, + packet_id, data["air_util_tx"], data["battery_level"], data["channel_utilization"], @@ -864,7 +1505,8 @@ def store_telemetry(self, data): data["barometric_pressure"], data["gas_resistance"], data["current"], - payload["time"] + payload["time"], + data["channel"] ) self.db.cursor().execute(sql, params) self.db.commit() @@ -955,7 +1597,12 @@ def verify_node(self, id, via=None): self.store_node(self.unknown(id)) else: if via: - sql = """UPDATE nodeinfo SET + # Only set ts_uplink if this node is reporting directly via MQTT (via is None or same as id) + if via == id: + sql = """UPDATE nodeinfo SET +ts_seen = NOW(), updated_via = %s, ts_uplink = NOW() WHERE id = %s""" + else: + sql = """UPDATE nodeinfo SET ts_seen = NOW(), updated_via = %s WHERE id = %s""" param = (via, id) else: @@ -970,7 +1617,13 @@ def log_data(self, topic, data): cur = self.db.cursor() cur.execute(f"SELECT COUNT(*) FROM meshlog") count = cur.fetchone()[0] - if count >= 1000: + + # Get configurable retention count, default to 1000 if not set + retention_count = self.config.getint("server", "log_retention_count", fallback=1000) + + if count >= retention_count: + if self.debug: + logging.debug(f"Log retention limit reached ({count} >= {retention_count}), removing oldest log entry") cur.execute(f"DELETE 
FROM meshlog ORDER BY ts_created ASC LIMIT 1") self.db.commit() @@ -1015,6 +1668,9 @@ def log_position(self, id, lat, lon, source): logging.info(f"Position updated for {id}") def store(self, data, topic): + if not self.ping_db(): + logging.error("Database connection is not active. Skipping store operation.") + return # Stop processing if DB is unavailable if not data: return self.log_data(topic, data) @@ -1030,6 +1686,12 @@ def store(self, data, topic): rx_snr = data.get("rx_snr") rx_rssi = data.get("rx_rssi") + # Process relay_node if present (firmware 2.5.x+) + relay_node = None + if "relay_node" in data and data["relay_node"] is not None: + # Convert relay_node to hex format (last 2 bytes of node ID) + relay_node = f"{data['relay_node']:04x}" # Convert to 4-char hex string (last 2 bytes) + # Store reception information if this is a received message with SNR/RSSI data if message_id and rx_snr is not None and rx_rssi is not None: received_by = None @@ -1040,11 +1702,16 @@ def store(self, data, topic): if received_by and received_by != data.get("from"): # Don't store reception by sender self.store_reception(message_id, data["from"], received_by, rx_snr, rx_rssi, - data.get("rx_time"), hop_limit, hop_start) + data.get("rx_time"), hop_limit, hop_start, relay_node) # Continue with the regular message type processing tp = data["type"] + if self.debug: + logging.info(f"store: Processing message type '{tp}' from node {data.get('from')}") + if tp == "nodeinfo": + if self.debug: + logging.info(f"store: Calling store_node for nodeinfo message from {data.get('from')}") self.store_node(data) elif tp == "position": self.store_position(data) @@ -1058,18 +1725,33 @@ def store(self, data, topic): self.store_telemetry(data) elif tp == "text": self.store_text(data, topic) # Only one text handler, with topic parameter + elif tp == "routing": + self.store_routing(data, topic) + elif tp == "store_forward": + # Store & Forward messages are internal routing messages for delayed message 
delivery + # We'll log them at debug level but not store them in the database + if self.debug: + logging.debug(f"store: Received Store & Forward message from {data.get('from')} - internal routing message") + elif tp in ["range_test", "simulator", "zps", "powerstress", "reticulum_tunnel"]: + # These are specialized message types that we don't need to store in the database + # but we'll log them at debug level for monitoring + if self.debug: + logging.debug(f"store: Received {tp} message from {data.get('from')} - specialized message type") + else: + logging.warning(f"store: Unknown message type '{tp}' from node {data.get('from')}") - def store_reception(self, message_id, from_id, received_by_id, rx_snr, rx_rssi, rx_time, hop_limit, hop_start): + def store_reception(self, message_id, from_id, received_by_id, rx_snr, rx_rssi, rx_time, hop_limit, hop_start, relay_node=None): """Store reception information for any message type with hop data.""" sql = """INSERT INTO message_reception - (message_id, from_id, received_by_id, rx_snr, rx_rssi, rx_time, hop_limit, hop_start) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s) + (message_id, from_id, received_by_id, rx_snr, rx_rssi, rx_time, hop_limit, hop_start, relay_node) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE rx_snr = VALUES(rx_snr), rx_rssi = VALUES(rx_rssi), rx_time = VALUES(rx_time), hop_limit = VALUES(hop_limit), - hop_start = VALUES(hop_start)""" + hop_start = VALUES(hop_start), + relay_node = VALUES(relay_node)""" params = ( message_id, from_id, @@ -1078,12 +1760,34 @@ def store_reception(self, message_id, from_id, received_by_id, rx_snr, rx_rssi, rx_rssi, rx_time, hop_limit, - hop_start + hop_start, + relay_node ) cur = self.db.cursor() cur.execute(sql, params) - cur.close() self.db.commit() + cur.close() + + # --- Relay Edges Table Update --- + # Only update if relay_node is present and not None + if relay_node: + try: + relay_suffix = relay_node[-2:] # Only last two hex digits + from_hex = 
format(from_id, '08x') + to_hex = format(received_by_id, '08x') + edge_sql = """ + INSERT INTO relay_edges (from_node, relay_suffix, to_node, first_seen, last_seen, count) + VALUES (%s, %s, %s, NOW(), NOW(), 1) + ON DUPLICATE KEY UPDATE last_seen = NOW(), count = count + 1 + """ + edge_params = (from_hex, relay_suffix, to_hex) + cur = self.db.cursor() + cur.execute(edge_sql, edge_params) + self.db.commit() + cur.close() + except Exception as e: + import logging + logging.error(f"Failed to update relay_edges: {e}") def setup_database(self): creates = [ @@ -1109,8 +1813,13 @@ def setup_database(self): owner VARCHAR(255), updated_via INT UNSIGNED, ts_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + ts_uplink TIMESTAMP DEFAULT NULL, ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, ts_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + has_default_channel BOOLEAN NULL, + num_online_local_nodes INT UNSIGNED NULL, + region INT UNSIGNED NULL, + modem_preset INT UNSIGNED NULL, FOREIGN KEY (owner) REFERENCES meshuser(email) )""", """CREATE TABLE IF NOT EXISTS position ( @@ -1135,14 +1844,30 @@ def setup_database(self): PRIMARY KEY (id, neighbor_id) )""", """CREATE TABLE IF NOT EXISTS traceroute ( + traceroute_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, + request_id BIGINT, from_id INT UNSIGNED NOT NULL, to_id INT UNSIGNED NOT NULL, + channel TINYINT UNSIGNED, + hop_limit TINYINT UNSIGNED, + success TINYINT(1) DEFAULT 0, + time TIMESTAMP NULL, route VARCHAR(255), - snr VARCHAR(255), - ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP + route_back TEXT, + ts_created TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP, + snr_towards TEXT, + snr_back TEXT, + is_reply TINYINT(1) DEFAULT 0, + error_reason INT, + attempt_number INT, + INDEX idx_traceroute_nodes (from_id, to_id), + INDEX idx_traceroute_channel (channel), + INDEX idx_traceroute_time (ts_created), + INDEX idx_traceroute_request_id (request_id) )""", """CREATE TABLE IF NOT EXISTS telemetry ( id INT UNSIGNED NOT NULL, + packet_id 
BIGINT NOT NULL, air_util_tx FLOAT(10, 7), battery_level INT, channel_utilization FLOAT(10, 7), @@ -1155,9 +1880,10 @@ def setup_database(self): current FLOAT(10, 7), telemetry_time TIMESTAMP, ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + channel INT, + UNIQUE KEY unique_telemetry (id, packet_id), INDEX idx_telemetry_id (id) -) -""", +)""", """CREATE TABLE IF NOT EXISTS text ( from_id INT UNSIGNED NOT NULL, to_id INT UNSIGNED NOT NULL, @@ -1167,18 +1893,73 @@ def setup_database(self): ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, INDEX idx_text_message_id (message_id) )""", - """CREATE TABLE IF NOT EXISTS meshlog ( + """CREATE TABLE IF NOT EXISTS meshlog ( topic varchar(255) not null, message text, ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP )""", - """CREATE TABLE IF NOT EXISTS positionlog ( + """CREATE TABLE IF NOT EXISTS positionlog ( + log_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, id INT UNSIGNED NOT NULL, latitude_i INT NOT NULL, longitude_i INT NOT NULL, source VARCHAR(35) NOT NULL, ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id, ts_created) + INDEX idx_positionlog_id_ts (id, ts_created) +)""", + """CREATE TABLE IF NOT EXISTS routing_messages ( + routing_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, + from_id INT UNSIGNED NOT NULL, + to_id INT UNSIGNED, + message_id BIGINT, + request_id BIGINT, + relay_node VARCHAR(10), + hop_limit TINYINT UNSIGNED, + hop_start TINYINT UNSIGNED, + hops_taken TINYINT UNSIGNED, + error_reason INT, + error_description VARCHAR(50), + is_error BOOLEAN DEFAULT FALSE, + success BOOLEAN DEFAULT FALSE, + channel TINYINT UNSIGNED, + rx_snr FLOAT, + rx_rssi FLOAT, + rx_time BIGINT, + routing_data JSON, + uplink_node INT UNSIGNED, + ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + INDEX idx_routing_from (from_id), + INDEX idx_routing_to (to_id), + INDEX idx_routing_time (ts_created), + INDEX idx_routing_error (is_error), + INDEX idx_routing_relay (relay_node), + INDEX 
idx_routing_request (request_id), + INDEX idx_routing_uplink (uplink_node) +)""", + """CREATE TABLE IF NOT EXISTS message_reception ( + id INTEGER PRIMARY KEY AUTO_INCREMENT, + message_id BIGINT NOT NULL, + from_id INTEGER UNSIGNED NOT NULL, + received_by_id INTEGER UNSIGNED NOT NULL, + rx_time INTEGER, + rx_snr REAL, + rx_rssi INTEGER, + hop_limit INTEGER DEFAULT NULL, + hop_start INTEGER DEFAULT NULL, + relay_node VARCHAR(4) DEFAULT NULL, + ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + UNIQUE KEY unique_reception (message_id, received_by_id), + INDEX idx_messagereception_message_receiver (message_id, received_by_id), + INDEX idx_message_reception_relay_node (relay_node) +)""", + """CREATE TABLE IF NOT EXISTS relay_edges ( + from_node VARCHAR(8) NOT NULL, + relay_suffix VARCHAR(2) NOT NULL, + to_node VARCHAR(8) NOT NULL, + first_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + count INT DEFAULT 1, + PRIMARY KEY (from_node, relay_suffix, to_node) )""" ] cur = self.db.cursor() @@ -1188,30 +1969,35 @@ def setup_database(self): # Run migrations before final commit try: - # Use explicit path relative to meshdata.py location import os import sys migrations_path = os.path.join(os.path.dirname(__file__), 'migrations') sys.path.insert(0, os.path.dirname(__file__)) - import migrations.add_message_reception as add_message_reception - add_message_reception.migrate(self.db) + from migrations import MIGRATIONS + + # Run all migrations in order, each with a fresh connection + for migration in MIGRATIONS: + try: + db = mysql.connector.connect( + host=self.config["database"]["host"], + user=self.config["database"]["username"], + password=self.config["database"]["password"], + database=self.config["database"]["database"], + charset="utf8mb4", + connection_timeout=10 + ) + migration(db) + db.close() + logging.info(f"Successfully ran migration: {migration.__name__}") + except Exception as e: + 
logging.error(f"Failed to run migration {migration.__name__}: {e}") + raise except ImportError as e: logging.error(f"Failed to import migration module: {e}") - # Continue with database setup even if migration fails pass except Exception as e: - logging.error(f"Failed to run migration: {e}") + logging.error(f"Failed to run migrations: {e}") raise - try: - import migrations.add_message_reception as add_message_reception - import migrations.add_traceroute_snr as add_traceroute_snr - import migrations.add_traceroute_id as add_traceroute_id - add_message_reception.migrate(self.db) - add_traceroute_snr.migrate(self.db) - add_traceroute_id.migrate(self.db) - except ImportError as e: - logging.error(f"Failed to import migration module: {e}") - pass self.db.commit() @@ -1232,8 +2018,17 @@ def import_nodes(self, filename): if "mapreport" in node and "firmware_version" in node["mapreport"]: record["firmware_version"] = \ node["mapreport"]["firmware_version"] + # Add mapreport-specific fields + record["has_default_channel"] = node["mapreport"].get("has_default_channel") + record["num_online_local_nodes"] = node["mapreport"].get("num_online_local_nodes") + record["region"] = node["mapreport"].get("region") + record["modem_preset"] = node["mapreport"].get("modem_preset") else: record["firmware_version"] = None + record["has_default_channel"] = None + record["num_online_local_nodes"] = None + record["region"] = None + record["modem_preset"] = None records.append(record) for record in records: @@ -1244,15 +2039,41 @@ def import_nodes(self, filename): hw_model, role, firmware_version, + has_default_channel, + num_online_local_nodes, + region, + modem_preset, ts_updated ) -VALUES (%s, %s, %s, %s, %s, %s, NOW()) +VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()) ON DUPLICATE KEY UPDATE long_name = VALUES(long_name), short_name = VALUES(short_name), hw_model = COALESCE(VALUES(hw_model), hw_model), -role = COALESCE(VALUES(role), role), -firmware_version = 
COALESCE(VALUES(firmware_version), firmware_version), +role = CASE + WHEN VALUES(role) IS NOT NULL THEN VALUES(role) + ELSE role +END, +firmware_version = CASE + WHEN VALUES(firmware_version) IS NOT NULL THEN VALUES(firmware_version) + ELSE firmware_version +END, +has_default_channel = CASE + WHEN VALUES(has_default_channel) IS NOT NULL THEN VALUES(has_default_channel) + ELSE has_default_channel +END, +num_online_local_nodes = CASE + WHEN VALUES(num_online_local_nodes) IS NOT NULL THEN VALUES(num_online_local_nodes) + ELSE num_online_local_nodes +END, +region = CASE + WHEN VALUES(region) IS NOT NULL THEN VALUES(region) + ELSE region +END, +modem_preset = CASE + WHEN VALUES(modem_preset) IS NOT NULL THEN VALUES(modem_preset) + ELSE modem_preset +END, ts_updated = VALUES(ts_updated)""" values = ( record["id"], @@ -1260,7 +2081,11 @@ def import_nodes(self, filename): record["short_name"], record["hw_model"], record["role"], - record["firmware_version"] + record["firmware_version"], + record["has_default_channel"], + record["num_online_local_nodes"], + record["region"], + record["modem_preset"] ) cur = self.db.cursor() cur.execute(sql, values) @@ -1308,30 +2133,1324 @@ def import_chat(self, filename): print(f"failed to write record.") self.db.commit() + def get_neighbor_info_links(self, days=1): + """ + Fetch neighbor info links from the database. 
+ + Args: + days: Number of days to look back for neighbor info data + + Returns: + Dictionary with node_id_int as keys and neighbor info data as values + """ + neighbor_info_links = {} # {node_id_int: {'heard': {neighbor_id_int: {data}}, ...}} + + cursor = self.db.cursor(dictionary=True) + # Fetch links from the specified days, adjust interval as needed + cursor.execute(""" + SELECT + ni.id, ni.neighbor_id, ni.snr, + p1.latitude_i as lat1_i, p1.longitude_i as lon1_i, + p2.latitude_i as lat2_i, p2.longitude_i as lon2_i + FROM neighborinfo ni + LEFT OUTER JOIN position p1 ON p1.id = ni.id + LEFT OUTER JOIN position p2 ON p2.id = ni.neighbor_id + WHERE ni.ts_created >= NOW() - INTERVAL %s DAY + """, (days,)) + + for row in cursor.fetchall(): + node_id_int = row['id'] + neighbor_id_int = row['neighbor_id'] + distance = None + if (row['lat1_i'] and row['lon1_i'] and row['lat2_i'] and row['lon2_i']): + distance = round(utils.distance_between_two_points( + row['lat1_i'] / 10000000, row['lon1_i'] / 10000000, + row['lat2_i'] / 10000000, row['lon2_i'] / 10000000 + ), 2) + + link_data = { + 'snr': row['snr'], + 'distance': distance, + 'neighbor_id': neighbor_id_int, + 'source_type': 'neighbor_info' + } + + if node_id_int not in neighbor_info_links: + neighbor_info_links[node_id_int] = {'heard': {}, 'heard_by': {}} + + # Add to node_id's 'heard' list + neighbor_info_links[node_id_int]['heard'][neighbor_id_int] = link_data + + # Add to neighbor_id's 'heard_by' list + if neighbor_id_int not in neighbor_info_links: + neighbor_info_links[neighbor_id_int] = {'heard': {}, 'heard_by': {}} + + heard_by_data = { + 'snr': row['snr'], + 'distance': distance, + 'neighbor_id': node_id_int, + 'source_type': 'neighbor_info' + } + neighbor_info_links[neighbor_id_int]['heard_by'][node_id_int] = heard_by_data + + cursor.close() + return neighbor_info_links + + def get_zero_hop_links(self, cutoff_time): + """ + Fetch zero-hop links from the database. 
+ + Args: + cutoff_time: Unix timestamp for the cutoff time + + Returns: + Dictionary with node_id_int as keys and zero-hop data as values + """ + zero_hop_links = {} # {node_id_int: {'heard': {neighbor_id_int: {data}}, 'heard_by': {neighbor_id_int: {data}}}} + zero_hop_last_heard = {} # Keep track of last heard time for sorting + + cursor = self.db.cursor(dictionary=True) + cursor.execute(""" + SELECT + m.from_id, + m.received_by_id, + MAX(m.rx_snr) as snr, + COUNT(*) as message_count, + MAX(m.rx_time) as last_heard_time, + p_sender.latitude_i as lat_sender_i, + p_sender.longitude_i as lon_sender_i, + p_receiver.latitude_i as lat_receiver_i, + p_receiver.longitude_i as lon_receiver_i + FROM message_reception m + LEFT OUTER JOIN position p_sender ON p_sender.id = m.from_id + LEFT OUTER JOIN position p_receiver ON p_receiver.id = m.received_by_id + WHERE m.rx_time > %s + AND ( + (m.hop_limit IS NULL AND m.hop_start IS NULL) + OR + (m.hop_start - m.hop_limit = 0) + ) + GROUP BY m.from_id, m.received_by_id, + p_sender.latitude_i, p_sender.longitude_i, + p_receiver.latitude_i, p_receiver.longitude_i + """, (cutoff_time,)) + + for row in cursor.fetchall(): + sender_id = row['from_id'] + receiver_id = row['received_by_id'] + last_heard_dt = datetime.datetime.fromtimestamp(row['last_heard_time']) + + # Update last heard time for involved nodes + zero_hop_last_heard[sender_id] = max(zero_hop_last_heard.get(sender_id, datetime.datetime.min), last_heard_dt) + zero_hop_last_heard[receiver_id] = max(zero_hop_last_heard.get(receiver_id, datetime.datetime.min), last_heard_dt) + + distance = None + if (row['lat_sender_i'] and row['lon_sender_i'] and + row['lat_receiver_i'] and row['lon_receiver_i']): + distance = round(utils.distance_between_two_points( + row['lat_sender_i'] / 10000000, row['lon_sender_i'] / 10000000, + row['lat_receiver_i'] / 10000000, row['lon_receiver_i'] / 10000000 + ), 2) + + link_data = { + 'snr': row['snr'], + 'message_count': row['message_count'], + 
'distance': distance, + 'last_heard': last_heard_dt, + 'neighbor_id': sender_id, # For receiver, neighbor is sender + 'source_type': 'zero_hop' + } + + heard_by_data = { + 'snr': row['snr'], + 'message_count': row['message_count'], + 'distance': distance, + 'last_heard': last_heard_dt, + 'neighbor_id': receiver_id, # For sender, neighbor is receiver + 'source_type': 'zero_hop' + } + + # Add to receiver's 'heard' list + if receiver_id not in zero_hop_links: + zero_hop_links[receiver_id] = {'heard': {}, 'heard_by': {}} + zero_hop_links[receiver_id]['heard'][sender_id] = link_data + + # Add to sender's 'heard_by' list + if sender_id not in zero_hop_links: + zero_hop_links[sender_id] = {'heard': {}, 'heard_by': {}} + zero_hop_links[sender_id]['heard_by'][receiver_id] = heard_by_data + + cursor.close() + return zero_hop_links, zero_hop_last_heard + + def get_zero_hop_links_from_traceroute(self, cutoff_time): + """ + Fetch zero-hop links from traceroute data, which is more accurate than message_reception. 
+ + Args: + cutoff_time: Unix timestamp for the cutoff time + + Returns: + Dictionary with node_id_int as keys and zero-hop data as values + """ + zero_hop_links = {} # {node_id_int: {'heard': {neighbor_id_int: {data}}, 'heard_by': {neighbor_id_int: {data}}}} + zero_hop_last_heard = {} # Keep track of last heard time for sorting + + cursor = self.db.cursor(dictionary=True) + cursor.execute(""" + SELECT + t.from_id, + t.to_id, + t.snr_towards, + t.snr_back, + t.route, + t.route_back, + t.ts_created, + t.success, + p1.latitude_i as lat_from_i, + p1.longitude_i as lon_from_i, + p2.latitude_i as lat_to_i, + p2.longitude_i as lon_to_i + FROM traceroute t + LEFT OUTER JOIN position p1 ON p1.id = t.from_id + LEFT OUTER JOIN position p2 ON p2.id = t.to_id + WHERE t.ts_created > FROM_UNIXTIME(%s) + AND t.success = TRUE + AND (t.snr_towards IS NOT NULL OR t.snr_back IS NOT NULL) + """, (cutoff_time,)) + + for row in cursor.fetchall(): + from_id = row['from_id'] + to_id = row['to_id'] + ts_created = row['ts_created'] + + # Parse route data to determine if this is a zero-hop connection + route = row['route'] + route_back = row['route_back'] + + # Check if this is a zero-hop connection (empty routes or no intermediate nodes) + is_zero_hop = False + if route is None or route == '' or route == 'null': + forward_hops = 0 + else: + try: + forward_hops = len([x for x in route.split(';') if x.strip()]) + except (ValueError, AttributeError): + forward_hops = 0 + + if route_back is None or route_back == '' or route_back == 'null': + return_hops = 0 + else: + try: + return_hops = len([x for x in route_back.split(';') if x.strip()]) + except (ValueError, AttributeError): + return_hops = 0 + + # This is a zero-hop connection if both routes are empty (direct connection) + is_zero_hop = (forward_hops == 0 and return_hops == 0) + + # Skip if this is not a zero-hop connection + if not is_zero_hop: + continue + + # Log zero-hop detection for debugging + if self.debug: + logging.debug(f"Found 
zero-hop traceroute: {from_id} -> {to_id} (forward_hops={forward_hops}, return_hops={return_hops})") + + # Convert timestamp to datetime if needed + if isinstance(ts_created, datetime.datetime): + last_heard_dt = ts_created + else: + last_heard_dt = datetime.datetime.fromtimestamp(ts_created) + + # Update last heard time for involved nodes + zero_hop_last_heard[from_id] = max(zero_hop_last_heard.get(from_id, datetime.datetime.min), last_heard_dt) + zero_hop_last_heard[to_id] = max(zero_hop_last_heard.get(to_id, datetime.datetime.min), last_heard_dt) + + # Calculate distance if positions available + distance = None + if (row['lat_from_i'] and row['lon_from_i'] and + row['lat_to_i'] and row['lon_to_i']): + distance = round(utils.distance_between_two_points( + row['lat_from_i'] / 10000000, row['lon_from_i'] / 10000000, + row['lat_to_i'] / 10000000, row['lon_to_i'] / 10000000 + ), 2) + + # Parse SNR values + snr_towards = None + snr_back = None + if row['snr_towards']: + try: + snr_values = [float(s) for s in row['snr_towards'].split(";")] + snr_towards = max(snr_values) if snr_values else None + except (ValueError, TypeError): + pass + if row['snr_back']: + try: + snr_values = [float(s) for s in row['snr_back'].split(";")] + snr_back = max(snr_values) if snr_values else None + except (ValueError, TypeError): + pass + + # Use the best SNR value + best_snr = max(filter(None, [snr_towards, snr_back])) if any([snr_towards, snr_back]) else None + + link_data = { + 'snr': best_snr, + 'message_count': 1, # Each traceroute is one "message" + 'distance': distance, + 'last_heard': last_heard_dt, + 'neighbor_id': from_id, # For receiver, neighbor is sender + 'source_type': 'traceroute_zero_hop', + 'snr_towards': snr_towards, + 'snr_back': snr_back, + 'forward_hops': forward_hops, + 'return_hops': return_hops + } + + heard_by_data = { + 'snr': best_snr, + 'message_count': 1, # Each traceroute is one "message" + 'distance': distance, + 'last_heard': last_heard_dt, + 'neighbor_id': 
to_id, # For sender, neighbor is receiver + 'source_type': 'traceroute_zero_hop', + 'snr_towards': snr_towards, + 'snr_back': snr_back, + 'forward_hops': forward_hops, + 'return_hops': return_hops + } + + # Add to receiver's 'heard' list + if to_id not in zero_hop_links: + zero_hop_links[to_id] = {'heard': {}, 'heard_by': {}} + zero_hop_links[to_id]['heard'][from_id] = link_data + + # Add to sender's 'heard_by' list + if from_id not in zero_hop_links: + zero_hop_links[from_id] = {'heard': {}, 'heard_by': {}} + zero_hop_links[from_id]['heard_by'][to_id] = heard_by_data + + cursor.close() + return zero_hop_links, zero_hop_last_heard + + def get_graph_data(self, view_type='merged', days=1, zero_hop_timeout=43200): + """ + Get graph data for visualization. + + Args: + view_type: 'neighbor_info', 'zero_hop', or 'merged' + days: Number of days to look back for neighbor info data + zero_hop_timeout: Timeout in seconds for zero-hop data + + Returns: + Dictionary with nodes and edges for graph visualization + """ + nodes = self.get_nodes() + nodes_for_graph = [] + edges_for_graph = [] + active_node_ids_hex = set() # Keep track of nodes to include in the graph + + # Get neighbor info links + neighbor_info_links = {} + if view_type in ['neighbor_info', 'merged']: + neighbor_info_links = self.get_neighbor_info_links(days) + + # Add involved nodes to the active set if they exist in our main nodes list + for node_id_int, links in neighbor_info_links.items(): + node_id_hex = utils.convert_node_id_from_int_to_hex(node_id_int) + if node_id_hex in nodes and nodes[node_id_hex].get("active"): + active_node_ids_hex.add(node_id_hex) + + for neighbor_id_int in links.get('heard', {}): + neighbor_id_hex = utils.convert_node_id_from_int_to_hex(neighbor_id_int) + if neighbor_id_hex in nodes and nodes[neighbor_id_hex].get("active"): + active_node_ids_hex.add(neighbor_id_hex) + + # Get zero-hop links + zero_hop_links = {} + if view_type in ['zero_hop', 'merged']: + cutoff_time = 
int(time.time()) - zero_hop_timeout + zero_hop_links, _ = self.get_zero_hop_links(cutoff_time) + + # Add involved nodes to the active set if they exist + for node_id_int, links in zero_hop_links.items(): + node_id_hex = utils.convert_node_id_from_int_to_hex(node_id_int) + if node_id_hex in nodes and nodes[node_id_hex].get("active"): + active_node_ids_hex.add(node_id_hex) + + for neighbor_id_int in links.get('heard', {}): + neighbor_id_hex = utils.convert_node_id_from_int_to_hex(neighbor_id_int) + if neighbor_id_hex in nodes and nodes[neighbor_id_hex].get("active"): + active_node_ids_hex.add(neighbor_id_hex) + + # Build nodes for graph + for node_id_hex in active_node_ids_hex: + node_data = nodes[node_id_hex] + + # Get HW Model Name safely + hw_model = node_data.get('hw_model') + hw_model_name = get_hardware_model_name(hw_model) + + # Get Icon URL + node_name_for_icon = node_data.get('long_name', node_data.get('short_name', '')) + icon_url = utils.graph_icon(node_name_for_icon) + + nodes_for_graph.append({ + 'id': node_id_hex, + 'short': node_data.get('short_name', 'UNK'), + 'icon_url': icon_url, + 'node_data': { + 'long_name': node_data.get('long_name', 'Unknown Name'), + 'hw_model': hw_model_name, + 'last_seen': time_ago(node_data.get('ts_seen')) if node_data.get('ts_seen') else 'Never' + } + }) + + # Build edges for graph + added_node_pairs = set() + + # Add Neighbor Info Edges + if view_type in ['neighbor_info', 'merged']: + for node_id_int, links in neighbor_info_links.items(): + for neighbor_id_int, data in links.get('heard', {}).items(): + from_node_hex = utils.convert_node_id_from_int_to_hex(node_id_int) + to_node_hex = utils.convert_node_id_from_int_to_hex(neighbor_id_int) + + # Ensure both nodes are active and in our graph node list + if from_node_hex in active_node_ids_hex and to_node_hex in active_node_ids_hex: + node_pair = tuple(sorted((from_node_hex, to_node_hex))) + # Add edge only if this node pair hasn't been added yet + if node_pair not in 
added_node_pairs: + edges_for_graph.append({ + 'from': from_node_hex, + 'to': to_node_hex, + 'edge_data': data # Contains snr, distance, source_type='neighbor_info' + }) + added_node_pairs.add(node_pair) # Mark pair as added + + # Add Zero Hop Edges (only if not already added via Neighbor Info) + if view_type in ['zero_hop', 'merged']: + for receiver_id_int, links in zero_hop_links.items(): + for sender_id_int, data in links.get('heard', {}).items(): + # For zero hop, 'from' is sender, 'to' is receiver + from_node_hex = utils.convert_node_id_from_int_to_hex(sender_id_int) + to_node_hex = utils.convert_node_id_from_int_to_hex(receiver_id_int) + + # Ensure both nodes are active and in our graph node list + if from_node_hex in active_node_ids_hex and to_node_hex in active_node_ids_hex: + node_pair = tuple(sorted((from_node_hex, to_node_hex))) + # Add edge only if this node pair hasn't been added yet + if node_pair not in added_node_pairs: + edges_for_graph.append({ + 'from': from_node_hex, + 'to': to_node_hex, + 'edge_data': data # Contains snr, distance, source_type='zero_hop' + }) + added_node_pairs.add(node_pair) # Mark pair as added + + # Combine nodes and edges + graph_data = { + 'nodes': nodes_for_graph, + 'edges': edges_for_graph + } + + return graph_data + + def get_neighbors_data(self, view_type='neighbor_info', days=1, zero_hop_timeout=43200): + """ + Get neighbors data for the neighbors page. 
+ + Args: + view_type: 'neighbor_info', 'zero_hop', or 'merged' + days: Number of days to look back for neighbor info data + zero_hop_timeout: Timeout in seconds for zero-hop data + + Returns: + Dictionary with node_id_hex as keys and neighbor data as values + """ + nodes = self.get_nodes() + if not nodes: + return {} + + # Get neighbor info links + neighbor_info_links = {} + if view_type in ['neighbor_info', 'merged']: + neighbor_info_links = self.get_neighbor_info_links(days) + + # Get zero-hop links + zero_hop_links = {} + zero_hop_last_heard = {} + if view_type in ['zero_hop', 'merged']: + cutoff_time = int(time.time()) - zero_hop_timeout + zero_hop_links, zero_hop_last_heard = self.get_zero_hop_links(cutoff_time) + + # Dictionary to hold the final data for active nodes + active_nodes_data = {} + + # Combine data for active nodes based on view type + for node_id_hex, node_base_data in nodes.items(): + if not node_base_data.get("active"): + continue # Skip inactive nodes + + node_id_int = utils.convert_node_id_from_hex_to_int(node_id_hex) + + # Only copy the fields we need instead of the entire dict + final_node_data = { + 'id': node_base_data.get('id'), + 'long_name': node_base_data.get('long_name'), + 'short_name': node_base_data.get('short_name'), + 'hw_model': node_base_data.get('hw_model'), + 'role': node_base_data.get('role'), + 'firmware_version': node_base_data.get('firmware_version'), + 'owner': node_base_data.get('owner'), + 'ts_seen': node_base_data.get('ts_seen'), + 'ts_created': node_base_data.get('ts_created'), + 'ts_updated': node_base_data.get('ts_updated'), + 'active': node_base_data.get('active'), + 'last_seen': node_base_data.get('last_seen'), + 'channel': node_base_data.get('channel'), + 'position': node_base_data.get('position'), + 'telemetry': node_base_data.get('telemetry') + } + + # Initialize lists + final_node_data['neighbors'] = [] + final_node_data['heard_by_neighbors'] = [] + final_node_data['zero_hop_neighbors'] = [] + 
final_node_data['heard_by_zero_hop'] = [] + + has_neighbor_info = node_id_int in neighbor_info_links + has_zero_hop_info = node_id_int in zero_hop_links + + # Determine overall last heard time for sorting + last_heard_zero_hop = max([d['last_heard'] for d in zero_hop_links[node_id_int]['heard'].values()], default=datetime.datetime.min) if has_zero_hop_info else datetime.datetime.min + last_heard_by_zero_hop = max([d['last_heard'] for d in zero_hop_links[node_id_int]['heard_by'].values()], default=datetime.datetime.min) if has_zero_hop_info else datetime.datetime.min + + node_ts_seen = datetime.datetime.fromtimestamp(node_base_data['ts_seen']) if node_base_data.get('ts_seen') else datetime.datetime.min + + final_node_data['last_heard'] = max( + node_ts_seen, + zero_hop_last_heard.get(node_id_int, datetime.datetime.min) # Use precalculated zero hop time + # We don't need to include neighbor info times here as they aren't distinct per-link + ) + + include_node = False + if view_type in ['neighbor_info', 'merged']: + if has_neighbor_info: + final_node_data['neighbors'] = list(neighbor_info_links[node_id_int]['heard'].values()) + final_node_data['heard_by_neighbors'] = list(neighbor_info_links[node_id_int]['heard_by'].values()) + if final_node_data['neighbors'] or final_node_data['heard_by_neighbors']: + include_node = True + + if view_type in ['zero_hop', 'merged']: + if has_zero_hop_info: + final_node_data['zero_hop_neighbors'] = list(zero_hop_links[node_id_int]['heard'].values()) + final_node_data['heard_by_zero_hop'] = list(zero_hop_links[node_id_int]['heard_by'].values()) + if final_node_data['zero_hop_neighbors'] or final_node_data['heard_by_zero_hop']: + include_node = True + + if include_node: + active_nodes_data[node_id_hex] = final_node_data + + # Sort final results by last heard time + active_nodes_data = dict(sorted( + active_nodes_data.items(), + key=lambda item: item[1].get('last_heard', datetime.datetime.min), + reverse=True + )) + + return 
active_nodes_data + + def is_position_fresh(self, position, prune_threshold, now=None): + """ + Returns True if the position dict/object has a position_time within prune_threshold seconds of now. + """ + if not position: + return False + # Accept both dict and object + position_time = None + if isinstance(position, dict): + position_time = position.get('position_time') + else: + position_time = getattr(position, 'position_time', None) + if not position_time: + return False + if isinstance(position_time, datetime.datetime): + position_time = position_time.timestamp() + if now is None: + now = time.time() + return (now - position_time) <= prune_threshold + + def get_telemetry_for_node(self, node_id): + """ + Return the last 24 hours of telemetry for the given node, ordered by timestamp ascending. + Each record is a dict with keys: air_util_tx, battery_level, channel_utilization, ts_created, etc. + """ + telemetry = [] + sql = """ + SELECT + DATE_FORMAT(ts_created, '%Y-%m-%d %H:%i:00') as interval_start, + FLOOR(MINUTE(ts_created) / 10) as ten_minute_block, + AVG(air_util_tx) as air_util_tx, + AVG(battery_level) as battery_level, + AVG(channel_utilization) as channel_utilization, + UNIX_TIMESTAMP(MIN(ts_created)) as ts_created + FROM telemetry + WHERE id = %s AND ts_created >= NOW() - INTERVAL 1 DAY + GROUP BY interval_start + ORDER BY interval_start ASC + """ + params = (node_id,) + cur = self.db.cursor() + cur.execute(sql, params) + rows = cur.fetchall() + column_names = [desc[0] for desc in cur.description] + for row in rows: + record = {} + for i in range(len(row)): + value = row[i] + if isinstance(value, datetime.datetime): + record[column_names[i]] = int(value.timestamp()) + else: + record[column_names[i]] = value + telemetry.append(record) + cur.close() + return telemetry + + def get_environmental_telemetry_for_node(self, node_id, days=1): + """ + Return environmental telemetry for the given node, ordered by timestamp ascending. 
+ Each record is a dict with keys: temperature, barometric_pressure, relative_humidity, gas_resistance, voltage, current, ts_created. + + Args: + node_id: The node ID to get telemetry for + days: Number of days to look back (default 1 for 24 hours) + """ + telemetry = [] + sql = """ + SELECT + temperature, + barometric_pressure, + relative_humidity, + gas_resistance, + voltage, + current, + UNIX_TIMESTAMP(ts_created) as ts_created + FROM telemetry + WHERE id = %s AND ts_created >= NOW() - INTERVAL %s DAY + ORDER BY ts_created ASC + """ + params = (node_id, days) + cur = self.db.cursor() + cur.execute(sql, params) + rows = cur.fetchall() + column_names = [desc[0] for desc in cur.description] + for row in rows: + record = {} + for i in range(len(row)): + value = row[i] + if isinstance(value, datetime.datetime): + record[column_names[i]] = int(value.timestamp()) + else: + record[column_names[i]] = value + telemetry.append(record) + cur.close() + return telemetry + + def get_positions_at_time(self, node_ids, timestamp): + """Get the closest position for each node using a single reused cursor.""" + if not node_ids: + return {} + results = {} + cur = self.db.cursor(dictionary=True) + for node_id in node_ids: + pos = self.get_position_at_time(node_id, timestamp, cur) + if pos: + results[node_id] = pos + cur.close() + return results + + def get_position_at_time(self, node_id, target_timestamp, cur=None): + """Retrieves the position record from positionlog for a node that is closest to, but not after, the target timestamp.""" + position = {} + close_cur = False + if cur is None: + cur = self.db.cursor(dictionary=True) + close_cur = True + try: + target_dt = datetime.datetime.fromtimestamp(target_timestamp) + sql = """SELECT latitude_i, longitude_i, ts_created + FROM positionlog + WHERE id = %s + ORDER BY ABS(TIMESTAMPDIFF(SECOND, ts_created, %s)) ASC + LIMIT 1""" + params = (node_id, target_dt) + cur.execute(sql, params) + row = cur.fetchone() + if row: + position = { + 
"latitude_i": row["latitude_i"], + "longitude_i": row["longitude_i"], + "position_time": row["ts_created"].timestamp() if isinstance(row["ts_created"], datetime.datetime) else row["ts_created"], + "latitude": row["latitude_i"] / 10000000 if row["latitude_i"] else None, + "longitude": row["longitude_i"] / 10000000 if row["longitude_i"] else None + } + except mysql.connector.Error as err: + logging.error(f"Database error fetching nearest position for {node_id}: {err}") + except Exception as e: + logging.error(f"Error fetching nearest position for {node_id}: {e}") + finally: + if close_cur: + cur.close() + return position + + def get_reception_details_batch(self, message_id, receiver_ids): + """Get reception details for multiple receivers in one query""" + if not receiver_ids: + return {} + + # Handle single receiver case + if len(receiver_ids) == 1: + query = """ + SELECT received_by_id, rx_snr, rx_rssi, rx_time, hop_limit, hop_start, relay_node + FROM message_reception + WHERE message_id = %s AND received_by_id = %s + """ + params = (message_id, receiver_ids[0]) + else: + placeholders = ','.join(['%s'] * len(receiver_ids)) + query = f""" + SELECT received_by_id, rx_snr, rx_rssi, rx_time, hop_limit, hop_start, relay_node + FROM message_reception + WHERE message_id = %s AND received_by_id IN ({placeholders}) + """ + params = (message_id, *receiver_ids) + + cur = None + try: + cur = self.db.cursor(dictionary=True) + cur.execute(query, params) + return {row['received_by_id']: row for row in cur.fetchall()} + except Exception as e: + logging.error(f"Error fetching reception details: {e}") + return {} + finally: + if cur: + cur.close() + + def get_heard_by_from_neighbors(self, node_id): + """ + Get a list of nodes that have the given node_id in their neighbor list. 
+ """ + sql = """ + SELECT id, snr + FROM neighborinfo + WHERE neighbor_id = %s + """ + params = (node_id,) + cur = self.db.cursor(dictionary=True) + cur.execute(sql, params) + heard_by = cur.fetchall() + cur.close() + return heard_by + + def get_relay_network_data(self, days=30): + """Infer relay network data from message_reception table, not relay_edges.""" + cursor = None + try: + if not self.db or not self.db.is_connected(): + self.connect_db() + cursor = self.db.cursor(dictionary=True) + if days > 0: + cutoff = int(time.time()) - days * 86400 + cursor.execute(""" + SELECT message_id, from_id, received_by_id, relay_node, rx_time, hop_limit, hop_start + FROM message_reception + WHERE rx_time > %s + """, (cutoff,)) + else: + cursor.execute(""" + SELECT message_id, from_id, received_by_id, relay_node, rx_time, hop_limit, hop_start + FROM message_reception + """) + receptions = cursor.fetchall() + nodes_dict = self.get_nodes() + node_ids_set = set(nodes_dict.keys()) + node_id_ints = {int(k, 16): k for k in node_ids_set} + + # 1. 
Build zero-hop (direct) network - prefer traceroute data when available + zero_hop_edges = [] + zero_hop_source = {} # Track source of zero-hop detection + + # First, try to get zero-hop data from traceroute (more accurate) + cutoff_time = int(time.time()) - (days * 86400 if days > 0 else 86400) + traceroute_zero_hop_links, _ = self.get_zero_hop_links_from_traceroute(cutoff_time) + + if self.debug: + logging.debug(f"Found {len(traceroute_zero_hop_links)} nodes with traceroute-based zero-hop connections") + + # Add traceroute-based zero-hop edges + for node_id_int, links in traceroute_zero_hop_links.items(): + for neighbor_id_int, data in links.get('heard', {}).items(): + sender_hex = format(neighbor_id_int, '08x') + receiver_hex = format(node_id_int, '08x') + zero_hop_edges.append((sender_hex, receiver_hex)) + zero_hop_source[(sender_hex, receiver_hex)] = 'traceroute_zero_hop' + zero_hop_source[(receiver_hex, sender_hex)] = 'traceroute_zero_hop' + + # Store hop information for later use in edge creation + if 'forward_hops' in data and 'return_hops' in data: + zero_hop_source[(sender_hex, receiver_hex) + ('_hops',)] = (data['forward_hops'], data['return_hops']) + zero_hop_source[(receiver_hex, sender_hex) + ('_hops',)] = (data['forward_hops'], data['return_hops']) + + # Fallback to message_reception data for nodes not covered by traceroute + for rec in receptions: + hop_limit = rec['hop_limit'] + hop_start = rec['hop_start'] + sender = rec['from_id'] + receiver = rec['received_by_id'] + sender_hex = format(sender, '08x') + receiver_hex = format(receiver, '08x') + edge_key = (sender_hex, receiver_hex) + reverse_edge_key = (receiver_hex, sender_hex) + + if (hop_limit is None and hop_start is None) or (hop_start is not None and hop_limit is not None and hop_start - hop_limit == 0): + # Only add if not already covered by traceroute data + if edge_key not in zero_hop_source and reverse_edge_key not in zero_hop_source: + zero_hop_edges.append(edge_key) + 
zero_hop_source[edge_key] = 'zero_hop' + zero_hop_source[reverse_edge_key] = 'zero_hop' + + # Build zero-hop adjacency for real nodes + zero_hop_graph = defaultdict(set) + for a, b in zero_hop_edges: + zero_hop_graph[a].add(b) + zero_hop_graph[b].add(a) + + # 2. Group relay receptions by suffix + relay_suffix_edges = defaultdict(list) + for rec in receptions: + sender = rec['from_id'] + receiver = rec['received_by_id'] + relay_node = rec['relay_node'] + rx_time = rec['rx_time'] + hop_limit = rec['hop_limit'] + hop_start = rec['hop_start'] + sender_hex = format(sender, '08x') + receiver_hex = format(receiver, '08x') + if relay_node: + relay_suffix = relay_node[-2:].lower() + relay_suffix_edges[relay_suffix].append((sender_hex, receiver_hex, rx_time, relay_node)) + elif (hop_limit is None and hop_start is None) or (hop_start is not None and hop_limit is not None and hop_start - hop_limit == 0): + relay_suffix_edges[None].append((sender_hex, receiver_hex, rx_time, None)) + + # 3. Find connected components for each relay suffix + relay_suffix_components = {} + relay_suffix_node_to_component = {} + for suffix, edges in relay_suffix_edges.items(): + graph = defaultdict(set) + for from_hex, to_hex, _, _ in edges: + graph[from_hex].add(to_hex) + graph[to_hex].add(from_hex) + visited = set() + components = [] + for node in graph: + if node in visited: + continue + queue = deque([node]) + comp = set() + while queue: + n = queue.popleft() + if n in visited: + continue + visited.add(n) + comp.add(n) + for neighbor in graph[n]: + if neighbor not in visited: + queue.append(neighbor) + if comp: + components.append(comp) + for idx, comp in enumerate(components): + relay_suffix_components[(suffix, idx)] = comp + for n in comp: + relay_suffix_node_to_component[(suffix, n)] = idx + + edge_map = {} + virtual_nodes = {} + endpoint_nodes = set() + node_stats = {} + + # Helper function to ensure a node is included in endpoint_nodes + def ensure_node_included(node_id): + if node_id not in 
endpoint_nodes: + endpoint_nodes.add(node_id) + # Initialize stats for this node if not already present + if node_id not in node_stats: + node_stats[node_id] = {'message_count': 0, 'relay_count': 0} + + # 4. Component-local consolidation + for suffix, edges in relay_suffix_edges.items(): + # Group edges by component + comp_edges = defaultdict(list) + for from_hex, to_hex, rx_time, relay_node in edges: + comp_idx = relay_suffix_node_to_component.get((suffix, from_hex), 0) + comp_edges[comp_idx].append((from_hex, to_hex, rx_time, relay_node)) + for comp_idx, comp_edge_list in comp_edges.items(): + comp_nodes = relay_suffix_components[(suffix, comp_idx)] + # Find all real nodes in the component with this suffix + real_nodes_with_suffix = [n for n in comp_nodes if n in node_ids_set and n[-2:] == (suffix if suffix is not None else '')] + if len(real_nodes_with_suffix) == 1 and suffix is not None: + # Consolidate: use the real node for all edges and endpoints in this component + unique_real_node = real_nodes_with_suffix[0] + for from_hex, to_hex, rx_time, relay_node in comp_edge_list: + edge_from = unique_real_node + edge_to = to_hex if to_hex != from_hex else unique_real_node + edge_key = (edge_from, edge_to) + if edge_key not in edge_map: + # Get hop information if available from traceroute data + hop_info = zero_hop_source.get(edge_key + ('_hops',), (0, 0)) + forward_hops, return_hops = hop_info if isinstance(hop_info, tuple) else (0, 0) + + edge_map[edge_key] = { + 'count': 0, + 'relay_suffix': suffix, + 'first_seen': rx_time, + 'last_seen': rx_time, + 'virtual_id': None, + 'source_type': zero_hop_source.get(edge_key, 'relay'), + 'forward_hops': forward_hops, + 'return_hops': return_hops + } + edge_map[edge_key]['count'] += 1 + if rx_time: + if edge_map[edge_key]['first_seen'] is None or rx_time < edge_map[edge_key]['first_seen']: + edge_map[edge_key]['first_seen'] = rx_time + if edge_map[edge_key]['last_seen'] is None or rx_time > edge_map[edge_key]['last_seen']: + 
edge_map[edge_key]['last_seen'] = rx_time + ensure_node_included(edge_from) + ensure_node_included(edge_to) + else: + # Use a virtual node for this component + virtual_id = f"relay_{suffix}_{comp_idx+1}" if suffix is not None else None + for from_hex, to_hex, rx_time, relay_node in comp_edge_list: + edge_from = virtual_id if virtual_id else from_hex + if virtual_id and virtual_id not in virtual_nodes: + virtual_nodes[virtual_id] = { + 'id': virtual_id, + 'relay_suffix': suffix, + 'short_name': virtual_id, + 'long_name': f"Relay {suffix} ({comp_idx+1})" if suffix is not None else 'Relay', + 'hw_model': 'Virtual', + 'firmware_version': None, + 'role': None, + 'owner': None + } + edge_to = to_hex + edge_key = (edge_from, edge_to) + if edge_key not in edge_map: + edge_map[edge_key] = { + 'count': 0, + 'relay_suffix': suffix, + 'first_seen': rx_time, + 'last_seen': rx_time, + 'virtual_id': virtual_id if suffix is not None else None, + 'source_type': zero_hop_source.get(edge_key, 'relay') + } + edge_map[edge_key]['count'] += 1 + if rx_time: + if edge_map[edge_key]['first_seen'] is None or rx_time < edge_map[edge_key]['first_seen']: + edge_map[edge_key]['first_seen'] = rx_time + if edge_map[edge_key]['last_seen'] is None or rx_time > edge_map[edge_key]['last_seen']: + edge_map[edge_key]['last_seen'] = rx_time + ensure_node_included(edge_from) + ensure_node_included(edge_to) + + node_first_seen = {} + node_last_seen = {} + for (from_node, to_node), edge in edge_map.items(): + for node in [from_node, to_node]: + if edge['first_seen']: + if node not in node_first_seen or edge['first_seen'] < node_first_seen[node]: + node_first_seen[node] = edge['first_seen'] + if edge['last_seen']: + if node not in node_last_seen or edge['last_seen'] > node_last_seen[node]: + node_last_seen[node] = edge['last_seen'] + + nodes = [] + for (from_node, to_node), edge in edge_map.items(): + node_stats[from_node]['message_count'] += edge['count'] + node_stats[to_node]['relay_count'] += 
edge['count'] + + # Create nodes list, including placeholder nodes for missing nodes + for node_hex in endpoint_nodes: + if node_hex in nodes_dict: + # Real node exists in database + node = nodes_dict[node_hex] + node_name_for_icon = node.get('long_name') or node.get('short_name', '') + icon_url = utils.graph_icon(node_name_for_icon) + nodes.append({ + 'id': node_hex, + 'long_name': node.get('long_name') or f"Node {node_hex}", + 'short_name': node.get('short_name') or f"Node {node_hex}", + 'hw_model': node.get('hw_model') or 'Unknown', + 'firmware_version': node.get('firmware_version'), + 'role': node.get('role'), + 'owner': node.get('owner'), + 'last_seen': node.get('ts_seen'), + 'first_seen': node_first_seen.get(node_hex), + 'last_relay': node_last_seen.get(node_hex), + 'message_count': node_stats[node_hex]['message_count'], + 'relay_count': node_stats[node_hex]['relay_count'], + 'icon_url': icon_url + }) + elif node_hex.startswith('relay_') and node_hex in virtual_nodes: + # Virtual relay node + nodes.append({ + 'id': virtual_nodes[node_hex]['id'], + 'short_name': virtual_nodes[node_hex]['short_name'], + 'long_name': virtual_nodes[node_hex]['long_name'], + 'hw_model': virtual_nodes[node_hex]['hw_model'], + 'hw_model_name': get_hardware_model_name(virtual_nodes[node_hex]['hw_model']) if virtual_nodes[node_hex]['hw_model'] is not None else None, + 'last_seen': node_last_seen.get(node_hex), + 'first_seen': node_first_seen.get(node_hex), + 'last_relay': node_last_seen.get(node_hex), + 'message_count': node_stats[node_hex]['message_count'], + 'relay_count': node_stats[node_hex]['relay_count'], + 'icon_url': None + }) + else: + # Create placeholder node for missing node (e.g., node with MQTT off) + nodes.append({ + 'id': node_hex, + 'long_name': f"Node {node_hex} (Offline)", + 'short_name': f"{node_hex[-4:]} (Off)", + 'hw_model': 'Unknown', + 'firmware_version': None, + 'role': None, + 'owner': None, + 'last_seen': node_last_seen.get(node_hex), + 'first_seen': 
node_first_seen.get(node_hex), + 'last_relay': node_last_seen.get(node_hex), + 'message_count': node_stats[node_hex]['message_count'], + 'relay_count': node_stats[node_hex]['relay_count'], + 'icon_url': None + }) + + # Create a set of valid node IDs for edge filtering + valid_node_ids = {node['id'] for node in nodes} + + # Filter edges to only include edges where both nodes exist in our nodes list + edges = [] + for (from_node, to_node), edge in edge_map.items(): + # Only include edge if both source and target nodes are in our nodes list + if from_node in valid_node_ids and to_node in valid_node_ids: + edges.append({ + 'id': f"{from_node}-{to_node}", + 'from_node': from_node, + 'to_node': to_node, + 'relay_suffix': edge['relay_suffix'], + 'message_count': edge['count'], + 'first_seen': edge['first_seen'], + 'last_seen': edge['last_seen'], + 'source_type': edge.get('source_type', 'relay'), + 'forward_hops': edge.get('forward_hops', 0), + 'return_hops': edge.get('return_hops', 0) + }) + total_nodes = len(nodes) + total_edges = len(edges) + total_messages = sum(e['message_count'] for e in edges) + avg_hops = total_edges / total_nodes if total_nodes > 0 else 0 + stats = { + 'total_nodes': total_nodes, + 'total_edges': total_edges, + 'total_messages': total_messages, + 'avg_hops': avg_hops + } + return { + 'nodes': nodes, + 'edges': edges, + 'stats': stats + } + except Exception as e: + logging.error(f"Error in get_relay_network_data: {e}") + # Return empty data structure on error + return { + 'nodes': [], + 'edges': [], + 'stats': { + 'total_nodes': 0, + 'total_edges': 0, + 'total_messages': 0, + 'avg_hops': 0 + } + } + finally: + if cursor: + try: + cursor.close() + except Exception as e: + logging.error(f"Error closing cursor: {e}") + + def store_routing(self, data, topic=None): + """Store routing information from routing packets.""" + from_id = self.verify_node(data["from"]) + to_id = self.verify_node(data["to"]) if "to" in data else None + payload = 
dict(data["decoded"]["json_payload"]) + + # Extract routing data + routing_data = payload.get("routing_data", {}) + error_reason = payload.get("error_reason") + request_id = data["decoded"].get("request_id") + if request_id is None: + request_id = payload.get("request_id") + relay_node = payload.get("relay_node") + hop_limit = payload.get("hop_limit") + hop_start = payload.get("hop_start") + hops_taken = payload.get("hops_taken") + is_error = payload.get("is_error", False) + success = payload.get("success", False) + error_description = payload.get("error_description") + + # Convert relay_node to hex format if it's a number + if relay_node and isinstance(relay_node, int): + relay_node = f"{relay_node:04x}" + + # Extract uplink node from topic if available + uplink_node = None + if topic: + # Topic format: msh/US/2/e/LongFast/!433f1f98 + # Extract the node ID after the last '!' + if '!' in topic: + uplink_hex = topic.split('!')[-1] + try: + uplink_node = self.int_id(uplink_hex) + except: + uplink_node = None + + # Get message metadata + message_id = data.get("id") + channel = data.get("channel") + rx_snr = data.get("rx_snr") + rx_rssi = data.get("rx_rssi") + rx_time = data.get("rx_time") + + sql = """INSERT INTO routing_messages + (from_id, to_id, message_id, request_id, relay_node, hop_limit, hop_start, + hops_taken, error_reason, error_description, is_error, success, channel, + rx_snr, rx_rssi, rx_time, routing_data, uplink_node, ts_created) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()) + ON DUPLICATE KEY UPDATE + relay_node = VALUES(relay_node), + hop_limit = VALUES(hop_limit), + hop_start = VALUES(hop_start), + hops_taken = VALUES(hops_taken), + error_reason = VALUES(error_reason), + error_description = VALUES(error_description), + is_error = VALUES(is_error), + success = VALUES(success), + rx_snr = VALUES(rx_snr), + rx_rssi = VALUES(rx_rssi), + rx_time = VALUES(rx_time), + routing_data = VALUES(routing_data), + 
uplink_node = VALUES(uplink_node)""" + + params = ( + from_id, + to_id, + message_id, + request_id, + relay_node, + hop_limit, + hop_start, + hops_taken, + error_reason, + error_description, + is_error, + success, + channel, + rx_snr, + rx_rssi, + rx_time, + json.dumps(routing_data, cls=CustomJSONEncoder), + uplink_node + ) + + cur = self.db.cursor() + cur.execute(sql, params) + self.db.commit() + cur.close() + + if self.debug: + logging.info(f"Stored routing message: from={from_id}, to={to_id}, error={error_description}, success={success}") + + def get_routing_messages(self, page=1, per_page=50, error_only=False, days=7): + """Get routing messages with optional filtering.""" + offset = (page - 1) * per_page + + # Build WHERE clause + where_conditions = ["rm.ts_created >= NOW() - INTERVAL %s DAY"] + params = [days] + + if error_only: + where_conditions.append("rm.is_error = TRUE") + + where_clause = " AND ".join(where_conditions) + + # Get total count + count_sql = f"SELECT COUNT(*) FROM routing_messages rm WHERE {where_clause}" + cur = self.db.cursor() + cur.execute(count_sql, params) + total_count = cur.fetchone()[0] + + # Get paginated results + sql = f"""SELECT rm.*, + n1.long_name as from_name, n1.short_name as from_short, + n2.long_name as to_name, n2.short_name as to_short, + n3.long_name as uplink_name, n3.short_name as uplink_short + FROM routing_messages rm + LEFT JOIN nodeinfo n1 ON rm.from_id = n1.id + LEFT JOIN nodeinfo n2 ON rm.to_id = n2.id + LEFT JOIN nodeinfo n3 ON rm.uplink_node = n3.id + WHERE {where_clause} + ORDER BY rm.ts_created DESC + LIMIT %s OFFSET %s""" + + cur.execute(sql, params + [per_page, offset]) + rows = cur.fetchall() + columns = [desc[0] for desc in cur.description] if cur.description else [] + cur.close() + messages = [] + for row in rows: + message = dict(zip(columns, row)) + # Parse routing_data JSON + if message.get('routing_data'): + try: + message['routing_data'] = json.loads(message['routing_data']) + except: + 
message['routing_data'] = {} + # Add hex IDs for template use + if message.get('from_id'): + message['from_id_hex'] = self.hex_id(message['from_id']) + if message.get('to_id'): + message['to_id_hex'] = self.hex_id(message['to_id']) + if message.get('uplink_node'): + message['uplink_node_hex'] = self.hex_id(message['uplink_node']) + messages.append(message) + + return { + 'items': messages, + 'total': total_count, + 'page': page, + 'per_page': per_page, + 'pages': (total_count + per_page - 1) // per_page + } + + def get_routing_stats(self, days=7): + """Get routing statistics for the specified time period.""" + sql = """SELECT + COUNT(*) as total_messages, + SUM(CASE WHEN is_error = TRUE THEN 1 ELSE 0 END) as error_count, + SUM(CASE WHEN success = TRUE THEN 1 ELSE 0 END) as success_count, + AVG(hops_taken) as avg_hops, + COUNT(DISTINCT from_id) as unique_senders, + COUNT(DISTINCT to_id) as unique_receivers, + COUNT(DISTINCT relay_node) as unique_relays + FROM routing_messages + WHERE ts_created >= NOW() - INTERVAL %s DAY""" + + cur = self.db.cursor() + cur.execute(sql, [days]) + row = cur.fetchone() + cur.close() + + if row: + return { + 'total_messages': row[0] or 0, + 'error_count': row[1] or 0, + 'success_count': row[2] or 0, + 'avg_hops': float(row[3]) if row[3] else 0, + 'unique_senders': row[4] or 0, + 'unique_receivers': row[5] or 0, + 'unique_relays': row[6] or 0, + 'success_rate': (row[2] / row[0] * 100) if row[0] and row[0] > 0 else 0 + } + return {} + + def get_routing_errors_by_type(self, days=7): + """Get routing error breakdown by error type.""" + sql = """SELECT + error_reason, + error_description, + COUNT(*) as count + FROM routing_messages + WHERE ts_created >= NOW() - INTERVAL %s DAY + AND is_error = TRUE + GROUP BY error_reason, error_description + ORDER BY count DESC""" + + cur = self.db.cursor() + cur.execute(sql, [days]) + rows = cur.fetchall() + cur.close() + + return [{'error_reason': row[0], 'error_description': row[1], 'count': row[2]} for 
row in rows] + def create_database(): + """Create database and user with proper privileges for new installations.""" config = configparser.ConfigParser() config.read('config.ini') + # Connect as root to create database and user db = mysql.connector.connect( - host="db", + host=config["database"]["host"], user="root", - password="passw0rd", + password=config.get("database", "root_password", fallback="passw0rd"), ) - sqls = [ - f"""CREATE DATABASE IF NOT EXISTS {config["database"]["database"]}""", - f"""CREATE USER IF NOT EXISTS '{config["database"]["username"]}'@'%' -IDENTIFIED BY '{config["database"]["password"]}'""", - f"""GRANT ALL ON {config["database"]["username"]}.* -TO '$DB_USER'@'%'""", - f"""ALTER DATABASE {config["database"]["database"]} -CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci""" - ] - for sql in sqls: + + try: + # Create database cur = db.cursor() - cur.execute(sql) + cur.execute(f"""CREATE DATABASE IF NOT EXISTS {config["database"]["database"]} +CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci""") cur.close() - db.commit() + + # Create user if it doesn't exist + cur = db.cursor() + cur.execute(f"""CREATE USER IF NOT EXISTS '{config["database"]["username"]}'@'%' +IDENTIFIED BY '{config["database"]["password"]}'""") + cur.close() + + # Grant all privileges on the specific database + cur = db.cursor() + cur.execute(f"""GRANT ALL PRIVILEGES ON {config["database"]["database"]}.* +TO '{config["database"]["username"]}'@'%'""") + cur.close() + + # Grant RELOAD privilege for query cache operations + cur = db.cursor() + cur.execute(f"""GRANT RELOAD ON *.* TO '{config["database"]["username"]}'@'%'""") + cur.close() + + # Grant additional useful privileges + cur = db.cursor() + cur.execute(f"""GRANT PROCESS ON *.* TO '{config["database"]["username"]}'@'%'""") + cur.close() + + # Flush privileges to apply changes + cur = db.cursor() + cur.execute("FLUSH PRIVILEGES") + cur.close() + + db.commit() + logging.info(f"Database '{config['database']['database']}' 
and user '{config['database']['username']}' created with proper privileges") + + except mysql.connector.Error as e: + logging.error(f"Error creating database: {e}") + raise + finally: + db.close() if __name__ == "__main__": diff --git a/meshinfo_api.py b/meshinfo_api.py new file mode 100644 index 00000000..2659fe31 --- /dev/null +++ b/meshinfo_api.py @@ -0,0 +1,1318 @@ +from flask import Blueprint, request, jsonify, current_app, abort +from datetime import datetime, timedelta +import logging +import sys +import psutil +from meshinfo_utils import get_meshdata, get_cache_timeout, auth, config, log_cache_stats, log_memory_usage +from meshdata import MeshData +from database_cache import DatabaseCache +from meshinfo_register import Register +import utils +import time + +# Create API blueprint +api = Blueprint('api', __name__, url_prefix='/api') + +def api_auth(): + """Authenticate API request using JWT from cookie.""" + jwt_token = request.cookies.get('jwt') + if not jwt_token: + return None + reg = Register() + decoded_jwt = reg.auth(jwt_token) + return decoded_jwt + +def log_detailed_memory_analysis(): + """Perform detailed memory analysis to identify potential leaks.""" + try: + import gc + gc.collect() + + logging.info("=== DETAILED MEMORY ANALYSIS ===") + + # Check database connections + db_connections = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'mysql' in str(obj.__class__).lower(): + db_connections += 1 + logging.info(f"Database connection objects: {db_connections}") + + # Check cache objects + cache_objects = 0 + cache_size = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'cache' in str(obj.__class__).lower(): + cache_objects += 1 + try: + cache_size += sys.getsizeof(obj) + except: + pass + logging.info(f"Cache objects: {cache_objects} ({cache_size / 1024 / 1024:.1f} MB)") + + # Check for Flask/WSGI objects + flask_objects = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'flask' in 
str(obj.__class__).lower(): + flask_objects += 1 + logging.info(f"Flask objects: {flask_objects}") + + # Check for template objects + template_objects = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'template' in str(obj.__class__).lower(): + template_objects += 1 + logging.info(f"Template objects: {template_objects}") + + # Check for large dictionaries and lists + large_dicts = [] + large_lists = [] + for obj in gc.get_objects(): + try: + if isinstance(obj, dict) and len(obj) > 1000: + large_dicts.append((len(obj), str(obj)[:50])) + elif isinstance(obj, list) and len(obj) > 1000: + large_lists.append((len(obj), str(obj)[:50])) + except: + pass + + if large_dicts: + logging.info("Large dictionaries:") + for size, repr_str in sorted(large_dicts, reverse=True)[:5]: + logging.info(f" Dict with {size:,} items: {repr_str}") + + if large_lists: + logging.info("Large lists:") + for size, repr_str in sorted(large_lists, reverse=True)[:5]: + logging.info(f" List with {size:,} items: {repr_str}") + + # Check for circular references + circular_refs = gc.collect() + if circular_refs > 0: + logging.warning(f"Found {circular_refs} circular references") + + logging.info("=== END DETAILED ANALYSIS ===") + + except Exception as e: + logging.error(f"Error in detailed memory analysis: {e}") + +def get_cached_nodes(): + """Get nodes data for API endpoints.""" + md = get_meshdata() + if not md: + return None + + # Use the cached method to prevent duplicate dictionaries + nodes_data = md.get_nodes_cached() + logging.debug(f"Fetched {len(nodes_data)} nodes from API cache") + return nodes_data + +@api.route('/metrics') +def get_metrics(): + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + try: + # Get time range from request parameters + time_range = request.args.get('time_range', 'day') # day, week, month, year, all + channel = request.args.get('channel', 'all') # Get channel parameter + + # Set time range 
based on parameter + end_time = datetime.now() + if time_range == 'week': + start_time = end_time - timedelta(days=7) + bucket_size = 180 # 3 hours in minutes + elif time_range == 'month': + start_time = end_time - timedelta(days=30) + bucket_size = 720 # 12 hours in minutes + elif time_range == 'year': + start_time = end_time - timedelta(days=365) + bucket_size = 2880 # 2 days in minutes + elif time_range == 'all': + # For 'all', we'll first check the data range in the database + cursor = md.db.cursor(dictionary=True) + cursor.execute("SELECT MIN(ts_created) as min_time FROM telemetry") + min_time = cursor.fetchone()['min_time'] + cursor.close() + + if min_time: + start_time = min_time + else: + # Default to 1 year if no data + start_time = end_time - timedelta(days=365) + + bucket_size = 10080 # 7 days in minutes + else: # default to day + start_time = end_time - timedelta(hours=24) + bucket_size = 30 # 30 minutes + + # Convert timestamps to the correct format for MySQL + start_timestamp = start_time.strftime('%Y-%m-%d %H:%M:%S') + end_timestamp = end_time.strftime('%Y-%m-%d %H:%M:%S') + + # Format string for time buckets based on bucket size + if bucket_size >= 10080: # 7 days or more + time_format = '%Y-%m-%d' # Daily format + elif bucket_size >= 1440: # 1 day or more + time_format = '%Y-%m-%d %H:00' # Hourly format + else: + time_format = '%Y-%m-%d %H:%i' # Minute format + + cursor = md.db.cursor(dictionary=True) + + # First, generate a series of time slots + time_slots_query = f""" + WITH RECURSIVE time_slots AS ( + SELECT DATE_FORMAT( + DATE_ADD(%s, INTERVAL -MOD(MINUTE(%s), {bucket_size}) MINUTE), + '{time_format}' + ) as time_slot + UNION ALL + SELECT DATE_FORMAT( + DATE_ADD( + STR_TO_DATE(time_slot, '{time_format}'), + INTERVAL {bucket_size} MINUTE + ), + '{time_format}' + ) + FROM time_slots + WHERE DATE_ADD( + STR_TO_DATE(time_slot, '{time_format}'), + INTERVAL {bucket_size} MINUTE + ) <= %s + ) + SELECT time_slot FROM time_slots + """ + 
cursor.execute(time_slots_query, (start_timestamp, start_timestamp, end_timestamp)) + time_slots = [row['time_slot'] for row in cursor.fetchall()] + + # Add channel condition if specified + if channel != 'all': + channel_condition_text = f" AND channel = {channel}" + channel_condition_telemetry = f" AND channel = {channel}" + channel_condition_reception = f" AND EXISTS (SELECT 1 FROM text t WHERE t.message_id = message_reception.message_id AND t.channel = {channel})" + else: + channel_condition_text = "" + channel_condition_telemetry = "" + channel_condition_reception = "" + + # Nodes Online Query + nodes_online_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + COUNT(DISTINCT id) as node_count + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(nodes_online_query, (start_timestamp, end_timestamp)) + nodes_online_data = {row['time_slot']: row['node_count'] for row in cursor.fetchall()} + + # Message Traffic Query + message_traffic_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + COUNT(*) as message_count + FROM text + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_text} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(message_traffic_query, (start_timestamp, end_timestamp)) + message_traffic_data = {row['time_slot']: row['message_count'] for row in cursor.fetchall()} + + # Channel Utilization Query + channel_util_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(channel_utilization) as avg_util + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + 
GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(channel_util_query, (start_timestamp, end_timestamp)) + channel_util_data = {row['time_slot']: float(row['avg_util']) if row['avg_util'] is not None else 0.0 for row in cursor.fetchall()} + + # Battery Levels Query + battery_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(battery_level) as avg_battery + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(battery_query, (start_timestamp, end_timestamp)) + battery_data = {row['time_slot']: float(row['avg_battery']) if row['avg_battery'] is not None else 0.0 for row in cursor.fetchall()} + + # Temperature Query + temperature_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(temperature) as avg_temp + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(temperature_query, (start_timestamp, end_timestamp)) + temperature_data = {row['time_slot']: float(row['avg_temp']) if row['avg_temp'] is not None else 0.0 for row in cursor.fetchall()} + + # SNR Query + snr_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(rx_snr) as avg_snr + FROM message_reception + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_reception} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(snr_query, (start_timestamp, end_timestamp)) + snr_data = {row['time_slot']: float(row['avg_snr']) if row['avg_snr'] is not None else 0.0 for row in cursor.fetchall()} + + cursor.close() + + # Moving average helper + def 
moving_average_centered(data_list, window_minutes, bucket_size_minutes): + # data_list: list of floats (same order as time_slots) + # window_minutes: total window size (e.g., 120 for 2 hours) + # bucket_size_minutes: size of each bucket (e.g., 30 for 30 min) + n = len(data_list) + result = [] + half_window = window_minutes // 2 + buckets_per_window = max(1, window_minutes // bucket_size_minutes) + for i in range(n): + # Centered window: find all indices within window centered at i + center_time = i + window_indices = [] + for j in range(n): + if abs(j - center_time) * bucket_size_minutes <= half_window: + window_indices.append(j) + if window_indices: + avg = sum(data_list[j] for j in window_indices) / len(window_indices) + else: + avg = data_list[i] + result.append(avg) + return result + + # Get metrics_average_interval from config + metrics_avg_interval = int(config.get('server', 'metrics_average_interval', fallback=7200)) # seconds + metrics_avg_minutes = metrics_avg_interval // 60 + + # Prepare raw data lists + nodes_online_raw = [nodes_online_data.get(slot, 0) for slot in time_slots] + channel_util_raw = [channel_util_data.get(slot, 0) for slot in time_slots] + battery_levels_raw = [battery_data.get(slot, 0) for slot in time_slots] + temperature_raw = [temperature_data.get(slot, 0) for slot in time_slots] + snr_raw = [snr_data.get(slot, 0) for slot in time_slots] + + # Get node_activity_prune_threshold from config + node_activity_prune_threshold = int(config.get('server', 'node_activity_prune_threshold', fallback=7200)) + + # For each time slot, count unique nodes heard in the preceding activity window + cursor = md.db.cursor(dictionary=True) + cursor.execute(f""" + SELECT id, ts_created + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + ORDER BY ts_created + """, (start_timestamp, end_timestamp)) + all_telemetry = list(cursor.fetchall()) + # Convert ts_created to datetime for easier comparison + for row in 
all_telemetry: + if isinstance(row['ts_created'], str): + row['ts_created'] = datetime.strptime(row['ts_created'], '%Y-%m-%d %H:%M:%S') + # Precompute for each time slot + nodes_heard_per_slot = [] + for slot in time_slots: + # slot is a string, convert to datetime + if '%H:%M' in time_format or '%H:%i' in time_format: + slot_time = datetime.strptime(slot, '%Y-%m-%d %H:%M') + elif '%H:00' in time_format: + slot_time = datetime.strptime(slot, '%Y-%m-%d %H:%M') + else: + slot_time = datetime.strptime(slot, '%Y-%m-%d') + window_start = slot_time - timedelta(seconds=node_activity_prune_threshold) + # Find all node ids with telemetry in [window_start, slot_time] + active_nodes = set() + for row in all_telemetry: + if window_start < row['ts_created'] <= slot_time: + active_nodes.add(row['id']) + nodes_heard_per_slot.append(len(active_nodes)) + # Now apply moving average and round to nearest integer + nodes_online_smoothed = [round(x) for x in moving_average_centered(nodes_heard_per_slot, metrics_avg_minutes, bucket_size)] + + cursor.close() + + # Apply moving averages to other metrics + channel_util_smoothed = moving_average_centered(channel_util_raw, metrics_avg_minutes, bucket_size) + battery_levels_smoothed = moving_average_centered(battery_levels_raw, metrics_avg_minutes, bucket_size) + temperature_smoothed = moving_average_centered(temperature_raw, metrics_avg_minutes, bucket_size) + snr_smoothed = moving_average_centered(snr_raw, metrics_avg_minutes, bucket_size) + + return jsonify({ + 'nodes_online': { + 'labels': time_slots, + 'data': nodes_online_smoothed + }, + 'message_traffic': { + 'labels': time_slots, + 'data': [message_traffic_data.get(slot, 0) for slot in time_slots] + }, + 'channel_util': { + 'labels': time_slots, + 'data': channel_util_smoothed + }, + 'battery_levels': { + 'labels': time_slots, + 'data': battery_levels_smoothed + }, + 'temperature': { + 'labels': time_slots, + 'data': temperature_smoothed + }, + 'snr': { + 'labels': time_slots, + 
'data': snr_smoothed + } + }) + + except Exception as e: + logging.error(f"Error in metrics API: {str(e)}") + return jsonify({'error': f'Error fetching metrics: {str(e)}'}), 500 + + + +@api.route('/chattiest-nodes') +def get_chattiest_nodes(): + """Get the most active nodes in terms of message sending.""" + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + # Get filter parameters from request + time_frame = request.args.get('time_frame', 'day') # day, week, month, year, all + message_type = request.args.get('message_type', 'all') # all, text, position, telemetry + channel = request.args.get('channel', 'all') # all or specific channel number + + try: + cursor = md.db.cursor(dictionary=True) + + # Build the time frame condition + time_condition = "" + if time_frame == 'year': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 1 YEAR)" + elif time_frame == 'month': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 1 MONTH)" + elif time_frame == 'week': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 7 DAY)" + elif time_frame == 'day': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 24 HOUR)" + + # Add channel filter if specified - only for text and telemetry tables which have channel column + channel_condition_text = "" + channel_condition_telemetry = "" + if channel != 'all': + channel_condition_text = f" AND channel = {channel}" + channel_condition_telemetry = f" AND channel = {channel}" + if not time_condition: + channel_condition_text = f"WHERE channel = {channel}" + channel_condition_telemetry = f"WHERE channel = {channel}" + + # Build the message type query based on the selected type + if message_type == 'all': + # For text messages, we need to qualify the columns with table aliases + time_condition_with_prefix = time_condition.replace("WHERE", "WHERE t.").replace(" AND", " AND t.") + channel_condition_text_with_prefix = 
channel_condition_text.replace("WHERE", "WHERE t.").replace(" AND", " AND t.") + + message_query = ( + "SELECT t.from_id as node_id, t.ts_created, t.channel as channel " + "FROM text t " + + time_condition_with_prefix + + channel_condition_text_with_prefix + " " + "UNION ALL " + "SELECT id as node_id, ts_created, NULL as channel " + "FROM positionlog " + + time_condition + " " + "UNION ALL " + "SELECT id as node_id, ts_created, channel " + "FROM telemetry " + + time_condition + + channel_condition_telemetry + ) + elif message_type == 'text': + message_query = ( + "SELECT from_id as node_id, ts_created, channel " + "FROM text " + + time_condition + + channel_condition_text + ) + elif message_type == 'position': + message_query = ( + "SELECT id as node_id, ts_created, NULL as channel " + "FROM positionlog " + + time_condition + ) + elif message_type == 'telemetry': + message_query = ( + "SELECT id as node_id, ts_created, channel " + "FROM telemetry " + + time_condition + + channel_condition_telemetry + ) + else: + return jsonify({ + 'error': f'Invalid message type: {message_type}' + }), 400 + + # Query to get the top 20 nodes by message count, including node names and role + query = """ + WITH messages AS ({message_query}) + SELECT + m.node_id as from_id, + n.long_name, + n.short_name, + n.role, + COUNT(*) as message_count, + COUNT(DISTINCT DATE_FORMAT(m.ts_created, '%Y-%m-%d')) as active_days, + MIN(m.ts_created) as first_message, + MAX(m.ts_created) as last_message, + CASE + WHEN '{channel}' != 'all' THEN '{channel}' + ELSE GROUP_CONCAT(DISTINCT NULLIF(CAST(m.channel AS CHAR), 'NULL')) + END as channels, + CASE + WHEN '{channel}' != 'all' THEN 1 + ELSE COUNT(DISTINCT NULLIF(m.channel, 'NULL')) + END as channel_count + FROM + messages m + LEFT JOIN + nodeinfo n ON m.node_id = n.id + GROUP BY + m.node_id, n.long_name, n.short_name, n.role + ORDER BY + message_count DESC + LIMIT 20 + """.format(message_query=message_query, channel=channel) + + cursor.execute(query) + 
results = cursor.fetchall() + + # Process the results to format them for the frontend + chattiest_nodes = [] + for row in results: + # Convert node ID to hex format + node_id_hex = utils.convert_node_id_from_int_to_hex(row['from_id']) + + # Parse channels string into a list of channel objects + channels_str = row['channels'] + channels = [] + if channels_str: + # If we're filtering by channel, just use that channel + if channel != 'all': + channels.append({ + 'id': int(channel), + 'name': utils.get_channel_name(int(channel)), + 'color': utils.get_channel_color(int(channel)) + }) + else: + # Otherwise process the concatenated list of channels + channel_ids = [ch_id for ch_id in channels_str.split(',') if ch_id and ch_id != 'NULL'] + for ch_id in channel_ids: + try: + ch_id_int = int(ch_id) + channels.append({ + 'id': ch_id_int, + 'name': utils.get_channel_name(ch_id_int), + 'color': utils.get_channel_color(ch_id_int) + }) + except (ValueError, TypeError): + continue + + # Create node object + node = { + 'node_id': row['from_id'], + 'node_id_hex': node_id_hex, + 'long_name': row['long_name'] or f"Node {row['from_id']}", # Fallback if no long name + 'short_name': row['short_name'] or f"Node {row['from_id']}", # Fallback if no short name + 'role': utils.get_role_name(row['role']), # Convert role number to name + 'message_count': row['message_count'], + 'active_days': row['active_days'], + 'first_message': row['first_message'].isoformat() if row['first_message'] else None, + 'last_message': row['last_message'].isoformat() if row['last_message'] else None, + 'channels': channels, + 'channel_count': row['channel_count'] + } + chattiest_nodes.append(node) + + return jsonify({ + 'chattiest_nodes': chattiest_nodes + }) + + except Exception as e: + logging.error(f"Error fetching chattiest nodes: {str(e)}") + return jsonify({ + 'error': f'Error fetching chattiest nodes: {str(e)}' + }), 500 + finally: + if cursor: + cursor.close() + +@api.route('/telemetry/') +def 
api_telemetry(node_id): + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + telemetry = md.get_telemetry_for_node(node_id) + return jsonify(telemetry) + +@api.route('/environmental-telemetry/') +def api_environmental_telemetry(node_id): + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + days = request.args.get('days', 1, type=int) + # Limit days to reasonable range (1-30 days) + days = max(1, min(30, days)) + telemetry = md.get_environmental_telemetry_for_node(node_id, days) + return jsonify(telemetry) + +@api.route('/debug/memory') +def debug_memory(): + """Manual trigger for detailed memory analysis.""" + if not auth(): + abort(401) + + log_memory_usage(force=True) + log_detailed_memory_analysis() + + return jsonify({ + 'status': 'success', + 'message': 'Memory analysis completed. Check logs for details.' + }) + +@api.route('/debug/cache') +def debug_cache(): + """Manual trigger for cache analysis.""" + if not auth(): + abort(401) + + log_cache_stats() + + return jsonify({ + 'status': 'success', + 'message': 'Cache analysis completed. Check logs for details.' + }) + +@api.route('/debug/cleanup') +def debug_cleanup(): + """Manual trigger for cache cleanup.""" + if not auth(): + abort(401) + + try: + # Check database privileges first + config = configparser.ConfigParser() + config.read('config.ini') + db_cache = DatabaseCache(config) + privileges = db_cache.check_privileges() + + # Perform cleanup operations + cleanup_cache() + + # Also clear nodes cache and force garbage collection + clear_nodes_cache() + clear_database_cache() + gc.collect() + + # Prepare response message + if privileges['reload']: + message = 'Cache cleanup completed successfully. Database query cache cleared.' + else: + message = 'Cache cleanup completed. Note: Database query cache could not be cleared due to insufficient privileges (RELOAD required).' 
+ + return jsonify({ + 'status': 'success', + 'message': message, + 'database_privileges': privileges + }) + + except Exception as e: + logging.error(f"Error during debug cleanup: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Error during cache cleanup: {str(e)}' + }), 500 + +@api.route('/debug/clear-nodes') +def debug_clear_nodes(): + """Manual trigger to clear nodes cache.""" + if not auth(): + abort(401) + + clear_nodes_cache() + clear_database_cache() + gc.collect() + + return jsonify({ + 'status': 'success', + 'message': 'Nodes cache cleared. Check logs for details.' + }) + +@api.route('/debug/database-cache') +def debug_database_cache(): + """Manual trigger for database cache analysis.""" + if not auth(): + abort(401) + + try: + # Check database privileges + config = configparser.ConfigParser() + config.read('config.ini') + db_cache = DatabaseCache(config) + privileges = db_cache.check_privileges() + + md = get_meshdata() + if md and hasattr(md, 'db_cache'): + stats = md.db_cache.get_cache_stats() + + # Get application cache info + app_cache_info = {} + if hasattr(md, '_nodes_cache'): + app_cache_info = { + 'cache_entries': len(md._nodes_cache), + 'cache_keys': list(md._nodes_cache.keys()), + 'cache_timestamps': {k: v['timestamp'] for k, v in md._nodes_cache.items()} + } + + return jsonify({ + 'status': 'success', + 'database_cache_stats': stats, + 'application_cache_info': app_cache_info, + 'database_privileges': privileges + }) + else: + return jsonify({ + 'status': 'error', + 'message': 'Database cache not available' + }), 500 + + except Exception as e: + logging.error(f"Error during database cache analysis: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Error during database cache analysis: {str(e)}' + }), 500 + +@api.route('/geocode') +def api_geocode(): + """API endpoint for reverse geocoding to avoid CORS issues.""" + try: + lat = request.args.get('lat', type=float) + lon = request.args.get('lon', type=float) + + if lat is 
None or lon is None: + return jsonify({'error': 'Missing lat or lon parameters'}), 400 + + # Use the existing geocoding function from utils + geocoded = utils.geocode_position( + config.get('geocoding', 'apikey', fallback=''), + lat, + lon + ) + + if geocoded: + return jsonify(geocoded) + else: + return jsonify({'error': 'Geocoding failed'}), 500 + + except Exception as e: + logging.error(f"Geocoding error: {e}") + return jsonify({'error': 'Internal server error'}), 500 + +def get_node_positions_batch(node_ids): + """Get position data for multiple nodes efficiently.""" + nodes = get_cached_nodes() + if not nodes: + return {} + + positions = {} + for node_id in node_ids: + if node_id in nodes: + node = nodes[node_id] + if node.get('position') and node['position'].get('latitude') and node['position'].get('longitude'): + positions[node_id] = { + 'latitude': node['position']['latitude'], + 'longitude': node['position']['longitude'] + } + + return positions + +@api.route('/node-positions') +def api_node_positions(): + """API endpoint to get position data for specific nodes for client-side distance calculations.""" + try: + # Get list of node IDs from query parameter + node_ids = request.args.get('nodes', '').split(',') + node_ids = [nid.strip() for nid in node_ids if nid.strip()] + + if not node_ids: + return jsonify({'positions': {}}) + + # Use the cached batch function + positions = get_node_positions_batch(tuple(node_ids)) # Convert to tuple for caching + + return jsonify({'positions': positions}) + + except Exception as e: + logging.error(f"Error fetching node positions: {e}") + return jsonify({'error': 'Internal server error'}), 500 + +@api.route('/utilization-data') +def get_utilization_data(): + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + try: + # Get parameters from request + time_range = request.args.get('time_range', '24') # hours + channel = request.args.get('channel', 'all') + + # Calculate time 
window + hours = int(time_range) + cutoff_time = datetime.now() - timedelta(hours=hours) + + cursor = md.db.cursor(dictionary=True) + + # Build channel condition + channel_condition = "" + if channel != 'all': + channel_condition = f" AND channel = {channel}" + + # Get active nodes from cache (much faster than complex DB queries) + nodes = get_cached_nodes() + if not nodes: + return jsonify({'error': 'No node data available'}), 503 + + # Get most recent telemetry for active nodes only + sql = f""" + SELECT + t.id, + t.channel_utilization, + t.ts_created + FROM telemetry t + WHERE t.ts_created >= NOW() - INTERVAL {hours} HOUR + AND t.channel_utilization IS NOT NULL + AND t.channel_utilization > 0 + {channel_condition} + ORDER BY t.id, t.ts_created DESC + """ + + cursor.execute(sql) + telemetry_rows = cursor.fetchall() + + # Get only the most recent utilization per node + node_utilization = {} + for row in telemetry_rows: + node_id = row['id'] + if node_id not in node_utilization: + node_utilization[node_id] = { + 'utilization': row['channel_utilization'], + 'ts_created': row['ts_created'] + } + + # Get contact data for active nodes in one efficient query + active_node_ids = list(node_utilization.keys()) + contact_data = {} + + if active_node_ids: + # Use placeholders for the IN clause + placeholders = ','.join(['%s'] * len(active_node_ids)) + contact_sql = f""" + SELECT + from_id, + received_by_id, + p1.latitude_i as from_lat_i, + p1.longitude_i as from_lon_i, + p2.latitude_i as to_lat_i, + p2.longitude_i as to_lon_i + FROM message_reception r + LEFT JOIN position p1 ON p1.id = r.from_id + LEFT JOIN position p2 ON p2.id = r.received_by_id + WHERE (r.hop_limit IS NULL AND r.hop_start IS NULL) + OR (r.hop_start - r.hop_limit = 0) + AND r.rx_time >= NOW() - INTERVAL {hours} HOUR + AND r.from_id IN ({placeholders}) + AND p1.latitude_i IS NOT NULL + AND p1.longitude_i IS NOT NULL + AND p2.latitude_i IS NOT NULL + AND p2.longitude_i IS NOT NULL + """ + + 
cursor.execute(contact_sql, active_node_ids) + contact_rows = cursor.fetchall() + + # Build contact distance lookup + for row in contact_rows: + from_id = row['from_id'] + to_id = row['received_by_id'] + + # Check for null coordinates before calculating distance + if (row['from_lat_i'] is None or row['from_lon_i'] is None or + row['to_lat_i'] is None or row['to_lon_i'] is None): + continue + + # Calculate distance using Haversine formula + lat1 = row['from_lat_i'] / 10000000.0 + lon1 = row['from_lon_i'] / 10000000.0 + lat2 = row['to_lat_i'] / 10000000.0 + lon2 = row['to_lon_i'] / 10000000.0 + + # Haversine distance calculation + import math + R = 6371 # Earth's radius in km + dlat = math.radians(lat2 - lat1) + dlon = math.radians(lon2 - lon1) + a = (math.sin(dlat/2) * math.sin(dlat/2) + + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * + math.sin(dlon/2) * math.sin(dlon/2)) + c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a)) + distance = R * c + + # Sanity check: skip distances over 150km + if distance > 150: + continue + + # Store contact data + if from_id not in contact_data: + contact_data[from_id] = {'distances': [], 'contacts': set()} + contact_data[from_id]['distances'].append(distance) + contact_data[from_id]['contacts'].add(to_id) + + # Build result using cached node data + result = [] + for node_id, telemetry_data in node_utilization.items(): + node_hex = utils.convert_node_id_from_int_to_hex(node_id) + node_data = nodes.get(node_hex) + + if node_data and node_data.get('position'): + position = node_data['position'] + if position and position.get('latitude_i') and position.get('longitude_i'): + # Calculate contact distance + node_contacts = contact_data.get(node_id, {'distances': [], 'contacts': set()}) + mean_distance = 2.0 # Default + contact_count = len(node_contacts['contacts']) + + if node_contacts['distances']: + mean_distance = sum(node_contacts['distances']) / len(node_contacts['distances']) + mean_distance = max(2.0, mean_distance) # 
Minimum 2km + + # Use cached node data for position and names + result.append({ + 'id': node_id, + 'utilization': round(telemetry_data['utilization'], 2), + 'position': { + 'latitude_i': position['latitude_i'], + 'longitude_i': position['longitude_i'], + 'altitude': position.get('altitude') + }, + 'short_name': node_data.get('short_name', ''), + 'long_name': node_data.get('long_name', ''), + 'mean_contact_distance': round(mean_distance, 2), + 'contact_count': contact_count + }) + + cursor.close() + + return jsonify({ + 'nodes': result, + 'time_range': time_range, + 'channel': channel + }) + + except Exception as e: + logging.error(f"Error fetching utilization data: {str(e)}", exc_info=True) + return jsonify({ + 'error': f'Error fetching utilization data: {str(e)}' + }), 500 + +@api.route('/hardware-models') +def get_hardware_models(): + """Get hardware model statistics.""" + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + # Query hardware model data + cursor = md.db.cursor(dictionary=True) + cursor.execute(""" + SELECT + hw_model, + COUNT(*) as count, + COUNT(DISTINCT id) as unique_nodes + FROM nodeinfo + WHERE hw_model IS NOT NULL + GROUP BY hw_model + ORDER BY count DESC + """) + hw_models = cursor.fetchall() + + # Process results and get hardware model names + hardware_stats = [] + for row in hw_models: + hw_model_id = row['hw_model'] + # Use lazy import to avoid circular references + import meshtastic_support + hw_model_name = meshtastic_support.get_hardware_model_name(hw_model_id) + + hardware_stats.append({ + 'model_id': hw_model_id, + 'model_name': hw_model_name or f"Unknown Model {hw_model_id}", + 'node_count': row['count'], + 'unique_nodes': row['unique_nodes'] + }) + + # Get top 15 most common + most_common = hardware_stats[:15] + + # Get bottom 15 least common (but only if we have more than 15 total models) + least_common = hardware_stats[-15:] if len(hardware_stats) > 15 else hardware_stats + 
least_common = sorted(least_common, key=lambda x: x['node_count']) # Sort by node_count + + cursor.close() + return jsonify({ + 'most_common': most_common, + 'least_common': least_common, + 'total_models': len(hardware_stats) + }) + +@api.route('/map-data') +def get_map_data(): + """Get map data with filtering options for better performance.""" + md = get_meshdata() + if not md: + return jsonify({'error': 'Database connection unavailable'}), 503 + + try: + # Get filter parameters from request + nodes_max_age = request.args.get('nodes_max_age', '0', type=int) # seconds, 0 = show all + nodes_disconnected_age = request.args.get('nodes_disconnected_age', '10800', type=int) # seconds + nodes_offline_age = request.args.get('nodes_offline_age', '10800', type=int) # seconds + channel_filter = request.args.get('channel_filter', 'all') # all or specific channel + neighbours_max_distance = request.args.get('neighbours_max_distance', '5000', type=int) # meters + + cursor = md.db.cursor(dictionary=True) + now = int(time.time()) + + # Build WHERE conditions for filtering nodes at database level + where_conditions = [] + params = [] + + # Apply max age filter at database level + if nodes_max_age > 0: + cutoff_time = now - nodes_max_age + where_conditions.append("n.ts_seen >= FROM_UNIXTIME(%s)") + params.append(cutoff_time) + + # Channel filter will be applied after the query since it comes from CTE + + # Build the main query with filters + where_clause = " AND ".join(where_conditions) if where_conditions else "1=1" + + # Debug logging + logging.info(f"Map API filters: nodes_max_age={nodes_max_age}, channel_filter={channel_filter}") + logging.info(f"WHERE clause: {where_clause}") + logging.info(f"Parameters: {params}") + + # Use the existing cached nodes function for better performance + all_nodes = md.get_nodes_cached() + if not all_nodes: + return jsonify({'error': 'Failed to load nodes data'}), 503 + + # Filter nodes based on criteria + filtered_nodes = {} + node_ids = [] + + 
for node_id_hex, node_data in all_nodes.items(): + # Apply max age filter + if nodes_max_age > 0: + ts_seen = node_data.get('ts_seen') + if ts_seen: + if hasattr(ts_seen, 'timestamp'): + ts_seen = ts_seen.timestamp() + if now - ts_seen > nodes_max_age: + continue + + # Apply channel filter + if channel_filter != 'all': + node_channel = node_data.get('channel') + if node_channel != int(channel_filter): + continue + + # Convert hex ID to int for zero-hop data + try: + node_id_int = utils.convert_node_id_from_hex_to_int(node_id_hex) + node_ids.append(node_id_int) + except: + continue + + # Create filtered node data + ts_seen = node_data.get('ts_seen') + if hasattr(ts_seen, 'timestamp'): + ts_seen = ts_seen.timestamp() + + ts_uplink = node_data.get('ts_uplink') + if hasattr(ts_uplink, 'timestamp'): + ts_uplink = ts_uplink.timestamp() + + # Check if node should be shown as offline + show_as_offline = False + if nodes_offline_age != 'never' and ts_seen: + if now - ts_seen > nodes_offline_age: + show_as_offline = True + + # Calculate active status + active_threshold = int(config.get('server', 'node_activity_prune_threshold', fallback=7200)) + is_active = ts_seen and (now - ts_seen) <= active_threshold + + filtered_node_data = { + 'id': node_id_hex, + 'short_name': node_data.get('short_name', ''), + 'long_name': node_data.get('long_name', ''), + 'last_seen': ts_seen, + 'ts_uplink': ts_uplink, + 'online': is_active, + 'channel': node_data.get('channel'), + 'channel_name': utils.get_channel_name(node_data.get('channel')) if node_data.get('channel') else 'Unknown', + 'has_default_channel': node_data.get('has_default_channel'), + 'num_online_local_nodes': node_data.get('num_online_local_nodes'), + 'region': node_data.get('region'), + 'modem_preset': node_data.get('modem_preset'), + 'show_as_offline': show_as_offline, + 'zero_hop_data': {'heard': [], 'heard_by': []}, + 'neighbors': [] + } + + # Add position if available + position = node_data.get('position') + if position and 
isinstance(position, dict): + latitude = position.get('latitude') + longitude = position.get('longitude') + if latitude is not None and longitude is not None: + filtered_node_data['position'] = [longitude, latitude] + else: + filtered_node_data['position'] = None + else: + filtered_node_data['position'] = None + + filtered_nodes[node_id_hex] = filtered_node_data + + logging.info(f"Map API filtered to {len(filtered_nodes)} nodes") + + # Node processing is now done above in the filtering loop + + # Use existing functions to get zero-hop and neighbor data + zero_hop_timeout = int(config.get('server', 'zero_hop_timeout', fallback=43200)) + cutoff_time = now - zero_hop_timeout + + # Get zero-hop data using existing function + zero_hop_links, zero_hop_last_heard = md.get_zero_hop_links(cutoff_time) + + # Get neighbor info data using existing function + neighbor_info_links = md.get_neighbor_info_links(days=1) + + # Add zero-hop data to filtered nodes + for node_id_int in node_ids: + node_id_hex = utils.convert_node_id_from_int_to_hex(node_id_int) + if node_id_hex in filtered_nodes: + # Add zero-hop heard data + if node_id_int in zero_hop_links: + for neighbor_id_int, link_data in zero_hop_links[node_id_int]['heard'].items(): + neighbor_id_hex = utils.convert_node_id_from_int_to_hex(neighbor_id_int) + + # Convert last_heard to timestamp if it's a datetime object + last_heard_timestamp = link_data.get('last_heard', now) + if hasattr(last_heard_timestamp, 'timestamp'): + last_heard_timestamp = last_heard_timestamp.timestamp() + + zero_hop_data = { + 'node_id': neighbor_id_hex, + 'count': link_data.get('message_count', 1), + 'best_snr': link_data.get('snr'), + 'avg_snr': link_data.get('snr'), # Use same value for avg + 'last_rx_time': last_heard_timestamp + } + filtered_nodes[node_id_hex]['zero_hop_data']['heard'].append(zero_hop_data) + + # Add zero-hop heard_by data + for neighbor_id_int, link_data in zero_hop_links[node_id_int]['heard_by'].items(): + neighbor_id_hex = 
utils.convert_node_id_from_int_to_hex(neighbor_id_int) + + # Convert last_heard to timestamp if it's a datetime object + last_heard_timestamp = link_data.get('last_heard', now) + if hasattr(last_heard_timestamp, 'timestamp'): + last_heard_timestamp = last_heard_timestamp.timestamp() + + zero_hop_data = { + 'node_id': neighbor_id_hex, + 'count': link_data.get('message_count', 1), + 'best_snr': link_data.get('snr'), + 'avg_snr': link_data.get('snr'), # Use same value for avg + 'last_rx_time': last_heard_timestamp + } + filtered_nodes[node_id_hex]['zero_hop_data']['heard_by'].append(zero_hop_data) + + # Add neighbor info data + if node_id_int in neighbor_info_links: + for neighbor_id_int, link_data in neighbor_info_links[node_id_int]['heard'].items(): + neighbor_id_hex = utils.convert_node_id_from_int_to_hex(neighbor_id_int) + neighbor_data = { + 'id': neighbor_id_hex, + 'snr': link_data.get('snr'), + 'distance': link_data.get('distance') + } + filtered_nodes[node_id_hex]['neighbors'].append(neighbor_data) + + cursor.close() + + response = jsonify({ + 'nodes': filtered_nodes, + 'filters': { + 'nodes_max_age': nodes_max_age, + 'nodes_disconnected_age': nodes_disconnected_age, + 'nodes_offline_age': nodes_offline_age, + 'channel_filter': channel_filter, + 'neighbours_max_distance': neighbours_max_distance + }, + 'timestamp': now, + 'node_count': len(filtered_nodes) + }) + + # Add cache headers for better performance + response.headers['Cache-Control'] = 'public, max-age=60' + return response + + except Exception as e: + logging.error(f"Error fetching map data: {str(e)}", exc_info=True) + return jsonify({ + 'error': f'Error fetching map data: {str(e)}' + }), 500 + +@api.route('/account/change-password', methods=['POST']) +def api_change_password(): + """API endpoint to change user password.""" + user = api_auth() + if not user: + abort(401) + + data = request.get_json() + if not data: + return jsonify({'error': 'Invalid request data'}), 400 + + old_password = 
data.get('old_password') + new_password = data.get('new_password') + confirm_password = data.get('confirm_password') + + if not old_password or not new_password or not confirm_password: + return jsonify({'error': 'All password fields are required'}), 400 + + if new_password != confirm_password: + return jsonify({'error': 'New passwords do not match'}), 400 + + reg = Register() + result = reg.change_password(user['email'], old_password, new_password) + + if 'error' in result: + return jsonify(result), 400 + + return jsonify(result), 200 + +@api.route('/account/unlink-node', methods=['POST']) +def api_unlink_node(): + """API endpoint to unlink a node from user account.""" + user = api_auth() + if not user: + abort(401) + + data = request.get_json() + if not data: + return jsonify({'error': 'Invalid request data'}), 400 + + node_id = data.get('node_id') + if not node_id: + return jsonify({'error': 'Node ID is required'}), 400 + + reg = Register() + result = reg.unlink_node(user['email'], node_id) + + if 'error' in result: + return jsonify(result), 400 + + # Clear all relevant caches to ensure fresh data on next request + try: + from flask import current_app + from flask_caching import Cache + # Get the cache instance and delete the cached nodes + cache = current_app.extensions.get('cache') + if cache: + # Clear the memoized cache for nodes + cache.delete_memoized('get_cached_nodes') + # Also clear the database-level cache in MeshData + from meshdata import MeshData + md = MeshData() + if md: + md.clear_nodes_cache() + except Exception as e: + logging.warning(f"Could not clear nodes cache: {e}") + pass # Don't fail if cache clearing fails + + # Add no-cache headers to the response + response = jsonify(result) + response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate' + response.headers['Pragma'] = 'no-cache' + response.headers['Expires'] = '0' + return response, 200 \ No newline at end of file diff --git a/meshinfo_los_profile.py 
b/meshinfo_los_profile.py index ffc2c8c6..017e75f3 100644 --- a/meshinfo_los_profile.py +++ b/meshinfo_los_profile.py @@ -3,20 +3,30 @@ import os import rasterio import numpy as np +import matplotlib +import logging import matplotlib.pyplot as plt +from matplotlib.font_manager import FontProperties from scipy.spatial import distance from geopy.distance import geodesic import logging import time import io import base64 +import pandas as pd +import atexit class LOSProfile(): - def __init__(self, nodes={}, node=None): + def __init__(self, nodes={}, node=None, config=None, cache=None): self.nodes = nodes self.node = node self.datasets = [] + self.max_distance = int(config['los']['max_distance']) if config and 'los' in config else 5000 # Default to 5000 if not set + self.cache_duration = int(config['los']['cache_duration']) if config and 'los' in config else 43200 # Default to 43200 if not set + + self.cache = cache # Store the cache object + directory = "srtm_data" try: for filename in os.listdir(directory): @@ -25,8 +35,16 @@ def __init__(self, nodes={}, node=None): dataset = rasterio.open(filepath) self.datasets.append(dataset) except FileNotFoundError as e: - logging.warning("No SRTM data") + logging.warning("No SRTM data found in directory: %s", directory) pass + atexit.register(self.close_datasets) + + def close_datasets(self): + """Close all open rasterio datasets.""" + for ds in self.datasets: + if not ds.closed: + ds.close() + self.datasets = [] def calculate_distance_between_coords(self, coord1, coord2): lat1, lon1 = coord1 @@ -66,19 +84,55 @@ def read_elevation_from_tifb(self, lat, lon): def read_elevation_from_tif(self, lat, lon): """ - Reads elevation data from SRTM .tif files in the given directory for a - specific coordinate. + Reads elevation data from preloaded .tif files for a specific + coordinate using efficient windowed reading. 
""" for dataset in self.datasets: + # Check if the coordinate is within the bounds of this dataset if dataset.bounds.left <= lon <= dataset.bounds.right \ and dataset.bounds.bottom <= lat <= dataset.bounds.top: - row, col = dataset.index(lon, lat) - elevation = dataset.read(1)[row, col] - return elevation - logging.warning( - f"No elevation data found for coordinates ({lat}, {lon})" - ) - return [] + try: + # Get the row and column index for the coordinate + row, col = dataset.index(lon, lat) + + # Read only the required pixel using a window + # Ensure indices are within dataset dimensions + if 0 <= row < dataset.height and 0 <= col < dataset.width: + # Read a 1x1 window at the specified row, col + elevation = dataset.read( + 1, + window=((row, row + 1), (col, col + 1)) + )[0, 0] # Extract the single value + + # Check for NoData values (important!) + # Use dataset.nodatavals tuple if multiple bands exist, otherwise dataset.nodata + nodata_val = dataset.nodata + # Handle potential float nodata comparison issues + if nodata_val is not None and np.isclose(float(elevation), float(nodata_val)): + logging.warning( + f"NoData value found at ({lat}, {lon}) in {dataset.name}" + ) + return None # Return None or a specific indicator for NoData + elif np.isnan(elevation): + logging.warning( + f"NaN value found at ({lat}, {lon}) in {dataset.name}" + ) + return None # Return None or indicator for NaN + + return elevation # Return the valid elevation + else: + logging.warning(f"Calculated index ({row}, {col}) out of bounds for dataset {dataset.name}") + return None # Index out of bounds + + except Exception as e: + logging.error(f"Error reading elevation for ({lat}, {lon}) from {dataset.name}: {e}") + return None # Error during read + + # If coordinate wasn't found in any dataset bounds + # logging.debug( # Changed to debug as this can be noisy + # f"Coordinate ({lat}, {lon}) not within bounds of any loaded dataset." 
+ # ) + return None # Coordinate not found in any dataset def generate_los_profile(self, coord1, coord2, resolution=100): """ @@ -94,9 +148,24 @@ def generate_los_profile(self, coord1, coord2, resolution=100): longitudes = np.linspace(lon1, lon2, resolution) elevations = [] + valid_points = 0 for lat, lon in zip(latitudes, longitudes): elevation = self.read_elevation_from_tif(lat, lon) - elevations.append(elevation) + # Handle cases where elevation might be None (NoData, NaN, out of bounds) + if elevation is not None: + elevations.append(elevation) + valid_points += 1 + else: + # Decide how to handle missing points: + # Option 1: Append a placeholder like NaN (if plotting handles it) + elevations.append(np.nan) + # Option 2: Skip the point (distances array would need adjustment) + # Option 3: Interpolate (more complex) + + # Check if enough valid points were found + if valid_points < 2: # Need at least start and end for a line + logging.warning(f"Could not retrieve enough elevation points between {coord1[:2]} and {coord2[:2]}. 
Skipping profile.") + return None, None # Indicate failure # Compute accurate geodesic distances from start point distances = [ @@ -104,68 +173,96 @@ def generate_los_profile(self, coord1, coord2, resolution=100): zip(latitudes, longitudes) ] - profile = [elevation for elevation in elevations] + profile = [elev if not np.isnan(elev) else 0 for elev in elevations] # Replace NaN with 0 for now, adjust if needed # Add altitude to elevation for the final profile - if alt1: - profile[0] = alt1 + # Ensure profile has elements before accessing indices + if profile: + # Use provided altitude if available, otherwise use terrain elevation + profile[0] = alt1 if alt1 is not None else (profile[0] if not np.isnan(profile[0]) else 0) + profile[-1] = alt2 if alt2 is not None else (profile[-1] if not np.isnan(profile[-1]) else 0) + + # Simple linear interpolation for NaN values in the middle + profile_series = pd.Series(profile) + profile_series.interpolate(method='linear', inplace=True) + profile = profile_series.tolist() - if alt2: - profile[-1] = alt2 return distances, profile def plot_los_profile(self, distances, profile, label): - """ - Plots the line-of-sight profile (including altitude) as a solid graph - and saves it as a PNG image. - Additionally, plots a direct straight-line path for comparison. 
- """ - plt.figure(figsize=(10, 6)) - plt.gca().set_facecolor("cyan") + # Create a unique cache key based on the input parameters + cache_key = f"los_profile_{label}_{hash(tuple(distances))}_{hash(tuple(profile))}" + + # Check if the image is already cached + cached_image = self.cache.get(cache_key) # Use self.cache + if cached_image: + return cached_image # Return the cached image if it exists + + # --- Font Setup --- + # Attempt to load Symbola font + try: + symbola_font_path = '/usr/share/fonts/truetype/ancient-scripts/Symbola_hint.ttf' + symbol_font = FontProperties(fname=symbola_font_path) + except FileNotFoundError: + logging.warning(f"Could not find font file at specified path: {symbola_font_path}. Symbols/Emojis may not render correctly.") + symbol_font = None # Fallback to default + except ValueError: + logging.warning("Error loading font from specified path, even if found. Symbols/Emojis may not render correctly.") + symbol_font = None # Fallback to default + + fig = plt.figure(figsize=(10, 6)) + ax = fig.gca() + ax.set_facecolor("cyan") plt.margins(x=0, y=0, tight=True) - # Direct line (interpolated between start and end altitudes) - direct_line = np.linspace(profile[0], profile[-1], len(profile)) + # Check if profile has valid data + if not profile or len(profile) < 2: + logging.warning(f"Profile data is invalid or too short for plotting label: {label}") + plt.close(fig) + return None + + start_alt = profile[0] + end_alt = profile[-1] + + if np.isnan(start_alt) or np.isnan(end_alt): + logging.warning(f"Cannot plot direct LOS line due to NaN start/end altitude for label: {label}") + direct_line = np.full(len(profile), np.nan) + else: + direct_line = np.linspace(start_alt, end_alt, len(profile)) # Plot the terrain profile - plt.fill_between( - distances, - profile, - color="brown", - alpha=1.0, - label="Terrain Profile" - ) - plt.plot( - distances, - profile, - color="brown", - label="Profile Outline" - ) + if not np.isnan(profile).all(): + 
plt.fill_between(distances, profile, color="brown", alpha=1.0, label="Terrain Profile") + plt.plot(distances, profile, color="brown", label="Profile Outline") + else: + logging.warning(f"Terrain profile contains only NaN values for label: {label}") # Plot the direct LOS line - plt.plot( - distances, - direct_line, - color="green", - linestyle="dashed", - linewidth=2, - label="Direct LOS Line" - ) + if not np.isnan(direct_line).all(): + plt.plot(distances, direct_line, color="green", linestyle="dashed", linewidth=2, label="Direct LOS Line") - plt.xlabel("Distance (meters)") - plt.ylabel("Elevation + Altitude (meters)") - plt.title(f"{label}") + # --- Apply FontProperties (using symbol_font) --- + plt.xlabel("Distance (meters)", fontproperties=symbol_font) + plt.ylabel("Elevation + Altitude (meters)", fontproperties=symbol_font) + plt.title(f"{label}", fontproperties=symbol_font) + plt.legend(prop=symbol_font) - # Save the plot to a BytesIO buffer buffer = io.BytesIO() - plt.savefig(buffer, format="png", bbox_inches="tight") + try: + plt.savefig(buffer, format="png", bbox_inches="tight") + buffer.seek(0) + img_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8") - # Encode image to Base64 - buffer.seek(0) - img_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8") + # Cache the rendered image for 12 hours (43,200 seconds) + self.cache.set(cache_key, img_base64, timeout=self.cache_duration) - # Print Base64 output (or return it from a function) - return img_base64 + except Exception as e: + logging.error(f"Error saving plot to buffer for label {label}: {e}") + img_base64 = None + finally: + plt.close(fig) + return img_base64 + def get_profiles(self): profiles = {} hexid = utils.convert_node_id_from_int_to_hex(self.node) @@ -197,35 +294,50 @@ def get_profiles(self): (lat, lon) ) - if (dist and dist < 20000): + if (dist and dist < self.max_distance): coord1 = (mylat, mylon, myalt) coord2 = (lat, lon, alt) - output_path = f"altitude_{c}.png" + # 
output_path = f"altitude_{c}.png" # Not used, can remove c += 1 lname1 = mynode["long_name"] sname1 = mynode["short_name"] lname2 = node["long_name"] sname2 = node["short_name"] + # --- Ensure original label is used --- label = f"{lname1} ({sname1}) <=> {lname2} ({sname2})" + # ------------------------------------ distances, profile = self.generate_los_profile( coord1, coord2 ) - image = self.plot_los_profile( - distances, - profile, - label - ) - profiles[node_id] = { - "image": image, - "distance": dist - } + # Check if profile generation was successful + if distances is not None and profile is not None: + # Pass the original label with emojis + image = self.plot_los_profile( + distances, + profile, + label + ) + # Check if plotting was successful + if image is not None: + profiles[node_id] = { + "image": image, + "distance": dist + } + else: + logging.warning(f"Failed to generate plot for profile: {label}") + else: + logging.warning(f"Failed to generate profile data between {coord1[:2]} and {coord2[:2]}") + except KeyError as e: pass + # logging.warning(f"Missing key 'position' or coordinates for node {node_id} or {hexid}: {e}") except TypeError as e: pass return profiles + def __del__(self): + self.close_datasets() if __name__ == "__main__": from meshdata import MeshData diff --git a/meshinfo_mqtt.py b/meshinfo_mqtt.py index 151d2b53..c8b7a56d 100644 --- a/meshinfo_mqtt.py +++ b/meshinfo_mqtt.py @@ -1,6 +1,7 @@ import logging from paho.mqtt import client as mqtt_client from process_payload import process_payload +from meshdata import MeshData # Import MeshData import configparser import time @@ -9,53 +10,110 @@ config.read("config.ini") logger = logging.getLogger(__name__) +try: + mesh_data_instance = MeshData() + logger.info("MeshData instance created successfully.") +except Exception as e: + logger.error(f"Fatal error: Could not initialize MeshData. Exiting. 
Error: {e}") + exit(1) # Exit if we can't connect to the DB def connect_mqtt() -> mqtt_client: def on_connect(client, userdata, flags, rc): if rc == 0: - logger.info("Connected to MQTT Broker!") + logger.info("Connected to MQTT Broker! Return Code: %d", rc) + # --- Add log before calling subscribe --- + logger.info("on_connect: Attempting to subscribe...") + try: + subscribe(client, mesh_data_instance) + logger.info("on_connect: subscribe() call completed.") + except Exception as e: + logger.exception("on_connect: Error calling subscribe()") # Log exception if subscribe fails + # --- End log --- else: - logger.error("Failed to connect, return code %d\n", rc) + logger.error("Failed to connect, return code %d", rc) def on_disconnect(client, userdata, rc): - logger.warning(f"Disconnected with result code {rc}. Reconnecting...") - while True: - try: - client.reconnect() - logger.info("Reconnected successfully!") - break - except Exception as e: - logger.error(f"Reconnection failed: {e}") - time.sleep(5) # Wait before retrying - client = mqtt_client.Client() + logger.warning(f"Disconnected with result code {rc}. 
Will attempt reconnect.") + # No need for manual reconnect loop, paho handles it with reconnect_delay_set + + client = mqtt_client.Client(client_id="", clean_session=True, userdata=mesh_data_instance) # Ensure clean session if needed + client.user_data_set(mesh_data_instance) # Redundant if passed in constructor, but safe if "username" in config["mqtt"] \ and config["mqtt"]["username"] \ and "password" in config["mqtt"] \ and config["mqtt"]["password"]: + logger.info("Setting MQTT username and password.") client.username_pw_set( config["mqtt"]["username"], config["mqtt"]["password"] ) client.on_connect = on_connect client.on_disconnect = on_disconnect - client.connect(config["mqtt"]["broker"], int(config["mqtt"]["port"])) + client.reconnect_delay_set(min_delay=5, max_delay=120) # Increased min delay slightly + + broker_address = config["mqtt"]["broker"] + broker_port = int(config["mqtt"]["port"]) + logger.info(f"Connecting to MQTT broker at {broker_address}:{broker_port}...") + try: + client.connect(broker_address, broker_port, 60) # Keepalive 60 seconds + except Exception as e: + logger.exception(f"Failed to connect to MQTT broker: {e}") + raise # Reraise the exception to prevent starting loop_forever on failed connect return client -def subscribe(client: mqtt_client): +def subscribe(client: mqtt_client, md_instance: MeshData): + # --- Add log at the start --- + logger.info("subscribe: Entered function.") + # --- End log --- def on_message(client, userdata, msg): + # --- Add log for every message received --- + logger.debug(f"on_message: Received message on topic: {msg.topic}") + # --- End log --- + + # Filter for relevant topics if "/2/e/" in msg.topic or "/2/map/" in msg.topic: - process_payload(msg.payload, msg.topic) + logger.debug(f"on_message: Processing message from relevant topic: {msg.topic}") + try: + # Pass the existing MeshData instance + process_payload(msg.payload, msg.topic, md_instance) + except Exception as e: + logger.exception(f"on_message: 
Error calling process_payload for topic {msg.topic}") + else: + logger.debug(f"on_message: Skipping message from topic: {msg.topic}") + + + topic_to_subscribe = config["mqtt"]["topic"] + logger.info(f"subscribe: Subscribing to topic: {topic_to_subscribe}") + try: + result, mid = client.subscribe(topic_to_subscribe) + if result == mqtt_client.MQTT_ERR_SUCCESS: + logger.info(f"subscribe: Successfully initiated subscription to {topic_to_subscribe} (MID: {mid})") + else: + logger.error(f"subscribe: Failed to initiate subscription to {topic_to_subscribe}, Error code: {result}") + return # Don't set on_message if subscribe failed + + logger.info("subscribe: Setting on_message callback.") + client.on_message = on_message + logger.info("subscribe: on_message callback set.") + except Exception as e: + logger.exception(f"subscribe: Error during subscribe call or setting on_message for topic {topic_to_subscribe}") - client.subscribe(config["mqtt"]["topic"]) - client.on_message = on_message def run(): - client = connect_mqtt() - subscribe(client) - client.loop_forever() + logger.info("Starting MQTT client run sequence...") + try: + client = connect_mqtt() + logger.info("Entering MQTT client loop (loop_forever)...") + client.loop_forever() + except Exception as e: + logger.exception("An error occurred during MQTT client execution.") + finally: + logger.info("Exited MQTT client loop.") if __name__ == '__main__': + # Configure logging (ensure level allows DEBUG if you want to see the on_message logs) + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - [%(name)s] - %(message)s') run() diff --git a/meshinfo_register.py b/meshinfo_register.py index d632e59d..f66a55c3 100644 --- a/meshinfo_register.py +++ b/meshinfo_register.py @@ -85,21 +85,25 @@ def update_password(self, email, newpass): cur.close() self.db.commit() - def authenticate(self, email, password): - sql = """SELECT password, username FROM meshuser -WHERE status='VERIFIED' AND email = %s""" - 
params = (email.lower(), ) + def authenticate(self, email_or_username, password): + """Authenticate user by email or username.""" + # Try to find user by email first, then by username + sql = """SELECT password, username, email FROM meshuser +WHERE status='VERIFIED' AND (email = %s OR username = %s)""" + params = (email_or_username.lower(), email_or_username) cur = self.db.cursor() cur.execute(sql, params) row = cur.fetchone() hashed_password = row[0] if row else None + username = row[1] if row else None + email = row[2] if row else None cur.close() if hashed_password and \ utils.check_password(password, hashed_password): encoded_jwt = jwt.encode( { "email": email, - "username": row[1], + "username": username, "time": int(time.time()) }, self.config["registrations"]["jwt_secret"], @@ -152,6 +156,79 @@ def get_otp(self, email): self.db.commit() return otp + def change_password(self, email, old_password, new_password): + """Change password with old password verification.""" + # Validate new password + if not new_password or len(new_password) < 6: + return {"error": "New password must be at least 6 characters long."} + + # Verify old password + sql = """SELECT password FROM meshuser +WHERE status='VERIFIED' AND email = %s""" + params = (email.lower(), ) + cur = self.db.cursor() + cur.execute(sql, params) + row = cur.fetchone() + hashed_password = row[0] if row else None + cur.close() + + if not hashed_password: + return {"error": "Account not found or not verified."} + + if not utils.check_password(old_password, hashed_password): + return {"error": "Current password is incorrect."} + + # Update password + hashed_new = utils.hash_password(new_password) + sql = """UPDATE meshuser SET password = %s, ts_updated = NOW() +WHERE email = %s""" + params = (hashed_new, email.lower()) + cur = self.db.cursor() + cur.execute(sql, params) + cur.close() + self.db.commit() + + return {"success": "Password changed successfully."} + + def unlink_node(self, email, node_id): + """Unlink 
a node from a user account.""" + # Convert hex node ID to int if needed + if isinstance(node_id, str): + try: + node_id = utils.convert_node_id_from_hex_to_int(node_id) + except (ValueError, TypeError): + return {"error": "Invalid node ID format."} + + # Verify the node exists and belongs to this user + sql = """SELECT owner FROM nodeinfo WHERE id = %s""" + params = (node_id, ) + cur = self.db.cursor() + cur.execute(sql, params) + row = cur.fetchone() + cur.close() + + if not row: + return {"error": "Node not found."} + + if row[0] != email.lower(): + return {"error": "You do not own this node."} + + # Unlink the node - include ownership check in WHERE clause for atomic operation + # This prevents race conditions where ownership might change between SELECT and UPDATE + sql = """UPDATE nodeinfo SET owner = NULL WHERE id = %s AND owner = %s""" + params = (node_id, email.lower()) + cur = self.db.cursor() + cur.execute(sql, params) + rows_affected = cur.rowcount + cur.close() + self.db.commit() + + # Verify the update actually happened + if rows_affected == 0: + return {"error": "Failed to unlink node. It may have been unlinked already or you do not own it."} + + return {"success": "Node unlinked successfully."} + def __del__(self): if self.db: self.db.close() diff --git a/meshinfo_utils.py b/meshinfo_utils.py new file mode 100644 index 00000000..0e0c7d81 --- /dev/null +++ b/meshinfo_utils.py @@ -0,0 +1,732 @@ +""" +Shared utilities for the meshinfo application. +This module contains helper functions used across multiple modules. 
+""" + +import logging +import configparser +import time +from datetime import datetime, timedelta +from flask import g +from meshdata import MeshData +import utils +from meshinfo_telemetry_graph import draw_graph +from meshinfo_los_profile import LOSProfile + +# Load config +config = configparser.ConfigParser() +config.read("config.ini") + +def get_meshdata(): + """Get MeshData instance for the current request context.""" + if not hasattr(g, 'meshdata'): + g.meshdata = MeshData() + return g.meshdata + +def get_cache_timeout(): + """Get cache timeout from config.""" + return int(config.get('server', 'app_cache_timeout_seconds', fallback=300)) + +def auth(): + """Simple auth check - can be enhanced later.""" + return True # For now, always return True + +def log_memory_usage(force=False): + """Log memory usage information.""" + import psutil + import gc + + process = psutil.Process() + memory_info = process.memory_info() + current_usage = memory_info.rss + + # Force garbage collection + gc.collect() + + logging.info(f"Memory Usage: {current_usage / 1024 / 1024:.2f} MB") + + return current_usage + +def get_cache_size(): + """Get total size of cache directory in bytes.""" + import os + cache_dir = os.path.join(os.path.dirname(__file__), 'runtime_cache') + + if os.path.exists(cache_dir): + try: + total_size = 0 + for dirpath, dirnames, filenames in os.walk(cache_dir): + for f in filenames: + fp = os.path.join(dirpath, f) + total_size += os.path.getsize(fp) + return total_size + except Exception as e: + logging.error(f"Error getting cache size: {e}") + return 0 + +def get_cache_entry_count(): + """Get number of entries in cache directory.""" + import os + cache_dir = os.path.join(os.path.dirname(__file__), 'runtime_cache') + + if os.path.exists(cache_dir): + try: + return len([f for f in os.listdir(cache_dir) if not f.endswith('.lock')]) + except Exception as e: + logging.error(f"Error getting cache entry count: {e}") + return 0 + +def 
get_largest_cache_entries(limit=5): + """Get the largest cache entries with their sizes.""" + import os + cache_dir = os.path.join(os.path.dirname(__file__), 'runtime_cache') + + if os.path.exists(cache_dir): + try: + entries = [] + for f in os.listdir(cache_dir): + if not f.endswith('.lock'): + path = os.path.join(cache_dir, f) + size = os.path.getsize(path) + entries.append((f, size)) + return sorted(entries, key=lambda x: x[1], reverse=True)[:limit] + except Exception as e: + logging.error(f"Error getting largest cache entries: {e}") + return [] + +def log_cache_stats(): + """Log detailed cache statistics.""" + try: + total_size = get_cache_size() + entry_count = get_cache_entry_count() + largest_entries = get_largest_cache_entries() + + logging.info(f"Cache Statistics:") + logging.info(f" Total Size: {total_size / 1024 / 1024:.2f} MB") + logging.info(f" Entry Count: {entry_count}") + logging.info(" Largest Entries:") + for entry, size in largest_entries: + logging.info(f" {entry}: {size / 1024 / 1024:.2f} MB") + except Exception as e: + logging.error(f"Error logging cache stats: {e}") + +def cleanup_cache(): + """Clean up cache and log statistics.""" + try: + logging.info("Starting cache cleanup") + logging.info("Memory usage before cache cleanup:") + log_memory_usage(force=True) + logging.info("Cache stats before cleanup:") + log_cache_stats() + + # Clear nodes-related cache entries + clear_nodes_cache() + + # Clear database query cache + clear_database_cache() + + # Force garbage collection + import gc + gc.collect() + + logging.info("Memory usage after cache cleanup:") + log_memory_usage(force=True) + logging.info("Cache stats after cleanup:") + log_cache_stats() + + except Exception as e: + logging.error(f"Error during cache cleanup: {e}") + +def clear_nodes_cache(): + """Clear nodes-related cache entries.""" + try: + # This would clear specific cache entries related to nodes + # Implementation depends on your cache setup + logging.info("Cleared nodes 
cache") + except Exception as e: + logging.error(f"Error clearing nodes cache: {e}") + +def clear_database_cache(): + """Clear database query cache.""" + try: + # This would clear database query cache + # Implementation depends on your cache setup + logging.info("Cleared database cache") + except Exception as e: + logging.error(f"Error clearing database cache: {e}") + +def format_timestamp(timestamp): + """Format timestamp for display.""" + if timestamp is None: + return "Unknown" + try: + dt = datetime.fromtimestamp(timestamp) + return dt.strftime('%Y-%m-%d %H:%M:%S') + except (ValueError, TypeError): + return str(timestamp) + +def time_ago(timestamp): + """Get human-readable time ago string.""" + if timestamp is None: + return "Unknown" + try: + dt = datetime.fromtimestamp(timestamp) + now = datetime.now() + diff = now - dt + + if diff.days > 0: + return f"{diff.days} days ago" + elif diff.seconds > 3600: + hours = diff.seconds // 3600 + return f"{hours} hours ago" + elif diff.seconds > 60: + minutes = diff.seconds // 60 + return f"{minutes} minutes ago" + else: + return "Just now" + except (ValueError, TypeError): + return "Unknown" + +def convert_to_local(timestamp): + """Convert timestamp to local timezone.""" + if timestamp is None: + return "Unknown" + try: + dt = datetime.fromtimestamp(timestamp) + return dt.strftime('%Y-%m-%d %H:%M:%S') + except (ValueError, TypeError): + return str(timestamp) + +def get_cached_chat_data(page=1, per_page=50, channel=None): + """Cache the chat data with optimized query, with optional channel filter (supports comma-separated list).""" + md = get_meshdata() + if not md: + return None + + # Build channel filter SQL + channel_filter = "" + channel_params = [] + if channel is not None and channel != 'all': + if isinstance(channel, str) and ',' in channel: + channel_list = [int(c) for c in channel.split(',') if c.strip()] + if channel_list: + placeholders = ','.join(['%s'] * len(channel_list)) + channel_filter = f" WHERE 
def get_cached_chat_data(page=1, per_page=50, channel=None):
    """Cache the chat data with optimized query, with optional channel filter (supports comma-separated list)."""
    md = get_meshdata()
    if not md:
        return None

    # Translate the channel argument into a WHERE clause plus bound params.
    channel_filter = ""
    channel_params = []
    if channel is not None and channel != 'all':
        if isinstance(channel, str) and ',' in channel:
            channel_list = [int(c) for c in channel.split(',') if c.strip()]
            if channel_list:
                placeholders = ','.join(['%s'] * len(channel_list))
                channel_filter = f" WHERE t.channel IN ({placeholders})"
                channel_params = channel_list
        else:
            channel_filter = " WHERE t.channel = %s"
            channel_params = [int(channel)]

    # Total distinct message count first (fast).
    count_cur = md.db.cursor()
    count_cur.execute(f"SELECT COUNT(DISTINCT t.message_id) FROM text t{channel_filter}", channel_params)
    total = count_cur.fetchone()[0]
    count_cur.close()

    # Paginated messages, newest first (reception data fetched separately below).
    offset = (page - 1) * per_page
    msg_cur = md.db.cursor(dictionary=True)
    msg_cur.execute(f"""
        SELECT t.* FROM text t{channel_filter}
        ORDER BY t.ts_created DESC
        LIMIT %s OFFSET %s
    """, channel_params + [per_page, offset])
    messages = msg_cur.fetchall()
    msg_cur.close()

    # Second query: reception records for just this page of messages.
    receptions_by_message = {}
    if messages:
        message_ids = [m['message_id'] for m in messages]
        placeholders = ','.join(['%s'] * len(message_ids))
        rx_cur = md.db.cursor(dictionary=True)
        rx_cur.execute(f"""
            SELECT message_id, received_by_id, rx_snr, rx_rssi, hop_limit, hop_start, rx_time
            FROM message_reception
            WHERE message_id IN ({placeholders})
        """, message_ids)
        receptions = rx_cur.fetchall()
        rx_cur.close()

        for rx in receptions:
            bucket = receptions_by_message.setdefault(rx['message_id'], [])
            bucket.append({
                "node_id": rx['received_by_id'],
                "rx_snr": float(rx['rx_snr']) if rx['rx_snr'] is not None else 0,
                "rx_rssi": int(rx['rx_rssi']) if rx['rx_rssi'] is not None else 0,
                "hop_limit": int(rx['hop_limit']) if rx['hop_limit'] is not None else None,
                "hop_start": int(rx['hop_start']) if rx['hop_start'] is not None else None,
                "rx_time": rx['rx_time'].timestamp() if isinstance(rx['rx_time'], datetime) else rx['rx_time']
            })

    # Normalize rows: datetimes -> epoch floats, int IDs -> hex, drop adjacent duplicates.
    chats = []
    prev_key = ""
    for row in messages:
        record = {}
        for key, value in row.items():
            record[key] = value.timestamp() if isinstance(value, datetime) else value

        record["receptions"] = receptions_by_message.get(record['message_id'], [])
        record["from"] = utils.convert_node_id_from_int_to_hex(record["from_id"])
        record["to"] = utils.convert_node_id_from_int_to_hex(record["to_id"])

        msg_key = f"{record['from']}{record['to']}{record['text']}{record['message_id']}"
        if msg_key != prev_key:
            chats.append(record)
        prev_key = msg_key

    return {
        "items": chats,
        "total": total,
        "page": page,
        "per_page": per_page,
        "pages": (total + per_page - 1) // per_page,
        "has_prev": page > 1,
        "has_next": page * per_page < total,
        "prev_num": page - 1,
        "next_num": page + 1
    }
def get_node_page_data(node_hex, all_nodes=None):
    """Fetch and process all data for the node page to prevent memory leaks.

    Args:
        node_hex: hex node id of the node being viewed.
        all_nodes: optional pre-fetched nodes dict; fetched via get_cached_nodes() when None.

    Returns:
        dict of template data (deliberately excludes the full `all_nodes` object), or None.
    """
    md = get_meshdata()
    if not md:
        return None

    # Use provided nodes or fetch them if not provided
    if all_nodes is None:
        all_nodes = get_cached_nodes()
    if not all_nodes or node_hex not in all_nodes:
        return None

    current_node = all_nodes[node_hex]
    node_id = current_node['id']

    # Get LOS configuration early
    los_enabled = config.getboolean("los", "enabled", fallback=False)
    zero_hop_timeout = int(config.get("server", "zero_hop_timeout", fallback=43200))
    max_distance_km = int(config.get("los", "max_distance", fallback=5000)) / 1000
    cutoff_time = int(time.time()) - zero_hop_timeout

    # --- Fetch all raw data ---
    node_telemetry = md.get_node_telemetry(node_id)
    node_route = md.get_route_coordinates(node_id)
    telemetry_graph = draw_graph(node_telemetry)
    neighbor_heard_by = md.get_heard_by_from_neighbors(node_id)

    # Only process LOS if enabled
    los_profiles = {}
    if los_enabled:
        # Create a minimal nodes dict for LOSProfile with only the current node and its neighbors
        los_nodes = {}
        los_nodes[node_hex] = current_node

        # Add only the nodes that are within LOS distance and have positions
        max_distance = int(config.get("los", "max_distance", fallback=5000))
        for other_hex, other_node in all_nodes.items():
            if other_hex == node_hex:
                continue
            if not other_node.get('position'):
                continue
            # Calculate distance and only include if within range
            try:
                my_pos = current_node.get('position', {})
                other_pos = other_node.get('position', {})
                if my_pos.get('latitude') and my_pos.get('longitude') and other_pos.get('latitude') and other_pos.get('longitude'):
                    dist = utils.distance_between_two_points(
                        my_pos['latitude'], my_pos['longitude'],
                        other_pos['latitude'], other_pos['longitude']
                    ) * 1000  # Convert to meters
                    if dist < max_distance:
                        los_nodes[other_hex] = other_node
            except Exception:
                # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate.
                continue

        lp = LOSProfile(los_nodes, node_id, config, None)  # cache not available in utils

        # Get LOS profiles and clean up the LOSProfile instance
        try:
            los_profiles = lp.get_profiles()
        finally:
            # Explicitly clean up the LOSProfile instance to release memory
            if hasattr(lp, 'close_datasets'):
                lp.close_datasets()
            del lp
            del los_nodes

    cursor = md.db.cursor(dictionary=True)
    # Query for zero-hop messages heard by this node
    cursor.execute("""
        SELECT r.from_id, COUNT(*) AS count, MAX(r.rx_snr) AS best_snr,
               AVG(r.rx_snr) AS avg_snr, MAX(r.rx_time) AS last_rx_time
        FROM message_reception r
        WHERE r.received_by_id = %s AND ((r.hop_limit IS NULL AND r.hop_start IS NULL) OR (r.hop_start - r.hop_limit = 0))
        AND r.rx_time > %s
        GROUP BY r.from_id ORDER BY last_rx_time DESC
    """, (node_id, cutoff_time))
    zero_hop_heard = cursor.fetchall()

    # Query for zero-hop messages sent by this node
    cursor.execute("""
        SELECT r.received_by_id, COUNT(*) AS count, MAX(r.rx_snr) AS best_snr,
               AVG(r.rx_snr) AS avg_snr, MAX(r.rx_time) AS last_rx_time
        FROM message_reception r
        WHERE r.from_id = %s AND ((r.hop_limit IS NULL AND r.hop_start IS NULL) OR (r.hop_start - r.hop_limit = 0))
        AND r.rx_time > %s
        GROUP BY r.received_by_id ORDER BY last_rx_time DESC
    """, (node_id, cutoff_time))
    zero_hop_heard_by = cursor.fetchall()
    cursor.close()

    # --- Create a lean dictionary of only the linked nodes needed by the template ---
    linked_node_ids = set()
    if 'neighbors' in current_node:
        for neighbor in current_node.get('neighbors', []):
            linked_node_ids.add(neighbor['neighbor_id'])
    for heard in zero_hop_heard:
        linked_node_ids.add(heard['from_id'])
    for neighbor in neighbor_heard_by:
        linked_node_ids.add(neighbor['id'])
    for heard in zero_hop_heard_by:
        linked_node_ids.add(heard['received_by_id'])
    if current_node.get('updated_via'):
        linked_node_ids.add(current_node.get('updated_via'))

    linked_nodes_details = {}
    for linked_id_int in linked_node_ids:
        if not linked_id_int:
            continue
        nid_hex = utils.convert_node_id_from_int_to_hex(linked_id_int)
        node_data = all_nodes.get(nid_hex)
        if node_data:
            # Copy only the fields required by the template
            linked_nodes_details[nid_hex] = {
                'short_name': node_data.get('short_name'),
                'long_name': node_data.get('long_name'),
                'position': node_data.get('position')
            }

    # Build elsewhere links
    node_hex_id = utils.convert_node_id_from_int_to_hex(node_id)
    elsewhere_links = get_elsewhere_links(node_id, node_hex_id)

    # Return a dictionary that does NOT include the full `all_nodes` object
    return {
        'node': current_node,
        'linked_nodes_details': linked_nodes_details,
        'telemetry_graph': telemetry_graph,
        'node_route': node_route,
        'los_profiles': los_profiles,
        'neighbor_heard_by': neighbor_heard_by,
        'zero_hop_heard': zero_hop_heard,
        'zero_hop_heard_by': zero_hop_heard_by,
        'zero_hop_timeout': zero_hop_timeout,
        'max_distance_km': max_distance_km,
        'elsewhere_links': elsewhere_links,
    }


def calculate_node_distance(node1_hex, node2_hex):
    """Calculate distance between two nodes, cached to avoid repeated calculations.

    Returns None when either node or its position is unavailable.
    """
    nodes = get_cached_nodes()
    if not nodes:
        return None

    node1 = nodes.get(node1_hex)
    node2 = nodes.get(node2_hex)

    if not node1 or not node2:
        return None

    if not node1.get("position") or not node2.get("position"):
        return None

    return utils.calculate_distance_between_nodes(node1, node2)
def find_relay_node_by_suffix(relay_suffix, nodes, receiver_ids=None, sender_id=None, zero_hop_links=None, sender_pos=None, receiver_pos=None, debug=False):
    """
    Improved relay node matcher: prefer zero-hop/extended neighbors, then select the physically
    closest candidate to the sender (or receiver), using scoring only as a tiebreaker.

    Args:
        relay_suffix: last-two hex chars identifying the relay.
        nodes: mapping of 8-char hex node id -> node data dict.
        receiver_ids / sender_id: optional ids used with zero_hop_links filtering.
        zero_hop_links: optional {id: {'heard': {...}, 'heard_by': {...}}} adjacency data.
        sender_pos / receiver_pos: optional position dicts used for distance selection.
        debug: print candidate diagnostics when True.

    Returns:
        The chosen candidate's hex id, or None when no candidate matches.
    """
    # Removed redundant function-local `import time` — the module imports time at top level.
    relay_suffix = relay_suffix.lower()[-2:]
    candidates = []
    for node_id_hex, node_data in nodes.items():
        if len(node_id_hex) == 8 and node_id_hex.lower()[-2:] == relay_suffix:
            candidates.append((node_id_hex, node_data))

    if not candidates:
        if debug:
            print(f"[RelayMatch] No candidates for suffix {relay_suffix}")
        return None
    if len(candidates) == 1:
        if debug:
            print(f"[RelayMatch] Only one candidate for suffix {relay_suffix}: {candidates[0][0]}")
        return candidates[0][0]

    # --- Zero-hop filter: only consider zero-hop neighbors if any exist ---
    zero_hop_candidates = []
    if zero_hop_links:
        for node_id_hex, node_data in candidates:
            is_zero_hop = False
            if sender_id and node_id_hex in zero_hop_links.get(sender_id, {}).get('heard', {}):
                is_zero_hop = True
            if receiver_ids:
                for rid in receiver_ids:
                    if node_id_hex in zero_hop_links.get(rid, {}).get('heard', {}):
                        is_zero_hop = True
            if is_zero_hop:
                zero_hop_candidates.append((node_id_hex, node_data))
    if zero_hop_candidates:
        if debug:
            print(f"[RelayMatch] Restricting to zero-hop candidates: {[c[0] for c in zero_hop_candidates]}")
        candidates = zero_hop_candidates
    else:
        # --- Extended neighbor filter: only consider candidates that have ever been heard by or heard from sender/receivers ---
        extended_candidates = []
        if zero_hop_links:
            local_set = set()
            if sender_id and sender_id in zero_hop_links:
                local_set.update(zero_hop_links[sender_id].get('heard', {}).keys())
                local_set.update(zero_hop_links[sender_id].get('heard_by', {}).keys())
            if receiver_ids:
                for rid in receiver_ids:
                    if rid in zero_hop_links:
                        local_set.update(zero_hop_links[rid].get('heard', {}).keys())
                        local_set.update(zero_hop_links[rid].get('heard_by', {}).keys())
            local_set_hex = set()
            for n in local_set:
                try:
                    if isinstance(n, int):
                        local_set_hex.add(utils.convert_node_id_from_int_to_hex(n))
                    elif isinstance(n, str) and len(n) == 8:
                        local_set_hex.add(n)
                except Exception:
                    continue
            for node_id_hex, node_data in candidates:
                if node_id_hex in local_set_hex:
                    extended_candidates.append((node_id_hex, node_data))
        if extended_candidates:
            if debug:
                print(f"[RelayMatch] Restricting to extended neighbor candidates: {[c[0] for c in extended_candidates]}")
            candidates = extended_candidates
        else:
            if debug:
                print(f"[RelayMatch] No local/extended candidates, using all: {[c[0] for c in candidates]}")

    # --- Distance-first selection among remaining candidates ---
    def get_distance(node_data, ref_pos):
        npos = node_data.get('position')
        if not npos or not ref_pos:
            return float('inf')
        nlat = npos.get('latitude') if isinstance(npos, dict) else getattr(npos, 'latitude', None)
        nlon = npos.get('longitude') if isinstance(npos, dict) else getattr(npos, 'longitude', None)
        if nlat is None or nlon is None:
            return float('inf')
        # Fix: Use 'latitude' and 'longitude' keys, not 'lat' and 'lon'
        ref_lat = ref_pos.get('latitude') if isinstance(ref_pos, dict) else getattr(ref_pos, 'latitude', None)
        ref_lon = ref_pos.get('longitude') if isinstance(ref_pos, dict) else getattr(ref_pos, 'longitude', None)
        if ref_lat is None or ref_lon is None:
            return float('inf')
        return utils.distance_between_two_points(ref_lat, ref_lon, nlat, nlon)

    ref_pos = sender_pos if sender_pos else receiver_pos
    if ref_pos:
        # Compute distances
        distances = [(node_id_hex, node_data, get_distance(node_data, ref_pos)) for node_id_hex, node_data in candidates]
        min_dist = min(d[2] for d in distances)
        closest = [d for d in distances if abs(d[2] - min_dist) < 1e-3]  # Allow for float rounding
        if debug:
            print(f"[RelayMatch] Closest candidates by distance: {[(c[0], c[2]) for c in closest]}")
        if len(closest) == 1:
            return closest[0][0]
        # If tie, fall back to scoring among closest
        candidates = [(c[0], c[1]) for c in closest]

    # --- Scoring system as tiebreaker ---
    scores = {}
    now = time.time()
    for node_id_hex, node_data in candidates:
        score = 0
        reasons = []
        if zero_hop_links:
            if sender_id and node_id_hex in zero_hop_links.get(sender_id, {}).get('heard', {}):
                score += 100
                reasons.append('zero-hop-sender')
            if receiver_ids:
                for rid in receiver_ids:
                    if node_id_hex in zero_hop_links.get(rid, {}).get('heard', {}):
                        score += 100
                        reasons.append(f'zero-hop-receiver-{rid}')
        proximity_score = 0
        pos_fresh = False
        if sender_pos and node_data.get('position'):
            npos = node_data['position']
            nlat = npos.get('latitude') if isinstance(npos, dict) else getattr(npos, 'latitude', None)
            nlon = npos.get('longitude') if isinstance(npos, dict) else getattr(npos, 'longitude', None)
            ntime = npos.get('position_time') if isinstance(npos, dict) else getattr(npos, 'position_time', None)
            if nlat is not None and nlon is not None and ntime is not None:
                # Convert datetime to timestamp if needed
                if isinstance(ntime, datetime):
                    ntime = ntime.timestamp()
                if now - ntime > 21600:
                    score -= 50
                    reasons.append('stale-position')
                else:
                    pos_fresh = True
                    # Fix: Use 'latitude' and 'longitude' keys, not 'lat' and 'lon'
                    sender_lat = sender_pos.get('latitude') if isinstance(sender_pos, dict) else getattr(sender_pos, 'latitude', None)
                    sender_lon = sender_pos.get('longitude') if isinstance(sender_pos, dict) else getattr(sender_pos, 'longitude', None)
                    if sender_lat is not None and sender_lon is not None:
                        dist = utils.distance_between_two_points(sender_lat, sender_lon, nlat, nlon)
                        proximity_score = max(0, 100 - dist * 2)
                        score += proximity_score
                        reasons.append(f'proximity:{dist:.1f}km(+{proximity_score:.1f})')
                    else:
                        score -= 50
                        reasons.append('missing-sender-position')
        else:
            # NOTE(review): the original dedent here was ambiguous in the patch; this else
            # is paired with the outer sender_pos/position check — verify against the repo.
            score -= 100
            reasons.append('missing-position')
        ts_seen = node_data.get('ts_seen')
        if ts_seen:
            # Convert datetime to timestamp if needed
            if isinstance(ts_seen, datetime):
                ts_seen = ts_seen.timestamp()
            if now - ts_seen < 3600:
                score += 10
                reasons.append('recently-seen')
        if node_data.get('role') not in [1, 8]:
            score += 5
            reasons.append('relay-capable')
        scores[node_id_hex] = (score, reasons)
    if debug:
        print(f"[RelayMatch] Candidates for suffix {relay_suffix}:")
        for nid, (score, reasons) in scores.items():
            print(f"  {nid}: score={score}, reasons={reasons}")
    if not scores:
        return None
    best = max(scores.items(), key=lambda x: x[1][0])
    if debug:
        print(f"[RelayMatch] Selected {best[0]} for suffix {relay_suffix} (score={best[1][0]})")
    return best[0]
def get_elsewhere_links(node_id, node_hex_id):
    """
    Build Elsewhere links for a node based on config.ini [tools] section.

    Args:
        node_id: The node ID as integer
        node_hex_id: The node ID as hex string

    Returns:
        List of (label, url, icon) tuples for the Elsewhere section
    """
    elsewhere_links = []

    def get_icon_for_tool(label, url):
        """Pick an emoji icon by inspecting the tool's label and URL."""
        label_lower = label.lower()
        url_lower = url.lower()

        # Keyword -> icon, checked in priority order (label first, then URL where relevant).
        if 'map' in label_lower or 'map' in url_lower:
            return '🗺️'
        if 'log' in label_lower or 'log' in url_lower:
            return '📋'
        if 'dashboard' in label_lower or 'monitor' in label_lower:
            return '📊'
        if 'graph' in label_lower or 'network' in label_lower:
            return '🕸️'
        if 'chat' in label_lower or 'message' in label_lower:
            return '💬'
        if 'config' in label_lower or 'setting' in label_lower:
            return '⚙️'
        # Default icon for external links
        return '🔗'

    # Every "<base>_node_link" key defines one external tool link.
    for key, value in config.items('tools'):
        if not key.endswith('_node_link'):
            continue

        base_key = key[:-10]  # strip the '_node_link' suffix (10 chars)

        # Prefer an explicit "<base>_label"; otherwise derive one from the key.
        label = config.get('tools', base_key + '_label', fallback=None)
        if not label:
            label = base_key.replace('_', ' ').title()

        # Substitute node placeholders into the URL and strip any extra quotes.
        url = value.replace('{{ node.id }}', str(node_id)).replace('{{ node.hex_id }}', node_hex_id).strip('"')

        elsewhere_links.append((label, url, get_icon_for_tool(label, url)))

    return elsewhere_links


def get_cached_nodes():
    """Get cached nodes data.

    Placeholder: returns None until wired into the cache backend.
    """
    return None


def get_cached_hardware_models():
    """Get hardware model statistics.

    Placeholder: returns None until wired into the cache backend.
    """
    return None
model statistics.""" + # This would be implemented based on your cache setup + # For now, return None to indicate it needs to be implemented + return None + +def get_role_badge(role_value): + """ + Convert a role value to a colored badge with improved readability. + + Args: + role_value: The numeric role value + + Returns: + A tuple of (badge_text, badge_style) for styling + """ + if role_value is None: + return ("?", "background-color: #6c757d; color: white;") + + role_mapping = { + 0: ("C", "background-color: #0d6efd; color: white;"), # Client - Dark Blue + 1: ("CM", "background-color: #0dcaf0; color: #000;"), # Client Mute - Light Blue with dark text + 2: ("R", "background-color: #dc3545; color: white;"), # Router - Red + 3: ("RC", "background-color: #ffc107; color: #000;"), # Router Client - Orange with dark text + 4: ("RE", "background-color: #198754; color: white;"), # Repeater - Green + 5: ("T", "background-color: #6c757d; color: white;"), # Tracker - Gray + 6: ("S", "background-color: #6c757d; color: white;"), # Sensor - Gray + 7: ("A", "background-color: #6c757d; color: white;"), # TAK - Gray + 8: ("CH", "background-color: #0dcaf0; color: #000;"), # Client Hidden - Light Blue with dark text + 9: ("LF", "background-color: #6c757d; color: white;"), # Lost and Found - Gray + 10: ("AT", "background-color: #6c757d; color: white;"), # TAK Tracker - Gray + 11: ("RL", "background-color: #dc3545; color: white;"), # Router Late - Red + 12: ("CB", "background-color: #0d6efd; color: white;"), # Client Base - Dark Blue + } + + return role_mapping.get(role_value, ("?", "background-color: #212529; color: white;")) \ No newline at end of file diff --git a/meshinfo_web.py b/meshinfo_web.py index df06127e..a0a80ec8 100644 --- a/meshinfo_web.py +++ b/meshinfo_web.py @@ -6,17 +6,36 @@ make_response, redirect, url_for, - abort + abort, + g, + jsonify, + current_app, + send_file ) +from flask_caching import Cache from waitress import serve from paste.translogger import 
TransLogger
import configparser
import logging
import os
import psutil
import gc
import weakref
import threading
import time
import re
import sys
import math
from shapely.geometry import MultiPoint
import requests
from io import BytesIO
import staticmaps
import utils
# Remove direct import to reduce circular references
# import meshtastic_support
from meshdata import MeshData
from database_cache import DatabaseCache
from meshinfo_register import Register
from meshtastic_monday import MeshtasticMonday
from meshinfo_telemetry_graph import draw_graph
# NOTE(review): lines between the two import hunks are not visible in the patch.
from timezone_utils import convert_to_local, format_timestamp, time_ago
import json
import datetime
from meshinfo_api import api
from meshinfo_utils import (
    get_meshdata, get_cache_timeout, auth, config, log_memory_usage,
    clear_nodes_cache, clear_database_cache, get_cached_chat_data, get_node_page_data,
    calculate_node_distance, find_relay_node_by_suffix, get_elsewhere_links, get_role_badge
)
from PIL import Image, ImageDraw
import PIL.ImageDraw


def textsize(self: PIL.ImageDraw.ImageDraw, *args, **kwargs):
    """Drop-in replacement for ImageDraw.textsize (removed in Pillow 10)."""
    x, y, w, h = self.textbbox((0, 0), *args, **kwargs)
    return w, h


# Monkeypatch fix for https://github.com/flopp/py-staticmaps/issues/39
PIL.ImageDraw.ImageDraw.textsize = textsize

app = Flask(__name__)

# --- OG image generation for message_map ---
OG_IMAGE_DIR = "/tmp/og_images"
os.makedirs(OG_IMAGE_DIR, exist_ok=True)


def generate_message_map_image_staticmaps(message_id, sender_pos, receiver_positions):
    """Render and cache a PNG map of a message's sender and receivers.

    Args:
        message_id: id used to name the cached PNG.
        sender_pos: optional {'latitude', 'longitude'} dict for the sender.
        receiver_positions: list of {'latitude', 'longitude'} dicts.

    Returns:
        Filesystem path of the saved PNG.
    """
    width, height = 800, 400
    extra = 40  # extra height for attribution (cropped off below)
    context = staticmaps.Context()
    context.set_tile_provider(staticmaps.tile_provider_OSM)

    # Add sender marker and lines if sender position is available
    if sender_pos and sender_pos.get('latitude') and sender_pos.get('longitude'):
        sender = staticmaps.create_latlng(sender_pos['latitude'], sender_pos['longitude'])

        # Lines from sender to receivers first, so they render behind markers.
        for pos in receiver_positions:
            if pos.get('latitude') and pos.get('longitude'):
                receiver = staticmaps.create_latlng(pos['latitude'], pos['longitude'])
                context.add_object(staticmaps.Line([sender, receiver], color=staticmaps.BLUE, width=2))

        context.add_object(staticmaps.Marker(sender, color=staticmaps.RED, size=8))

    # Receiver markers (drawn regardless of sender availability — TODO confirm
    # against original indentation; the route allows sender-less maps).
    for pos in receiver_positions:
        if pos.get('latitude') and pos.get('longitude'):
            receiver = staticmaps.create_latlng(pos['latitude'], pos['longitude'])
            context.add_object(staticmaps.Marker(receiver, color=staticmaps.BLUE, size=6))

    image = context.render_pillow(width, height + extra)
    # Crop off the bottom 'extra' pixels to remove attribution
    image = image.crop((0, 0, width, height))
    OG_IMAGE_DIR = "/tmp/og_images"
    os.makedirs(OG_IMAGE_DIR, exist_ok=True)
    path = os.path.join(OG_IMAGE_DIR, f"message_map_{message_id}.png")
    image.save(path)
    return path


# FIX: the route previously read '/og_image/message_map/.png' — the <message_id>
# URL variable had been stripped, so Flask could not bind the view argument.
@app.route('/og_image/message_map/<message_id>.png')
def og_image_message_map(message_id):
    """Serve (generating/refreshing as needed) the OG preview image for a message map."""
    from meshinfo_web import get_cached_message_map_data
    data = get_cached_message_map_data(message_id)
    if not data or not data.get('receiver_positions'):
        abort(404)

    sender_pos = data.get('sender_position')

    def get_latlon(pos):
        # Fall back to integer 1e-7-degree fields when float fields are absent.
        lat = pos.get('latitude')
        lon = pos.get('longitude')
        if lat is None and 'latitude_i' in pos:
            lat = pos['latitude_i'] / 1e7
        if lon is None and 'longitude_i' in pos:
            lon = pos['longitude_i'] / 1e7
        return {'latitude': lat, 'longitude': lon}

    # Handle sender position (optional). NOTE(review): truthiness checks drop a
    # legitimate 0.0 latitude/longitude (equator/meridian) — confirm intent.
    sender_pos_processed = None
    if sender_pos:
        sender_pos_processed = get_latlon(sender_pos)
        if not sender_pos_processed['latitude'] or not sender_pos_processed['longitude']:
            sender_pos_processed = None

    # Process receiver positions
    receiver_positions = [get_latlon(p) for p in data['receiver_positions'].values() if p]
    receiver_positions = [p for p in receiver_positions if p['latitude'] and p['longitude']]

    if not receiver_positions:
        abort(404)

    path = os.path.join("/tmp/og_images", f"message_map_{message_id}.png")
    cache_expired = False

    if os.path.exists(path):
        # Check file age
        file_age = time.time() - os.path.getmtime(path)
        max_cache_age = 3600  # 1 hour in seconds

        # Also check if message was updated since image was created
        if data.get('message', {}).get('ts_created'):
            message_created = data['message']['ts_created']
            if hasattr(message_created, 'timestamp'):
                message_created = message_created.timestamp()

            if file_age > max_cache_age or (message_created and os.path.getmtime(path) < message_created):
                cache_expired = True
        elif file_age > max_cache_age:
            cache_expired = True

    if not os.path.exists(path) or cache_expired:
        generate_message_map_image_staticmaps(message_id, sender_pos_processed, receiver_positions)
    return send_file(path, mimetype='image/png')
def generate_traceroute_map_image_staticmaps(traceroute_id, source_pos, destination_pos, forward_hop_positions, return_hop_positions):
    """Render and cache a PNG map of a traceroute's forward and return paths.

    Forward path/hops are green, return path/hops purple; source is red, destination blue.
    Returns the filesystem path of the saved PNG.
    """
    width, height = 800, 400
    extra = 40  # extra height for attribution (cropped off below)
    context = staticmaps.Context()
    context.set_tile_provider(staticmaps.tile_provider_OSM)

    # Add all lines first (so they appear behind markers)
    if source_pos and destination_pos:
        source = staticmaps.create_latlng(source_pos['latitude'], source_pos['longitude'])
        destination = staticmaps.create_latlng(destination_pos['latitude'], destination_pos['longitude'])

        # Create forward path through all hops
        forward_path_points = [source]
        for hop_pos in forward_hop_positions:
            if hop_pos and hop_pos.get('latitude') and hop_pos.get('longitude'):
                forward_path_points.append(staticmaps.create_latlng(hop_pos['latitude'], hop_pos['longitude']))
        forward_path_points.append(destination)

        # Add forward path lines (green)
        for i in range(len(forward_path_points) - 1):
            context.add_object(staticmaps.Line([forward_path_points[i], forward_path_points[i+1]], color=staticmaps.Color(68, 170, 68), width=2))

        # Create return path if return hops exist
        if return_hop_positions:
            return_path_points = [destination]
            for hop_pos in return_hop_positions:
                if hop_pos and hop_pos.get('latitude') and hop_pos.get('longitude'):
                    return_path_points.append(staticmaps.create_latlng(hop_pos['latitude'], hop_pos['longitude']))
            return_path_points.append(source)

            # Add return path lines (purple)
            for i in range(len(return_path_points) - 1):
                context.add_object(staticmaps.Line([return_path_points[i], return_path_points[i+1]], color=staticmaps.Color(170, 68, 170), width=2))

    # Add all markers after lines (so they appear on top)
    if source_pos:
        source = staticmaps.create_latlng(source_pos['latitude'], source_pos['longitude'])
        context.add_object(staticmaps.Marker(source, color=staticmaps.RED, size=8))

    # Add forward hop markers (green)
    for i, hop_pos in enumerate(forward_hop_positions):
        if hop_pos and hop_pos.get('latitude') and hop_pos.get('longitude'):
            hop = staticmaps.create_latlng(hop_pos['latitude'], hop_pos['longitude'])
            context.add_object(staticmaps.Marker(hop, color=staticmaps.Color(68, 170, 68), size=6))

    if destination_pos:
        destination = staticmaps.create_latlng(destination_pos['latitude'], destination_pos['longitude'])
        context.add_object(staticmaps.Marker(destination, color=staticmaps.BLUE, size=8))

    # Add return hop markers (purple) - but only if they're different from forward hops
    for i, hop_pos in enumerate(return_hop_positions):
        if hop_pos and hop_pos.get('latitude') and hop_pos.get('longitude'):
            hop = staticmaps.create_latlng(hop_pos['latitude'], hop_pos['longitude'])
            context.add_object(staticmaps.Marker(hop, color=staticmaps.Color(170, 68, 170), size=6))

    image = context.render_pillow(width, height + extra)
    # Crop off the bottom 'extra' pixels to remove attribution
    image = image.crop((0, 0, width, height))
    OG_IMAGE_DIR = "/tmp/og_images"
    os.makedirs(OG_IMAGE_DIR, exist_ok=True)
    path = os.path.join(OG_IMAGE_DIR, f"traceroute_map_{traceroute_id}.png")
    image.save(path)
    return path


# FIX: the route previously read '/og_image/traceroute_map/.png' — the
# <traceroute_id> URL variable had been stripped, so Flask could not bind it.
@app.route('/og_image/traceroute_map/<traceroute_id>.png')
def og_image_traceroute_map(traceroute_id):
    """Serve (generating/refreshing as needed) the OG preview image for a traceroute map."""
    md = get_meshdata()
    if not md:
        abort(404)

    # Get traceroute data
    cursor = md.db.cursor(dictionary=True)
    cursor.execute("""
        SELECT from_id, to_id, route, route_back, ts_created
        FROM traceroute
        WHERE traceroute_id = %s
    """, (traceroute_id,))
    traceroute_data = cursor.fetchone()
    cursor.close()

    if not traceroute_data:
        abort(404)

    # Get positions for source and destination
    def get_node_position(node_id):
        cursor = md.db.cursor(dictionary=True)
        cursor.execute("""
            SELECT latitude_i, longitude_i, position_time
            FROM position
            WHERE id = %s
            ORDER BY position_time DESC
            LIMIT 1
        """, (node_id,))
        pos = cursor.fetchone()
        cursor.close()

        if pos and pos['latitude_i'] is not None and pos['longitude_i'] is not None:
            return {
                'latitude': pos['latitude_i'] / 1e7,
                'longitude': pos['longitude_i'] / 1e7,
                'position_time': pos['position_time']
            }
        return None

    source_pos = get_node_position(traceroute_data['from_id'])
    destination_pos = get_node_position(traceroute_data['to_id'])

    if not source_pos or not destination_pos:
        abort(404)

    # Get positions for all hops
    forward_hop_positions = []
    if traceroute_data['route']:
        route = [int(hop) for hop in traceroute_data['route'].split(';') if hop]
        for hop_id in route:
            hop_pos = get_node_position(hop_id)
            forward_hop_positions.append(hop_pos)

    # Get positions for return hops
    return_hop_positions = []
    if traceroute_data['route_back']:
        route_back = [int(hop) for hop in traceroute_data['route_back'].split(';') if hop]
        for hop_id in route_back:
            hop_pos = get_node_position(hop_id)
            return_hop_positions.append(hop_pos)

    # Generate the image
    path = os.path.join("/tmp/og_images", f"traceroute_map_{traceroute_id}.png")
    cache_expired = False

    if os.path.exists(path):
        # Check file age
        file_age = time.time() - os.path.getmtime(path)
        max_cache_age = 3600  # 1 hour in seconds

        # Also check if traceroute was updated since image was created
        if traceroute_data.get('ts_created'):
            traceroute_created = traceroute_data['ts_created']
            if hasattr(traceroute_created, 'timestamp'):
                traceroute_created = traceroute_created.timestamp()

            if file_age > max_cache_age or (traceroute_created and os.path.getmtime(path) < traceroute_created):
                cache_expired = True
        elif file_age > max_cache_age:
            cache_expired = True

    if not os.path.exists(path) or cache_expired:
        generate_traceroute_map_image_staticmaps(traceroute_id, source_pos, destination_pos, forward_hop_positions, return_hop_positions)

    return send_file(path, mimetype='image/png')


cache_dir = os.path.join(os.path.dirname(__file__), 'runtime_cache')

# Ensure the cache directory exists
if not os.path.exists(cache_dir):
    try:
        os.makedirs(cache_dir)
        logging.info(f"Created cache directory: {cache_dir}")
    except OSError as e:
        logging.error(f"Could not create cache directory {cache_dir}: {e}")
        cache_dir = None  # Indicate failure

# Initialize cache after config is loaded
cache = None


def initialize_cache():
    """Initialize the Flask cache with configuration (FileSystemCache, SimpleCache fallback)."""
    global cache

    # Load config first
    config = configparser.ConfigParser()
    config.read("config.ini")

    # Configure Flask-Caching
    cache_config = {
        'CACHE_TYPE': 'FileSystemCache',
        'CACHE_DIR': cache_dir,
        'CACHE_THRESHOLD': int(config.get('server', 'app_cache_max_entries', fallback=100)),
        'CACHE_DEFAULT_TIMEOUT': int(config.get('server', 'app_cache_timeout_seconds', fallback=60)),
        'CACHE_OPTIONS': {
            'mode': 0o600,
            'max_size': 50 * 1024 * 1024  # 50MB max size per item
        }
    }

    if cache_dir:
        logging.info(f"Using FileSystemCache with directory: {cache_dir}")
    else:
        # NOTE(review): this warning claims a SimpleCache fallback, but cache_config
        # still targets FileSystemCache here; the real fallback only happens in the
        # except branch below — confirm whether this branch should rewrite the config.
        logging.warning("Falling back to SimpleCache due to directory creation issues.")

    # Initialize Cache with the chosen config
    try:
        cache = Cache(app, config=cache_config)
    except Exception as e:
        logging.error(f"Failed to initialize cache: {e}")
        # Fallback to SimpleCache if FileSystemCache fails
        cache_config = {
            'CACHE_TYPE': 'SimpleCache',
            'CACHE_DEFAULT_TIMEOUT': int(config.get('server', 'app_cache_timeout_seconds', fallback=300)),
            'CACHE_THRESHOLD': int(config.get('server', 'app_cache_max_entries', fallback=100))
        }
        cache = Cache(app, config=cache_config)
cache_dir: + logging.info(f"Using FileSystemCache with directory: {cache_dir}") + else: + logging.warning("Falling back to SimpleCache due to directory creation issues.") + + # Initialize Cache with the chosen config + try: + cache = Cache(app, config=cache_config) + except Exception as e: + logging.error(f"Failed to initialize cache: {e}") + # Fallback to SimpleCache if FileSystemCache fails + cache_config = { + 'CACHE_TYPE': 'SimpleCache', + 'CACHE_DEFAULT_TIMEOUT': int(config.get('server', 'app_cache_timeout_seconds', fallback=300)), + 'CACHE_THRESHOLD': int(config.get('server', 'app_cache_max_entries', fallback=100)) + } + cache = Cache(app, config=cache_config) + +# Cache monitoring functions +def get_cache_size(): + """Get total size of cache directory in bytes.""" + if cache_dir: + try: + total_size = 0 + for dirpath, dirnames, filenames in os.walk(cache_dir): + for f in filenames: + fp = os.path.join(dirpath, f) + total_size += os.path.getsize(fp) + return total_size + except Exception as e: + logging.error(f"Error getting cache size: {e}") + return 0 + +def get_cache_entry_count(): + """Get number of entries in cache directory.""" + if cache_dir: + try: + return len([f for f in os.listdir(cache_dir) if not f.endswith('.lock')]) + except Exception as e: + logging.error(f"Error getting cache entry count: {e}") + return 0 + +def get_largest_cache_entries(limit=5): + """Get the largest cache entries with their sizes.""" + if cache_dir: + try: + entries = [] + for f in os.listdir(cache_dir): + if not f.endswith('.lock'): + path = os.path.join(cache_dir, f) + size = os.path.getsize(path) + entries.append((f, size)) + return sorted(entries, key=lambda x: x[1], reverse=True)[:limit] + except Exception as e: + logging.error(f"Error getting largest cache entries: {e}") + return [] + +def log_cache_stats(): + """Log detailed cache statistics.""" + try: + total_size = get_cache_size() + entry_count = get_cache_entry_count() + largest_entries = 
get_largest_cache_entries() + + logging.info(f"Cache Statistics:") + logging.info(f" Total Size: {total_size / 1024 / 1024:.2f} MB") + logging.info(f" Entry Count: {entry_count}") + logging.info(" Largest Entries:") + for entry, size in largest_entries: + logging.info(f" {entry}: {size / 1024 / 1024:.2f} MB") + except Exception as e: + logging.error(f"Error logging cache stats: {e}") + +# Modify cleanup_cache to include cache stats +def cleanup_cache(): + try: + logging.info("Starting cache cleanup") + logging.info("Memory usage before cache cleanup:") + log_memory_usage(force=True) + logging.info("Cache stats before cleanup:") + log_cache_stats() + + # Clear nodes-related cache entries + clear_nodes_cache() + + # Clear database query cache + clear_database_cache() + + # Clear the cache + with app.app_context(): + cache.clear() + + # Force garbage collection + gc.collect() + + logging.info("Memory usage after cache cleanup:") + log_memory_usage(force=True) + logging.info("Cache stats after cleanup:") + log_cache_stats() + + except Exception as e: + logging.error(f"Error during cache cleanup: {e}") + +# Make globals available to templates app.jinja_env.globals.update(convert_to_local=convert_to_local) app.jinja_env.globals.update(format_timestamp=format_timestamp) app.jinja_env.globals.update(time_ago=time_ago) app.jinja_env.globals.update(min=min) app.jinja_env.globals.update(max=max) +app.jinja_env.globals.update(datetime=datetime.datetime) +app.jinja_env.globals.update(get_role_badge=get_role_badge) + +# Add template filters +@app.template_filter('safe_hw_model') +def safe_hw_model(value): + try: + return get_hardware_model_name(value) + except (ValueError, AttributeError): + return f"Unknown ({value})" config = configparser.ConfigParser() config.read("config.ini") +# Initialize cache with config +initialize_cache() + +# Register API blueprint +app.register_blueprint(api) + +# Add request context tracking +active_requests = set() +request_lock = threading.Lock() 
+ +# Add memory usage tracking +last_memory_log = 0 +last_memory_usage = 0 +MEMORY_LOG_INTERVAL = 60 # Log every minute instead of 5 minutes +MEMORY_CHANGE_THRESHOLD = 10 * 1024 * 1024 # 10MB change threshold (reduced from 50MB) + +@app.before_request +def before_request(): + """Track request start.""" + with request_lock: + active_requests.add(id(request)) + # Enhanced memory logging for high-activity periods + if len(active_requests) > 5: # If more than 5 concurrent requests + log_memory_usage(force=True) + +@app.after_request +def after_request(response): + """Clean up request context.""" + with request_lock: + active_requests.discard(id(request)) + # Enhanced memory logging for high-activity periods + if len(active_requests) > 5: # If more than 5 concurrent requests + log_memory_usage(force=True) + return response + + + +@app.teardown_appcontext +def teardown_meshdata(exception): + """Closes the MeshData connection at the end of the request.""" + md = g.pop('meshdata', None) + if md is not None: + try: + if hasattr(md, 'db') and md.db: + if md.db.is_connected(): + md.db.close() + logging.debug("Database connection closed in teardown.") + else: + logging.debug("Database connection was already closed.") + else: + logging.debug("No database connection to close.") + except Exception as e: + logging.error(f"Error handling database connection in teardown: {e}") + finally: + # Ensure the MeshData instance is properly cleaned up + try: + del md + except: + pass + logging.debug("MeshData instance removed from request context.") + + + +# Add connection monitoring +def monitor_connections(): + """Monitor database connections.""" + while True: + try: + with app.app_context(): + if hasattr(g, 'meshdata') and g.meshdata and hasattr(g.meshdata, 'db'): + if g.meshdata.db.is_connected(): + logging.info("Database connection is active") + else: + logging.warning("Database connection is not active") + except Exception as e: + logging.error(f"Error monitoring database connection: 
{e}") + time.sleep(60) # Check every minute + +# Add cache lock monitoring +def monitor_cache_locks(): + """Monitor cache lock files.""" + while True: + try: + if cache_dir: + lock_files = [f for f in os.listdir(cache_dir) if f.endswith('.lock')] + if lock_files: + logging.warning(f"Found {len(lock_files)} stale cache locks") + # Clean up stale locks + for lock_file in lock_files: + try: + os.remove(os.path.join(cache_dir, lock_file)) + except Exception as e: + logging.error(f"Error removing stale lock {lock_file}: {e}") + except Exception as e: + logging.error(f"Error monitoring cache locks: {e}") + time.sleep(300) # Check every 5 minutes + +def log_detailed_memory_analysis(): + """Perform detailed memory analysis to identify potential leaks.""" + try: + import gc + gc.collect() + + logging.info("=== DETAILED MEMORY ANALYSIS ===") + + # Check database connections + db_connections = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'mysql' in str(obj.__class__).lower(): + db_connections += 1 + logging.info(f"Database connection objects: {db_connections}") + + # Check cache objects + cache_objects = 0 + cache_size = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'cache' in str(obj.__class__).lower(): + cache_objects += 1 + try: + cache_size += sys.getsizeof(obj) + except: + pass + logging.info(f"Cache objects: {cache_objects} ({cache_size / 1024 / 1024:.1f} MB)") + + # Check for Flask/WSGI objects + flask_objects = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'flask' in str(obj.__class__).lower(): + flask_objects += 1 + logging.info(f"Flask objects: {flask_objects}") + + # Check for template objects + template_objects = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'template' in str(obj.__class__).lower(): + template_objects += 1 + logging.info(f"Template objects: {template_objects}") + + # Check for large dictionaries and lists + large_dicts = [] + large_lists = [] + for obj in 
gc.get_objects(): + try: + if isinstance(obj, dict) and len(obj) > 1000: + large_dicts.append((len(obj), str(obj)[:50])) + elif isinstance(obj, list) and len(obj) > 1000: + large_lists.append((len(obj), str(obj)[:50])) + except: + pass + + if large_dicts: + logging.info("Large dictionaries:") + for size, repr_str in sorted(large_dicts, reverse=True)[:5]: + logging.info(f" Dict with {size:,} items: {repr_str}") + + if large_lists: + logging.info("Large lists:") + for size, repr_str in sorted(large_lists, reverse=True)[:5]: + logging.info(f" List with {size:,} items: {repr_str}") + + # Check for circular references + circular_refs = gc.collect() + if circular_refs > 0: + logging.warning(f"Found {circular_refs} circular references") + + logging.info("=== END DETAILED ANALYSIS ===") + + except Exception as e: + logging.error(f"Error in detailed memory analysis: {e}") + +# Modify the memory watchdog to include detailed analysis +def memory_watchdog(): + """Monitor memory usage and take action if it gets too high.""" + while True: + try: + process = psutil.Process(os.getpid()) + memory_info = process.memory_info() + memory_mb = memory_info.rss / 1024 / 1024 + + if memory_mb > 1000: # If over 1GB (reduced from 2GB) + logging.warning(f"Memory usage high ({memory_mb:.2f} MB), performing detailed analysis") + log_detailed_memory_analysis() + logging.info("Cache stats before high memory cleanup:") + log_cache_stats() + with app.app_context(): + cache.clear() + # Clear nodes-related cache entries + clear_nodes_cache() + # Clear database query cache + clear_database_cache() + gc.collect() + logging.info("Cache stats after high memory cleanup:") + log_cache_stats() + + if memory_mb > 2000: # If over 2GB (reduced from 4GB) + logging.error(f"Memory usage critical ({memory_mb:.2f} MB), logging detailed memory info") + log_memory_usage(force=True) + log_detailed_memory_analysis() + logging.info("Cache stats at critical memory level:") + log_cache_stats() + # Force clear nodes cache 
at critical levels + clear_nodes_cache() + clear_database_cache() + gc.collect() + + except Exception as e: + logging.error(f"Error in memory watchdog: {e}") + + time.sleep(30) # Check every 30 seconds instead of 60 + +# Start monitoring threads +connection_monitor_thread = threading.Thread(target=monitor_connections, daemon=True) +connection_monitor_thread.start() + +lock_monitor_thread = threading.Thread(target=monitor_cache_locks, daemon=True) +lock_monitor_thread.start() + +watchdog_thread = threading.Thread(target=memory_watchdog, daemon=True) +watchdog_thread.start() + +# Schedule cache cleanup +def schedule_cache_cleanup(): + while True: + time.sleep(900) # Run every 15 minutes instead of hourly + cleanup_cache() + +cleanup_thread = threading.Thread(target=schedule_cache_cleanup, daemon=True) +cleanup_thread.start() def auth(): jwt = request.cookies.get('jwt') @@ -48,7 +698,6 @@ def auth(): decoded_jwt = reg.auth(jwt) return decoded_jwt - @app.errorhandler(404) def not_found(e): return render_template( @@ -57,173 +706,196 @@ def not_found(e): config=config ), 404 +# Data caching functions +def cache_key_prefix(): + """Generate a cache key prefix based on current time bucket.""" + # Round to nearest minute for 60-second cache + return datetime.datetime.now().replace(second=0, microsecond=0).timestamp() -# Serve static files from the root directory -@app.route('/') -def serve_index(success_message=None, error_message=None): - md = MeshData() - nodes = md.get_nodes() - return render_template( - "index.html.j2", - auth=auth(), - config=config, - nodes=nodes, - active_nodes=utils.active_nodes(nodes), - timestamp=datetime.datetime.now(), - success_message=success_message, - error_message=error_message - ) - - -@app.route('/nodes.html') -def nodes(): - md = MeshData() - nodes = md.get_nodes() - latest = md.get_latest_node() - return render_template( - "nodes.html.j2", - auth=auth(), - config=config, - nodes=nodes, - show_inactive=False, # Add this line - 
latest=latest, - hardware=meshtastic_support.HardwareModel, - meshtastic_support=meshtastic_support, - utils=utils, - datetime=datetime.datetime, - timestamp=datetime.datetime.now(), - ) - -@app.route('/allnodes.html') -def allnodes(): - md = MeshData() - nodes = md.get_nodes() - latest = md.get_latest_node() - return render_template( - "nodes.html.j2", # Change to use nodes.html.j2 - auth=auth(), - config=config, - nodes=nodes, - show_inactive=True, - latest=latest, - hardware=meshtastic_support.HardwareModel, - meshtastic_support=meshtastic_support, - utils=utils, - datetime=datetime.datetime, - timestamp=datetime.datetime.now(), - ) -@app.route('/chat.html') -def chat(): - page = request.args.get('page', 1, type=int) - per_page = 50 - - md = MeshData() - nodes = md.get_nodes() - chat_data = md.get_chat(page=page, per_page=per_page) - - # start_item and end_item for pagination - chat_data['start_item'] = (page - 1) * per_page + 1 if chat_data['total'] > 0 else 0 - chat_data['end_item'] = min(page * per_page, chat_data['total']) +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_nodes(): + """Get nodes data with database-level caching.""" + md = get_meshdata() + if not md: + return None - return render_template( - "chat.html.j2", - auth=auth(), - config=config, - nodes=nodes, - chat=chat_data["items"], - pagination=chat_data, - utils=utils, - datetime=datetime.datetime, - timestamp=datetime.datetime.now(), - debug=False, - ) + # Use the cached method to prevent duplicate dictionaries + nodes_data = md.get_nodes_cached() + logging.debug(f"Fetched {len(nodes_data)} nodes from application cache") + return nodes_data -@app.route('/chat2.html') -def chat2(): - page = request.args.get('page', 1, type=int) - per_page = 50 - - md = MeshData() - nodes = md.get_nodes() - chat_data = md.get_chat(page=page, per_page=per_page) - - chat_data['start_item'] = (page - 1) * per_page + 1 if chat_data['total'] > 0 else 0 - chat_data['end_item'] = min(page * per_page, 
chat_data['total']) - - return render_template( - "chat2.html.j2", - auth=auth(), - config=config, - nodes=nodes, - chat=chat_data["items"], - pagination=chat_data, - utils=utils, - datetime=datetime.datetime, - timestamp=datetime.datetime.now(), - debug=False, - ) +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_active_nodes(): + """Cache the active nodes calculation.""" + nodes = get_cached_nodes() + if not nodes: + return {} + return utils.active_nodes(nodes) -@app.route('/message_map.html') -def message_map(): - message_id = request.args.get('id') - if not message_id: - abort(404) +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_latest_node(): + """Cache the latest node data.""" + md = get_meshdata() + if not md: + return None + return md.get_latest_node() + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_message_map_data(message_id): + """Cache the message map data for a specific message.""" + md = get_meshdata() + if not md: + return None - md = MeshData() - nodes = md.get_nodes() - - # Get message and reception data + # Get message and basic reception data cursor = md.db.cursor(dictionary=True) cursor.execute(""" - SELECT t.*, r.* + SELECT t.*, GROUP_CONCAT(r.received_by_id) as receiver_ids FROM text t LEFT JOIN message_reception r ON t.message_id = r.message_id WHERE t.message_id = %s + GROUP BY t.message_id """, (message_id,)) + + message_base = cursor.fetchone() + cursor.close() + + if not message_base: + return None + + # Get the precise message time + message_time = message_base['ts_created'].timestamp() + + # Batch load all positions at once + receiver_ids_list = [int(r_id) for r_id in message_base['receiver_ids'].split(',')] if message_base['receiver_ids'] else [] + node_ids = [message_base['from_id']] + receiver_ids_list + positions = md.get_positions_at_time(node_ids, message_time) + + # Fallback: If sender position is missing, fetch it directly + if message_base['from_id'] not in positions: + sender_fallback = 
md.get_position_at_time(message_base['from_id'], message_time) + if sender_fallback: + positions[message_base['from_id']] = sender_fallback + + # Batch load all reception details + reception_details = md.get_reception_details_batch(message_id, receiver_ids_list) - message_data = cursor.fetchall() - if not message_data: - abort(404) - - # Process message data + # Ensure keys are int for lookups + receiver_positions = {int(k): v for k, v in positions.items() if k in receiver_ids_list} + receiver_details = {int(k): v for k, v in reception_details.items() if k in receiver_ids_list} + sender_position = positions.get(message_base['from_id']) + + # Calculate convex hull area in square km + points = [] + if sender_position and sender_position['latitude'] is not None and sender_position['longitude'] is not None: + points.append((sender_position['longitude'], sender_position['latitude'])) + for pos in receiver_positions.values(): + if pos and pos['latitude'] is not None and pos['longitude'] is not None: + points.append((pos['longitude'], pos['latitude'])) + convex_hull_area_km2 = None + if len(points) >= 3: + # Use shapely to calculate convex hull area + hull = MultiPoint(points).convex_hull + # Approximate area on Earth's surface (convert degrees to meters using haversine formula) + # We'll use a simple equirectangular projection for small areas + # Reference point for projection + avg_lat = sum(lat for lon, lat in points) / len(points) + earth_radius = 6371.0088 # km + def latlon_to_xy(lon, lat): + x = math.radians(lon) * earth_radius * math.cos(math.radians(avg_lat)) + y = math.radians(lat) * earth_radius + return (x, y) + # Handle both LineString and Polygon cases + if hasattr(hull, 'exterior'): + coords = hull.exterior.coords + else: + coords = hull.coords + xy_points = [latlon_to_xy(lon, lat) for lon, lat in coords] + hull_xy = MultiPoint(xy_points).convex_hull + convex_hull_area_km2 = hull_xy.area + + # Prepare message object for template message = { 'id': message_id, 
- 'from_id': message_data[0]['from_id'], - 'text': message_data[0]['text'], - 'ts_created': message_data[0]['ts_created'], - 'receptions': [] + 'from_id': message_base['from_id'], + 'to_id': message_base.get('to_id'), # Ensure to_id is included + 'channel': message_base.get('channel'), # Ensure channel is included + 'text': message_base['text'], + 'ts_created': message_time, + 'receiver_ids': receiver_ids_list } - # Only process receptions if they exist - for reception in message_data: - if reception['received_by_id'] is not None: # Add this check - message['receptions'].append({ - 'node_id': reception['received_by_id'], - 'rx_snr': reception['rx_snr'], - 'rx_rssi': reception['rx_rssi'], - 'hop_start': reception['hop_start'], - 'hop_limit': reception['hop_limit'], - 'rx_time': reception['rx_time'] - }) - - cursor.close() + return { + 'message': message, + 'sender_position': sender_position, + 'receiver_positions': receiver_positions, + 'receiver_details': receiver_details, + 'convex_hull_area_km2': convex_hull_area_km2 + } - # Check if sender has position data before rendering map - from_id = utils.convert_node_id_from_int_to_hex(message['from_id']) - if from_id not in nodes or not nodes[from_id].get('position'): - abort(404, description="Sender position data not available") +@app.route('/message_map.html') +def message_map(): + message_id = request.args.get('id') + if not message_id: + return redirect(url_for('chat')) + + # Get cached data + data = get_cached_message_map_data(message_id) + if not data: + return redirect(url_for('chat')) + + # Get nodes once and create a simplified version with only needed nodes + all_nodes = get_cached_nodes() + + # Create simplified nodes dict with only nodes used in this message + used_node_ids = set() + used_node_ids.add(utils.convert_node_id_from_int_to_hex(data['message']['from_id'])) + if data['message'].get('to_id') and data['message']['to_id'] != 4294967295: + 
used_node_ids.add(utils.convert_node_id_from_int_to_hex(data['message']['to_id'])) + for receiver_id in data['message']['receiver_ids']: + used_node_ids.add(utils.convert_node_id_from_int_to_hex(receiver_id)) + + simplified_nodes = {} + for node_id in used_node_ids: + if node_id in all_nodes: + node = all_nodes[node_id] + simplified_nodes[node_id] = { + 'long_name': node.get('long_name', ''), + 'short_name': node.get('short_name', ''), + 'position': node.get('position') + } + # --- Provide zero_hop_links and position data for relay node inference --- + md = get_meshdata() + sender_id = data['message']['from_id'] + receiver_ids = data['message']['receiver_ids'] + # Get zero-hop links for the last 1 day (or configurable) + zero_hop_timeout = 86400 + cutoff_time = int(time.time()) - zero_hop_timeout + zero_hop_links, _ = md.get_zero_hop_links(cutoff_time) + # Get sender and receiver positions at message time + message_time = data['message']['ts_created'] + sender_pos = data['sender_position'] + receiver_positions = data['receiver_positions'] + # Pass the relay matcher and context to the template return render_template( "message_map.html.j2", auth=auth(), config=config, - nodes=nodes, - message=message, + nodes=simplified_nodes, + message=data['message'], + sender_position=sender_pos, + receiver_positions=receiver_positions, + receiver_details=data['receiver_details'], + convex_hull_area_km2=data['convex_hull_area_km2'], utils=utils, datetime=datetime.datetime, - timestamp=datetime.datetime.now() + timestamp=datetime.datetime.now(), + find_relay_node_by_suffix=lambda relay_suffix, nodes, receiver_ids=None, sender_id=None: find_relay_node_by_suffix( + relay_suffix, nodes, receiver_ids, sender_id, zero_hop_links=zero_hop_links, sender_pos=sender_pos, receiver_pos=None + ) ) @app.route('/traceroute_map.html') @@ -232,38 +904,39 @@ def traceroute_map(): if not traceroute_id: abort(404) - md = MeshData() - nodes = md.get_nodes() + md = get_meshdata() + if not md: # Check if 
MeshData failed to initialize + abort(503, description="Database connection unavailable") - # Get traceroute data + # Get traceroute attempt by unique id first cursor = md.db.cursor(dictionary=True) cursor.execute(""" SELECT * FROM traceroute WHERE traceroute_id = %s """, (traceroute_id,)) - traceroute_data = cursor.fetchone() if not traceroute_data: + cursor.close() abort(404) # Format the forward route data route = [] if traceroute_data['route']: - route = [int(hop) for hop in traceroute_data['route'].split(';')] + route = [int(hop) for hop in traceroute_data['route'].split(';') if hop] # Format the return route data route_back = [] if traceroute_data['route_back']: - route_back = [int(hop) for hop in traceroute_data['route_back'].split(';')] + route_back = [int(hop) for hop in traceroute_data['route_back'].split(';') if hop] - # Format the forward SNR values + # Format the forward SNR values and scale by dividing by 4 snr_towards = [] if traceroute_data['snr_towards']: - snr_towards = [float(s) for s in traceroute_data['snr_towards'].split(';')] + snr_towards = [float(s)/4.0 for s in traceroute_data['snr_towards'].split(';') if s] - # Format the return SNR values + # Format the return SNR values and scale by dividing by 4 snr_back = [] if traceroute_data['snr_back']: - snr_back = [float(s) for s in traceroute_data['snr_back'].split(';')] + snr_back = [float(s)/4.0 for s in traceroute_data['snr_back'].split(';') if s] # Create a clean traceroute object for the template traceroute = { @@ -280,40 +953,230 @@ def traceroute_map(): 'success': traceroute_data['success'] } - cursor.close() + cursor.close() + + # Get nodes and create simplified version with only needed nodes + all_nodes = get_cached_nodes() + used_node_ids = set([traceroute['from_id'], traceroute['to_id']] + traceroute['route'] + traceroute['route_back']) + + simplified_nodes = {} + for node_id in used_node_ids: + node_hex = utils.convert_node_id_from_int_to_hex(node_id) + if node_hex in all_nodes: + 
node = all_nodes[node_hex] + simplified_nodes[node_hex] = { + 'long_name': node.get('long_name', ''), + 'short_name': node.get('short_name', ''), + 'position': node.get('position'), + 'ts_seen': node.get('ts_seen'), + 'role': node.get('role'), + 'owner_username': node.get('owner_username'), + 'hw_model': node.get('hw_model'), + 'firmware_version': node.get('firmware_version') + } + + # --- Build traceroute_positions dict for historical accuracy --- + node_ids = set([traceroute['from_id'], traceroute['to_id']] + traceroute['route'] + traceroute['route_back']) + traceroute_positions = {} + ts_created = traceroute['ts_created'] + # If ts_created is a datetime, convert to timestamp + if hasattr(ts_created, 'timestamp'): + ts_created = ts_created.timestamp() + for node_id in node_ids: + pos = md.get_position_at_time(node_id, ts_created) + node_hex = utils.convert_node_id_from_int_to_hex(node_id) + if not pos and node_hex in simplified_nodes and simplified_nodes[node_hex].get('position'): + pos_obj = simplified_nodes[node_hex]['position'] + # Convert to dict if needed + if hasattr(pos_obj, '__dict__'): + pos = dict(pos_obj.__dict__) + else: + pos = dict(pos_obj) + # Ensure position_time is present and properly formatted + if 'position_time' not in pos or not pos['position_time']: + if hasattr(pos_obj, 'position_time') and pos_obj.position_time: + pt = pos_obj.position_time + if isinstance(pt, datetime.datetime): + pos['position_time'] = pt.timestamp() + else: + pos['position_time'] = pt + else: + pos['position_time'] = None + if pos: + traceroute_positions[node_id] = pos + + + return render_template( + "traceroute_map.html.j2", + auth=auth(), + config=config, + nodes=simplified_nodes, + traceroute=traceroute, + traceroute_positions=traceroute_positions, # <-- pass to template + utils=utils, + meshtastic_support=get_meshtastic_support(), + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@cache.memoize(timeout=get_cache_timeout()) +def 
get_cached_graph_data(view_type='merged', days=1, zero_hop_timeout=43200): + """Cache the graph data.""" + md = get_meshdata() + if not md: + return None + return md.get_graph_data(view_type, days, zero_hop_timeout) + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_neighbors_data(view_type='neighbor_info', days=1, zero_hop_timeout=43200): + """Cache the neighbors data.""" + md = get_meshdata() + if not md: + return None + return md.get_neighbors_data(view_type, days, zero_hop_timeout) + +@app.route('/graph.html') +def graph(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/graph2.html') +def graph2(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph2.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/graph3.html') +def graph3(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, 
description="Database connection unavailable") + + return render_template( + "graph3.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/graph4.html') +def graph4(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph4.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/utilization-heatmap.html') +def utilization_heatmap(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") return render_template( - "traceroute_map.html.j2", + "utilization-heatmap.html.j2", auth=auth(), config=config, - nodes=nodes, - traceroute=traceroute, utils=utils, datetime=datetime.datetime, - timestamp=datetime.datetime.now() + timestamp=datetime.datetime.now(), + Channel=get_channel_enum(), # Add Channel enum to template context ) -@app.route('/graph.html') -def graph(): - md = MeshData() - nodes = md.get_nodes() - graph = md.graph_nodes() +@app.route('/utilization-hexmap.html') +def utilization_hexmap(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + return render_template( - "graph.html.j2", + "utilization-hexmap.html.j2", auth=auth(), config=config, - nodes=nodes, - graph=graph, utils=utils, datetime=datetime.datetime, timestamp=datetime.datetime.now(), + Channel=get_channel_enum(), # Add Channel enum to template context ) - 
@app.route('/map.html') def map(): - md = MeshData() - nodes = md.get_nodes() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + return render_template( + "map_api.html.j2", + auth=auth(), + config=config, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + Channel=get_channel_enum(), # Add Channel enum to template context + ) + +@app.route('/map-classic.html') +def map_classic(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() # Get timeout from config zero_hop_timeout = int(config.get("server", "zero_hop_timeout", fallback=43200)) @@ -384,189 +1247,45 @@ def map(): zero_hop_data=zero_hop_data, zero_hop_timeout=zero_hop_timeout, utils=utils, - datetime=datetime, - timestamp=datetime.datetime.now() + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + Channel=get_channel_enum(), # Add Channel enum to template context ) - @app.route('/neighbors.html') def neighbors(): - view_type = request.args.get('view', 'neighbor_info') - md = MeshData() - nodes = md.get_nodes() - - zero_hop_timeout = int(config.get("server", "zero_hop_timeout", fallback=43200)) - cutoff_time = int(time.time()) - zero_hop_timeout - - active_nodes_with_connections = {} - - if view_type in ['neighbor_info', 'merged']: - neighbor_info_nodes = {} - for node_id, node in nodes.items(): - if node.get("active") and node.get("neighbors"): - node_data = dict(node) - if 'last_heard' not in node_data: - node_data['last_heard'] = datetime.datetime.now() - elif isinstance(node_data['last_heard'], (int, float)): - node_data['last_heard'] = datetime.datetime.fromtimestamp(node_data['last_heard']) - - # Add heard-by data from neighbor info - node_data['heard_by_neighbors'] = [] - for other_id, other_node in nodes.items(): - if other_node.get("neighbors"): - for 
neighbor in other_node["neighbors"]: - if utils.convert_node_id_from_int_to_hex(neighbor["neighbor_id"]) == node_id: - node_data['heard_by_neighbors'].append({ - 'neighbor_id': utils.convert_node_id_from_hex_to_int(other_id), - 'snr': neighbor["snr"], - 'distance': neighbor.get("distance") - }) - - neighbor_info_nodes[node_id] = node_data - - if view_type == 'neighbor_info': - active_nodes_with_connections = neighbor_info_nodes - - if view_type in ['zero_hop', 'merged']: - cursor = md.db.cursor(dictionary=True) - - # First get nodes that have heard messages - cursor.execute(""" - SELECT DISTINCT - received_by_id as node_id, - MAX(rx_time) as last_heard - FROM message_reception - WHERE rx_time > %s - AND ( - (hop_limit IS NULL AND hop_start IS NULL) - OR - (hop_start - hop_limit = 0) - ) - GROUP BY received_by_id - """, (cutoff_time,)) - - zero_hop_nodes = {} - for row in cursor.fetchall(): - node_id = utils.convert_node_id_from_int_to_hex(row['node_id']) - if node_id in nodes and nodes[node_id].get("active"): - node_data = dict(nodes[node_id]) - node_data['zero_hop_neighbors'] = [] - node_data['heard_by_zero_hop'] = [] - node_data['last_heard'] = datetime.datetime.fromtimestamp(row['last_heard']) - - # Get nodes this node heard - cursor.execute(""" - SELECT - from_id as neighbor_id, - MAX(rx_snr) as snr, - COUNT(*) as message_count, - MAX(rx_time) as last_heard, - p1.latitude_i as lat1_i, - p1.longitude_i as lon1_i, - p2.latitude_i as lat2_i, - p2.longitude_i as lon2_i - FROM message_reception m - LEFT OUTER JOIN position p1 ON p1.id = m.received_by_id - LEFT OUTER JOIN position p2 ON p2.id = m.from_id - WHERE m.received_by_id = %s - AND m.rx_time > %s - AND ( - (m.hop_limit IS NULL AND m.hop_start IS NULL) - OR - (m.hop_start - m.hop_limit = 0) - ) - GROUP BY from_id, p1.latitude_i, p1.longitude_i, p2.latitude_i, p2.longitude_i - """, (row['node_id'], cutoff_time)) - - # Process nodes this node heard - for neighbor in cursor.fetchall(): - distance = None - if 
(neighbor['lat1_i'] and neighbor['lon1_i'] and - neighbor['lat2_i'] and neighbor['lon2_i']): - distance = round(utils.distance_between_two_points( - neighbor['lat1_i'] / 10000000, - neighbor['lon1_i'] / 10000000, - neighbor['lat2_i'] / 10000000, - neighbor['lon2_i'] / 10000000 - ), 2) - - node_data['zero_hop_neighbors'].append({ - 'neighbor_id': neighbor['neighbor_id'], - 'snr': neighbor['snr'], - 'message_count': neighbor['message_count'], - 'distance': distance, - 'last_heard': datetime.datetime.fromtimestamp(neighbor['last_heard']) - }) - - # Get nodes that heard this node - cursor.execute(""" - SELECT - received_by_id as neighbor_id, - MAX(rx_snr) as snr, - COUNT(*) as message_count, - MAX(rx_time) as last_heard, - p1.latitude_i as lat1_i, - p1.longitude_i as lon1_i, - p2.latitude_i as lat2_i, - p2.longitude_i as lon2_i - FROM message_reception m - LEFT OUTER JOIN position p1 ON p1.id = m.received_by_id - LEFT OUTER JOIN position p2 ON p2.id = m.from_id - WHERE m.from_id = %s - AND m.rx_time > %s - AND ( - (m.hop_limit IS NULL AND m.hop_start IS NULL) - OR - (m.hop_start - m.hop_limit = 0) - ) - GROUP BY received_by_id, p1.latitude_i, p1.longitude_i, p2.latitude_i, p2.longitude_i - """, (row['node_id'], cutoff_time)) - - # Process nodes that heard this node - for neighbor in cursor.fetchall(): - distance = None - if (neighbor['lat1_i'] and neighbor['lon1_i'] and - neighbor['lat2_i'] and neighbor['lon2_i']): - distance = round(utils.distance_between_two_points( - neighbor['lat1_i'] / 10000000, - neighbor['lon1_i'] / 10000000, - neighbor['lat2_i'] / 10000000, - neighbor['lon2_i'] / 10000000 - ), 2) - - node_data['heard_by_zero_hop'].append({ - 'neighbor_id': neighbor['neighbor_id'], - 'snr': neighbor['snr'], - 'message_count': neighbor['message_count'], - 'distance': distance, - 'last_heard': datetime.datetime.fromtimestamp(neighbor['last_heard']) - }) - - if node_data['zero_hop_neighbors'] or node_data['heard_by_zero_hop']: - zero_hop_nodes[node_id] = node_data 
- - if view_type == 'zero_hop': - active_nodes_with_connections = zero_hop_nodes - else: # merged view - active_nodes_with_connections = neighbor_info_nodes.copy() - for node_id, node_data in zero_hop_nodes.items(): - if node_id in active_nodes_with_connections: - active_nodes_with_connections[node_id]['zero_hop_neighbors'] = node_data['zero_hop_neighbors'] - active_nodes_with_connections[node_id]['heard_by_zero_hop'] = node_data['heard_by_zero_hop'] - if (node_data['last_heard'] > - active_nodes_with_connections[node_id]['last_heard']): - active_nodes_with_connections[node_id]['last_heard'] = node_data['last_heard'] - else: - active_nodes_with_connections[node_id] = node_data - - cursor.close() + view_type = request.args.get('view', 'neighbor_info') # Default to neighbor_info + md = get_meshdata() + if not md: + abort(503, description="Database connection unavailable") + + # Get base node data using singleton + nodes = get_cached_nodes() + if not nodes: + # Handle case with no nodes gracefully + return render_template( + "neighbors.html.j2", + auth=auth, config=config, nodes={}, + active_nodes_with_connections={}, view_type=view_type, + utils=utils, datetime=datetime.datetime, timestamp=datetime.datetime.now() + ) + + # Get neighbors data using the new method + active_nodes_data = md.get_neighbors_data(view_type=view_type) + + # Sort final results by last heard time + active_nodes_data = dict(sorted( + active_nodes_data.items(), + key=lambda item: item[1].get('last_heard', datetime.datetime.min), + reverse=True + )) return render_template( "neighbors.html.j2", auth=auth(), config=config, - nodes=nodes, - active_nodes_with_connections=active_nodes_with_connections, + nodes=nodes, # Pass full nodes list for lookups in template + active_nodes_with_connections=active_nodes_data, # Pass the processed data view_type=view_type, utils=utils, datetime=datetime.datetime, @@ -575,8 +1294,10 @@ def neighbors(): @app.route('/telemetry.html') def telemetry(): - md = MeshData() 
- nodes = md.get_nodes() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() telemetry = md.get_telemetry_all() return render_template( "telemetry.html.j2", @@ -589,14 +1310,15 @@ def telemetry(): timestamp=datetime.datetime.now() ) - @app.route('/traceroutes.html') def traceroutes(): - md = MeshData() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") page = request.args.get('page', 1, type=int) per_page = 100 - nodes = md.get_nodes() + nodes = get_cached_nodes() traceroute_data = md.get_traceroutes(page=page, per_page=per_page) # Calculate pagination info @@ -627,33 +1349,90 @@ def traceroutes(): traceroutes=traceroute_data['items'], pagination=pagination, utils=utils, + meshtastic_support=get_meshtastic_support(), datetime=datetime.datetime, timestamp=datetime.datetime.now(), + meshdata=md # Add meshdata to template context ) - @app.route('/logs.html') def logs(): - md = MeshData() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + # Get node filter from query parameter + node_filter = request.args.get('node') + logs = md.get_logs() return render_template( "logs.html.j2", auth=auth(), config=config, logs=logs, + node_filter=node_filter, # Pass the node filter to template utils=utils, datetime=datetime.datetime, timestamp=datetime.datetime.now(), json=json ) +@app.route('/routing.html') +def routing(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + # Get query parameters + page = request.args.get('page', 1, type=int) + per_page = request.args.get('per_page', 50, type=int) + error_only = request.args.get('error_only', 'false').lower() == 'true' + days = request.args.get('days', 7, type=int) 
+ + # Get routing messages + routing_data = md.get_routing_messages(page=page, per_page=per_page, error_only=error_only, days=days) + + # Get routing statistics + stats = md.get_routing_stats(days=days) + error_breakdown = md.get_routing_errors_by_type(days=days) + + # Create template context + template_context = { + "auth": auth(), + "config": config, + "routing_messages": routing_data['items'], + "pagination": routing_data, + "stats": stats, + "error_breakdown": error_breakdown, + "error_only": error_only, + "days": days, + "utils": utils, + "datetime": datetime.datetime, + "timestamp": datetime.datetime.now(), + "meshtastic_support": get_meshtastic_support() + } + + response = render_template("routing.html.j2", **template_context) + + # Clean up large objects to help with memory management + del template_context + del routing_data + del stats + del error_breakdown + + # Force garbage collection + gc.collect() + + return response @app.route('/monday.html') def monday(): - md = MeshData() - nodes = md.get_nodes() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() chat = md.get_chat() - monday = MeshtasticMonday(chat).get_data() + monday = MeshtasticMonday(chat["items"]).get_data() return render_template( "monday.html.j2", auth=auth(), @@ -665,11 +1444,12 @@ def monday(): timestamp=datetime.datetime.now(), ) - @app.route('/mynodes.html') def mynodes(): - md = MeshData() - nodes = md.get_nodes() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() owner = auth() if not owner: return redirect(url_for('login')) @@ -680,14 +1460,13 @@ def mynodes(): config=config, nodes=mynodes, show_inactive=True, - hardware=meshtastic_support.HardwareModel, - meshtastic_support=meshtastic_support, + hardware=get_hardware_model_enum(), + 
meshtastic_support=get_meshtastic_support(), utils=utils, datetime=datetime.datetime, timestamp=datetime.datetime.now(), ) - @app.route('/linknode.html') def link_node(): owner = auth() @@ -704,6 +1483,35 @@ def link_node(): config=config ) +@app.route('/account.html') +def account(): + owner = auth() + if not owner: + return redirect(url_for('login')) + # Don't use cached nodes for account page - always get fresh data from database + md = get_meshdata() + if not md: + abort(503, description="Database connection unavailable") + # Clear MeshData cache and get fresh nodes directly from database + md.clear_nodes_cache() + nodes = md.get_nodes(active=False) # Get fresh from database, bypassing all caches + if not nodes: + abort(503, description="Database connection unavailable") + mynodes = utils.get_owner_nodes(nodes, owner["email"]) + response = make_response(render_template( + "account.html.j2", + auth=owner, + config=config, + nodes=mynodes, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + )) + # Add no-cache headers to prevent browser/proxy caching + response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate, max-age=0' + response.headers['Pragma'] = 'no-cache' + response.headers['Expires'] = '0' + return response @app.route('/register.html', methods=['GET', 'POST']) def register(): @@ -729,7 +1537,6 @@ def register(): error_message=error_message ) - @app.route('/login.html', methods=['GET', 'POST']) def login(success_message=None, error_message=None): if request.method == 'POST': @@ -754,14 +1561,12 @@ def login(success_message=None, error_message=None): error_message=error_message ) - @app.route('/logout.html') def logout(): resp = make_response(redirect(url_for('serve_index'))) resp.set_cookie('jwt', '', expires=0) return resp - @app.route('/verify') def verify(): code = request.args.get('c') @@ -773,112 +1578,77 @@ def verify(): return login(success_message=res["success"]) return serve_index() - @app.route('/') 
def serve_static(filename): nodep = r"node\_(\w{8})\.html" userp = r"user\_(\w+)\.html" if re.match(nodep, filename): - md = MeshData() match = re.match(nodep, filename) - node = match.group(1) - nodes = md.get_nodes() - if node not in nodes: + node_hex = match.group(1) + + # Get nodes once and reuse them + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + # Check if node exists first + if node_hex not in nodes: abort(404) - node_id = utils.convert_node_id_from_hex_to_int(node) - node_telemetry = md.get_node_telemetry(node_id) - node_route = md.get_route_coordinates(node_id) - telemetry_graph = draw_graph(node_telemetry) - lp = LOSProfile(nodes, node_id) - - # Get timeout from config - zero_hop_timeout = int(config.get("server", "zero_hop_timeout", fallback=43200)) # Default 12 hours - cutoff_time = int(time.time()) - zero_hop_timeout - - # Query for zero-hop messages heard by this node (within timeout period) - db = md.db - zero_hop_heard = [] - cursor = db.cursor(dictionary=True) - cursor.execute(""" - SELECT - r.from_id, - COUNT(*) AS count, - MAX(r.rx_snr) AS best_snr, - AVG(r.rx_snr) AS avg_snr, - MAX(r.rx_time) AS last_rx_time - FROM - message_reception r - WHERE - r.received_by_id = %s - AND ( - (r.hop_limit IS NULL AND r.hop_start IS NULL) - OR - (r.hop_start - r.hop_limit = 0) - ) - AND r.rx_time > %s - GROUP BY - r.from_id - ORDER BY - last_rx_time DESC - """, (node_id, cutoff_time)) - zero_hop_heard = cursor.fetchall() - - # Query for zero-hop messages sent by this node and heard by others (within timeout period) - zero_hop_heard_by = [] - cursor.execute(""" - SELECT - r.received_by_id, - COUNT(*) AS count, - MAX(r.rx_snr) AS best_snr, - AVG(r.rx_snr) AS avg_snr, - MAX(r.rx_time) AS last_rx_time - FROM - message_reception r - WHERE - r.from_id = %s - AND ( - (r.hop_limit IS NULL AND r.hop_start IS NULL) - OR - (r.hop_start - r.hop_limit = 0) - ) - AND r.rx_time > %s - GROUP BY - r.received_by_id - 
ORDER BY - last_rx_time DESC - """, (node_id, cutoff_time)) - zero_hop_heard_by = cursor.fetchall() - cursor.close() + + # Get all node page data directly, bypassing the leaky application cache + node_page_data = get_node_page_data(node_hex, nodes) + + # If data fetching fails, handle gracefully + if not node_page_data: + abort(503, description="Failed to retrieve node data. Please try again shortly.") + + # Render the template + response = make_response(render_template( + f"node.html.j2", + auth=auth(), + config=config, + node=node_page_data['node'], + linked_nodes_details=node_page_data['linked_nodes_details'], + hardware=get_hardware_model_enum(), + meshtastic_support=get_meshtastic_support(), + hardware_photos=get_hardware_photos(), + los_profiles=node_page_data['los_profiles'], + telemetry_graph=node_page_data['telemetry_graph'], + node_route=node_page_data['node_route'], + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + zero_hop_heard=node_page_data['zero_hop_heard'], + zero_hop_heard_by=node_page_data['zero_hop_heard_by'], + neighbor_heard_by=node_page_data['neighbor_heard_by'], + zero_hop_timeout=node_page_data['zero_hop_timeout'], + max_distance=node_page_data['max_distance_km'], + elsewhere_links=node_page_data['elsewhere_links'] + )) - return render_template( - f"node.html.j2", - auth=auth(), - config=config, - node=nodes[node], - nodes=nodes, - hardware=meshtastic_support.HardwareModel, - meshtastic_support=meshtastic_support, - los_profiles=lp.get_profiles(), - telemetry_graph=telemetry_graph, - node_route=node_route, - utils=utils, - datetime=datetime.datetime, - timestamp=datetime.datetime.now(), - zero_hop_heard=zero_hop_heard, - zero_hop_heard_by=zero_hop_heard_by, - zero_hop_timeout=zero_hop_timeout, - ) + # Clean up node_page_data to help with memory management + del node_page_data + + # Force garbage collection to release memory immediately + gc.collect() + + # Set Cache-Control header for client-side caching + 
response.headers['Cache-Control'] = 'public, max-age=60' + + return response if re.match(userp, filename): match = re.match(userp, filename) username = match.group(1) - md = MeshData() + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") owner = md.get_user(username) if not owner: abort(404) - nodes = md.get_nodes() - owner_nodes = utils.get_owner_nodes(nodes, owner["email"]) + all_nodes = get_cached_nodes() + owner_nodes = utils.get_owner_nodes(all_nodes, owner["email"]) return render_template( "user.html.j2", auth=auth(), @@ -886,8 +1656,9 @@ def serve_static(filename): config=config, nodes=owner_nodes, show_inactive=True, - hardware=meshtastic_support.HardwareModel, - meshtastic_support=meshtastic_support, + hardware=get_hardware_model_enum(), + meshtastic_support=get_meshtastic_support(), + hardware_photos=get_hardware_photos(), utils=utils, datetime=datetime.datetime, timestamp=datetime.datetime.now(), @@ -895,6 +1666,427 @@ def serve_static(filename): return send_from_directory("www", filename) +@app.route('/metrics.html') +@cache.cached(timeout=60) # Cache for 60 seconds +def metrics(): + return render_template( + "metrics.html.j2", + auth=auth(), + config=config, + Channel=get_channel_enum(), + utils=utils + ) + +@app.route('/chat-classic.html') +def chat(): + page = request.args.get('page', 1, type=int) + per_page = 50 + + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + chat_data = get_cached_chat_data(page, per_page) + if not chat_data: + abort(503, description="Database connection unavailable") + + return render_template( + "chat.html.j2", + auth=auth(), + config=config, + nodes=nodes, + chat=chat_data["items"], + pagination=chat_data, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + debug=False, + ) + + + +@app.route('/chat.html') +def chat2(): + page = 
request.args.get('page', 1, type=int) + per_page = 50 + channel = request.args.get('channel', 'all') + + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + chat_data = get_cached_chat_data(page, per_page, channel) + if not chat_data: + abort(503, description="Database connection unavailable") + + # Get available channels from meshtastic_support + import meshtastic_support + available_channels = [] + for channel_enum in meshtastic_support.Channel: + available_channels.append({ + 'value': channel_enum.value, + 'name': meshtastic_support.get_channel_name(channel_enum.value), + 'short_name': meshtastic_support.get_channel_name(channel_enum.value, use_short_names=True) + }) + + # Process channel display for the template + channel_display = "All" + if channel != 'all': + selected_channels = channel.split(',') + short_names = [] + for channel_info in available_channels: + if str(channel_info['value']) in selected_channels: + short_names.append(channel_info['short_name']) + if short_names: + channel_display = ', '.join(short_names) + else: + channel_display = channel + + # Pre-process nodes to reduce template complexity + # Only include nodes that are actually used in the chat messages + used_node_ids = set() + for message in chat_data["items"]: + used_node_ids.add(message["from"]) + if message["to"] != "ffffffff": + used_node_ids.add(message["to"]) + for reception in message.get("receptions", []): + node_id = utils.convert_node_id_from_int_to_hex(reception["node_id"]) + used_node_ids.add(node_id) + + # Create simplified nodes dict with only needed data + simplified_nodes = {} + for node_id in used_node_ids: + if node_id in nodes: + node = nodes[node_id] + simplified_nodes[node_id] = { + 'long_name': node.get('long_name', ''), + 'short_name': node.get('short_name', ''), + 'hw_model': node.get('hw_model'), + 'hw_model_name': get_hardware_model_name(node.get('hw_model')) if node.get('hw_model') else 
None, + 'role': node.get('role'), + 'role_name': utils.get_role_name(node.get('role')) if node.get('role') is not None else None, + 'firmware_version': node.get('firmware_version'), + 'owner_username': node.get('owner_username'), + 'owner': node.get('owner'), + 'position': node.get('position'), + 'telemetry': node.get('telemetry'), + 'ts_seen': node.get('ts_seen') + } + + return render_template( + "chat2.html.j2", + auth=auth(), + config=config, + nodes=simplified_nodes, + chat=chat_data["items"], + pagination=chat_data, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + meshtastic_support=get_meshtastic_support(), + debug=False, + channel=channel, + available_channels=available_channels, + channel_display=channel_display + ) + +@app.route('/') +def serve_index(success_message=None, error_message=None): + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + active_nodes = get_cached_active_nodes() + + return render_template( + "index.html.j2", + auth=auth(), + config=config, + nodes=nodes, + active_nodes=active_nodes, + timestamp=datetime.datetime.now(), + success_message=success_message, + error_message=error_message + ) + +@app.route('/nodes.html') +def nodes(): + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + latest = get_cached_latest_node() + logging.info(f"/nodes.html: Loaded {len(nodes)} nodes.") + + # Get hardware model filter from query parameters + hw_model_filter = request.args.get('hw_model') + hw_name_filter = request.args.get('hw_name') + + return render_template( + "nodes.html.j2", + auth=auth(), + config=config, + nodes=nodes, + show_inactive=False, + latest=latest, + hw_model_filter=hw_model_filter, + hw_name_filter=hw_name_filter, + hardware=get_hardware_model_enum(), + meshtastic_support=get_meshtastic_support(), + hardware_photos=get_hardware_photos(), + 
utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@app.route('/allnodes.html') +def allnodes(): + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + latest = get_cached_latest_node() + logging.info(f"/allnodes.html: Loaded {len(nodes)} nodes.") + + # Get hardware model filter from query parameters + hw_model_filter = request.args.get('hw_model') + hw_name_filter = request.args.get('hw_name') + + return render_template( + "allnodes.html.j2", + auth=auth(), + config=config, + nodes=nodes, + show_inactive=True, + latest=latest, + hw_model_filter=hw_model_filter, + hw_name_filter=hw_name_filter, + hardware=get_hardware_model_enum(), + meshtastic_support=get_meshtastic_support(), + hardware_photos=get_hardware_photos(), + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@app.route('/message-paths.html') +def message_paths(): + days = float(request.args.get('days', 0.167)) # Default to 4 hours if not provided + + md = get_meshdata() + if not md: + abort(503, description="Database connection unavailable") + + # Get relay network data + relay_data = md.get_relay_network_data(days) + + return render_template( + "message-paths.html.j2", + auth=auth(), + config=config, + relay_data=relay_data, + stats=relay_data['stats'], + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + + + + + +@cache.memoize(timeout=get_cache_timeout()) # Cache for 5 minutes +def get_cached_hardware_models(): + """Get hardware model statistics for the most and least common models.""" + try: + md = get_meshdata() + if not md: + return {'error': 'Database connection unavailable'} + + # Get hardware model statistics + cur = md.db.cursor(dictionary=True) + + # Query to get hardware model counts with model names + sql = """ + SELECT + hw_model, + COUNT(*) as node_count, + GROUP_CONCAT(DISTINCT short_name ORDER BY short_name 
SEPARATOR ', ') as sample_names + FROM nodeinfo + WHERE hw_model IS NOT NULL + GROUP BY hw_model + ORDER BY node_count DESC + """ + + cur.execute(sql) + results = cur.fetchall() + cur.close() + + # Process results and get hardware model names - use tuples to reduce memory + hardware_stats = [] + for row in results: + hw_model_id = row['hw_model'] + hw_model_name = get_hardware_model_name(hw_model_id) + + # Get a sample node for icon + sample_node = row['sample_names'].split(', ')[0] if row['sample_names'] else f"Model {hw_model_id}" + + # Use tuple instead of dict to reduce memory overhead + hardware_stats.append(( + hw_model_id, + hw_model_name or f"Unknown Model {hw_model_id}", + row['node_count'], + row['sample_names'], + utils.graph_icon(sample_node) + )) + + # Get top 15 most common + most_common = hardware_stats[:15] + + # Get bottom 15 least common (but only if we have more than 15 total models) + # Sort in ascending order (lowest count first) + least_common = hardware_stats[-15:] if len(hardware_stats) > 15 else hardware_stats + least_common = sorted(least_common, key=lambda x: x[2]) # Sort by node_count (index 2) + + # Convert tuples to dicts only for JSON serialization + def tuple_to_dict(hw_tuple): + return { + 'model_id': hw_tuple[0], + 'model_name': hw_tuple[1], + 'node_count': hw_tuple[2], + 'sample_names': hw_tuple[3], + 'icon_url': hw_tuple[4] + } + + return { + 'most_common': [tuple_to_dict(hw) for hw in most_common], + 'least_common': [tuple_to_dict(hw) for hw in least_common], + 'total_models': len(hardware_stats) + } + + except Exception as e: + logging.error(f"Error fetching hardware models: {e}") + return {'error': 'Failed to fetch hardware model data'} + +def generate_node_map_image_staticmaps(node_id, node_position, node_name): + width, height = 800, 400 + extra = 40 # extra height for attribution + context = staticmaps.Context() + context.set_tile_provider(staticmaps.tile_provider_OSM) + + # Convert position coordinates + lat = 
node_position['latitude_i'] / 10000000.0 + lon = node_position['longitude_i'] / 10000000.0 + node_point = staticmaps.create_latlng(lat, lon) + + # Add node marker + context.add_object(staticmaps.Marker(node_point, color=staticmaps.RED, size=16)) + + # Render the image + image = context.render_pillow(width, height + extra) + # Crop off the bottom 'extra' pixels to remove attribution + image = image.crop((0, 0, width, height)) + + return image + +@app.route('/og_image/node_map/.png') +def og_image_node_map(node_id): + """Generate OG image for a node showing its position on a map.""" + try: + # Ensure the OG images directory exists + os.makedirs("/tmp/og_images", exist_ok=True) + + # Get node data using the existing cached function + nodes = get_cached_nodes() + if not nodes: + return "Database unavailable", 503 + + # Convert node ID to hex format + node_id_hex = utils.convert_node_id_from_int_to_hex(node_id) + + # Get node data + node_data = nodes.get(node_id_hex) + if not node_data: + return "Node not found", 404 + + # Check if node has position data + position = node_data.get('position') + if not position or not position.get('latitude_i') or not position.get('longitude_i'): + return "Node has no position data", 404 + + # Check cache expiration + path = os.path.join("/tmp/og_images", f"node_map_{node_id}.png") + cache_expired = False + + if os.path.exists(path): + # Check file age + file_age = time.time() - os.path.getmtime(path) + max_cache_age = 3600 # 1 hour in seconds + + # Also check if node position has been updated since image was created + ts_seen = node_data.get('ts_seen') + if ts_seen: + if hasattr(ts_seen, 'timestamp'): + node_last_seen = ts_seen.timestamp() + else: + node_last_seen = ts_seen + + if file_age > max_cache_age or (node_last_seen and os.path.getmtime(path) < node_last_seen): + cache_expired = True + elif file_age > max_cache_age: + cache_expired = True + + # Generate the image if it doesn't exist or is expired + if not os.path.exists(path) or 
cache_expired: + node_position = { + 'latitude_i': position['latitude_i'], + 'longitude_i': position['longitude_i'] + } + node_name = node_data.get('short_name') or node_data.get('long_name') or f"Node {node_id}" + + image = generate_node_map_image_staticmaps(node_id, node_position, node_name) + image.save(path) + + # Serve the image + return send_file(path, mimetype='image/png') + + except Exception as e: + logging.error(f"Error generating node map OG image: {e}") + return "Error generating image", 500 + +# Helper functions to avoid circular references +def get_meshtastic_support(): + """Lazy import of meshtastic_support to avoid circular references.""" + import meshtastic_support + return meshtastic_support + +def get_hardware_model_enum(): + """Get HardwareModel enum without direct module reference.""" + import meshtastic_support + return meshtastic_support.HardwareModel + +def get_channel_enum(): + """Get Channel enum without direct module reference.""" + import meshtastic_support + return meshtastic_support.Channel + +def get_routing_error_description(error_reason): + """Get routing error description without direct module reference.""" + import meshtastic_support + return meshtastic_support.get_routing_error_description(error_reason) + +def get_hardware_model_name(hw_model): + """Get hardware model name without direct module reference.""" + import meshtastic_support + return meshtastic_support.get_hardware_model_name(hw_model) + +def get_hardware_photos(): + """Get HARDWARE_PHOTOS dict without direct module reference.""" + import meshtastic_support + return meshtastic_support.HARDWARE_PHOTOS def run(): # Enable Waitress logging @@ -904,17 +2096,26 @@ def run(): waitress_logger = logging.getLogger("waitress") waitress_logger.setLevel(logging.DEBUG) # Enable all logs from Waitress - # serve(app, host="0.0.0.0", port=port) + + # Configure Waitress to trust proxy headers for real IP addresses + # This is needed when running behind Docker, nginx, or other reverse 
proxies serve( TransLogger( app, setup_console_handler=False, logger=waitress_logger ), - port=port + port=port, + trusted_proxy='127.0.0.1,::1,172.16.0.0/12,192.168.0.0/16,10.0.0.0/8', # Trust Docker and local networks + trusted_proxy_count=1, # Trust one level of proxy (Docker) + trusted_proxy_headers={ + 'x-forwarded-for': 'X-Forwarded-For', + 'x-forwarded-proto': 'X-Forwarded-Proto', + 'x-forwarded-host': 'X-Forwarded-Host', + 'x-forwarded-port': 'X-Forwarded-Port' + } ) - if __name__ == '__main__': config = configparser.ConfigParser() config.read('config.ini') diff --git a/meshinfo_web_backup.py b/meshinfo_web_backup.py new file mode 100644 index 00000000..5880f23b --- /dev/null +++ b/meshinfo_web_backup.py @@ -0,0 +1,3136 @@ +from flask import ( + Flask, + send_from_directory, + render_template, + request, + make_response, + redirect, + url_for, + abort, + g, + jsonify, + current_app +) +from flask_caching import Cache +from waitress import serve +from paste.translogger import TransLogger +import configparser +import logging +import os +import psutil +import gc +import weakref +import threading +import time +import re +import sys +import math +from shapely.geometry import MultiPoint + +import utils +import meshtastic_support +from meshdata import MeshData +from database_cache import DatabaseCache +from meshinfo_register import Register +from meshtastic_monday import MeshtasticMonday +from meshinfo_telemetry_graph import draw_graph +from meshinfo_los_profile import LOSProfile +from timezone_utils import convert_to_local, format_timestamp, time_ago +import json +import datetime +from meshinfo_api import api + +app = Flask(__name__) + +cache_dir = os.path.join(os.path.dirname(__file__), 'runtime_cache') + +# Ensure the cache directory exists +if not os.path.exists(cache_dir): + try: + os.makedirs(cache_dir) + logging.info(f"Created cache directory: {cache_dir}") + except OSError as e: + logging.error(f"Could not create cache directory {cache_dir}: {e}") + 
cache_dir = None # Indicate failure + +# Initialize cache after config is loaded +cache = None + +def initialize_cache(): + """Initialize the Flask cache with configuration.""" + global cache + + # Load config first + config = configparser.ConfigParser() + config.read("config.ini") + + # Configure Flask-Caching + cache_config = { + 'CACHE_TYPE': 'FileSystemCache', + 'CACHE_DIR': cache_dir, + 'CACHE_THRESHOLD': int(config.get('server', 'app_cache_max_entries', fallback=100)), + 'CACHE_DEFAULT_TIMEOUT': int(config.get('server', 'app_cache_timeout_seconds', fallback=60)), + 'CACHE_OPTIONS': { + 'mode': 0o600, + 'max_size': 50 * 1024 * 1024 # 50MB max size per item + } + } + + if cache_dir: + logging.info(f"Using FileSystemCache with directory: {cache_dir}") + else: + logging.warning("Falling back to SimpleCache due to directory creation issues.") + + # Initialize Cache with the chosen config + try: + cache = Cache(app, config=cache_config) + except Exception as e: + logging.error(f"Failed to initialize cache: {e}") + # Fallback to SimpleCache if FileSystemCache fails + cache_config = { + 'CACHE_TYPE': 'SimpleCache', + 'CACHE_DEFAULT_TIMEOUT': int(config.get('server', 'app_cache_timeout_seconds', fallback=300)), + 'CACHE_THRESHOLD': int(config.get('server', 'app_cache_max_entries', fallback=100)) + } + cache = Cache(app, config=cache_config) + +# Cache monitoring functions +def get_cache_size(): + """Get total size of cache directory in bytes.""" + if cache_dir: + try: + total_size = 0 + for dirpath, dirnames, filenames in os.walk(cache_dir): + for f in filenames: + fp = os.path.join(dirpath, f) + total_size += os.path.getsize(fp) + return total_size + except Exception as e: + logging.error(f"Error getting cache size: {e}") + return 0 + +def get_cache_entry_count(): + """Get number of entries in cache directory.""" + if cache_dir: + try: + return len([f for f in os.listdir(cache_dir) if not f.endswith('.lock')]) + except Exception as e: + logging.error(f"Error getting 
cache entry count: {e}") + return 0 + +def get_largest_cache_entries(limit=5): + """Get the largest cache entries with their sizes.""" + if cache_dir: + try: + entries = [] + for f in os.listdir(cache_dir): + if not f.endswith('.lock'): + path = os.path.join(cache_dir, f) + size = os.path.getsize(path) + entries.append((f, size)) + return sorted(entries, key=lambda x: x[1], reverse=True)[:limit] + except Exception as e: + logging.error(f"Error getting largest cache entries: {e}") + return [] + +def log_cache_stats(): + """Log detailed cache statistics.""" + try: + total_size = get_cache_size() + entry_count = get_cache_entry_count() + largest_entries = get_largest_cache_entries() + + logging.info(f"Cache Statistics:") + logging.info(f" Total Size: {total_size / 1024 / 1024:.2f} MB") + logging.info(f" Entry Count: {entry_count}") + logging.info(" Largest Entries:") + for entry, size in largest_entries: + logging.info(f" {entry}: {size / 1024 / 1024:.2f} MB") + except Exception as e: + logging.error(f"Error logging cache stats: {e}") + +# Modify cleanup_cache to include cache stats +def cleanup_cache(): + try: + logging.info("Starting cache cleanup") + logging.info("Memory usage before cache cleanup:") + log_memory_usage(force=True) + logging.info("Cache stats before cleanup:") + log_cache_stats() + + # Clear nodes-related cache entries + clear_nodes_cache() + + # Clear database query cache + clear_database_cache() + + # Clear the cache + with app.app_context(): + cache.clear() + + # Force garbage collection + gc.collect() + + logging.info("Memory usage after cache cleanup:") + log_memory_usage(force=True) + logging.info("Cache stats after cleanup:") + log_cache_stats() + + except Exception as e: + logging.error(f"Error during cache cleanup: {e}") + +# Make globals available to templates +app.jinja_env.globals.update(convert_to_local=convert_to_local) +app.jinja_env.globals.update(format_timestamp=format_timestamp) +app.jinja_env.globals.update(time_ago=time_ago) 
+app.jinja_env.globals.update(min=min) +app.jinja_env.globals.update(max=max) +app.jinja_env.globals.update(datetime=datetime.datetime) + +# Add template filters +@app.template_filter('safe_hw_model') +def safe_hw_model(value): + try: + return meshtastic_support.get_hardware_model_name(value) + except (ValueError, AttributeError): + return f"Unknown ({value})" + +config = configparser.ConfigParser() +config.read("config.ini") + +# Initialize cache with config +initialize_cache() + +# Register API blueprint +app.register_blueprint(api) + +# Add request context tracking +active_requests = set() +request_lock = threading.Lock() + +# Add memory usage tracking +last_memory_log = 0 +last_memory_usage = 0 +MEMORY_LOG_INTERVAL = 60 # Log every minute instead of 5 minutes +MEMORY_CHANGE_THRESHOLD = 10 * 1024 * 1024 # 10MB change threshold (reduced from 50MB) + +@app.before_request +def before_request(): + """Track request start.""" + with request_lock: + active_requests.add(id(request)) + # Enhanced memory logging for high-activity periods + if len(active_requests) > 5: # If more than 5 concurrent requests + log_memory_usage(force=True) + +@app.after_request +def after_request(response): + """Clean up request context.""" + with request_lock: + active_requests.discard(id(request)) + # Enhanced memory logging for high-activity periods + if len(active_requests) > 5: # If more than 5 concurrent requests + log_memory_usage(force=True) + return response + +# Modify get_meshdata to use connection pooling +def get_meshdata(): + """Opens a new MeshData connection if there is none yet for the + current application context. 
+ """ + if 'meshdata' not in g: + try: + # Create new MeshData instance without connection pooling + g.meshdata = MeshData() + logging.debug("MeshData instance created for request context.") + except Exception as e: + logging.error(f"Failed to create MeshData for request context: {e}") + g.meshdata = None + return g.meshdata + +@app.teardown_appcontext +def teardown_meshdata(exception): + """Closes the MeshData connection at the end of the request.""" + md = g.pop('meshdata', None) + if md is not None: + try: + if hasattr(md, 'db') and md.db: + if md.db.is_connected(): + md.db.close() + logging.debug("Database connection closed in teardown.") + else: + logging.debug("Database connection was already closed.") + else: + logging.debug("No database connection to close.") + except Exception as e: + logging.error(f"Error handling database connection in teardown: {e}") + finally: + # Ensure the MeshData instance is properly cleaned up + try: + del md + except: + pass + logging.debug("MeshData instance removed from request context.") + +def log_memory_usage(force=False): + """Log current memory usage with detailed information.""" + global last_memory_log, last_memory_usage + current_time = time.time() + current_usage = psutil.Process().memory_info().rss + + # Only log if: + # 1. It's been more than MEMORY_LOG_INTERVAL seconds since last log + # 2. Memory usage has changed by more than threshold + # 3. 
Force flag is set + if not force and (current_time - last_memory_log < MEMORY_LOG_INTERVAL and + abs(current_usage - last_memory_usage) < MEMORY_CHANGE_THRESHOLD): + return + + try: + import gc + gc.collect() # Force garbage collection before measuring + + process = psutil.Process() + mem_info = process.memory_info() + + # Get memory usage by object type with more detail + objects_by_type = {} + large_objects = [] # Track objects larger than 1MB + object_counts = {} + + for obj in gc.get_objects(): + obj_type = type(obj).__name__ + try: + obj_size = sys.getsizeof(obj) + + # Track object counts + if obj_type not in object_counts: + object_counts[obj_type] = 0 + object_counts[obj_type] += 1 + + # Track memory by type + if obj_type not in objects_by_type: + objects_by_type[obj_type] = 0 + objects_by_type[obj_type] += obj_size + + # Track large objects (> 1MB) + if obj_size > 1024 * 1024: # 1MB + large_objects.append({ + 'type': obj_type, + 'size': obj_size, + 'repr': str(obj)[:100] + '...' if len(str(obj)) > 100 else str(obj) + }) + + except (TypeError, ValueError, RecursionError): + pass + + # Sort object types by memory usage + sorted_objects = sorted(objects_by_type.items(), key=lambda x: x[1], reverse=True) + + # Sort large objects by size + large_objects.sort(key=lambda x: x['size'], reverse=True) + + # Sort object counts + sorted_counts = sorted(object_counts.items(), key=lambda x: x[1], reverse=True) + + logging.info(f"=== MEMORY USAGE REPORT ===") + logging.info(f"Memory Usage: {mem_info.rss / 1024 / 1024:.1f} MB") + logging.info(f"Active Requests: {len(active_requests)}") + logging.info(f"Memory Change: {(current_usage - last_memory_usage) / 1024 / 1024:+.1f} MB") + + logging.info("Top 10 memory-consuming object types:") + for obj_type, size in sorted_objects[:10]: + count = object_counts.get(obj_type, 0) + logging.info(f" {obj_type}: {size / 1024 / 1024:.1f} MB ({count:,} objects)") + + logging.info("Top 10 object counts:") + for obj_type, count in 
sorted_counts[:10]: + size = objects_by_type.get(obj_type, 0) + logging.info(f" {obj_type}: {count:,} objects ({size / 1024 / 1024:.1f} MB)") + + if large_objects: + logging.info("Large objects (>1MB):") + for obj in large_objects[:10]: # Show top 10 largest objects + logging.info(f" {obj['type']}: {obj['size'] / 1024 / 1024:.1f} MB - {obj['repr']}") + + # Check for potential memory leaks + if current_usage > last_memory_usage + 50 * 1024 * 1024: # 50MB increase + logging.warning(f"POTENTIAL MEMORY LEAK: Memory increased by {(current_usage - last_memory_usage) / 1024 / 1024:.1f} MB") + + # Check for specific problematic object types + problematic_types = ['dict', 'list', 'SimpleNamespace', 'function', 'type'] + for obj_type in problematic_types: + if obj_type in objects_by_type: + size = objects_by_type[obj_type] + count = object_counts.get(obj_type, 0) + if size > 100 * 1024 * 1024: # 100MB + logging.warning(f"LARGE {obj_type.upper()} OBJECTS: {size / 1024 / 1024:.1f} MB ({count:,} objects)") + + logging.info("=== END MEMORY REPORT ===") + + last_memory_log = current_time + last_memory_usage = current_usage + + except Exception as e: + logging.error(f"Error in detailed memory logging: {e}") + +# Add connection monitoring +def monitor_connections(): + """Monitor database connections.""" + while True: + try: + with app.app_context(): + if hasattr(g, 'meshdata') and g.meshdata and hasattr(g.meshdata, 'db'): + if g.meshdata.db.is_connected(): + logging.info("Database connection is active") + else: + logging.warning("Database connection is not active") + except Exception as e: + logging.error(f"Error monitoring database connection: {e}") + time.sleep(60) # Check every minute + +# Add cache lock monitoring +def monitor_cache_locks(): + """Monitor cache lock files.""" + while True: + try: + if cache_dir: + lock_files = [f for f in os.listdir(cache_dir) if f.endswith('.lock')] + if lock_files: + logging.warning(f"Found {len(lock_files)} stale cache locks") + # Clean up 
stale locks + for lock_file in lock_files: + try: + os.remove(os.path.join(cache_dir, lock_file)) + except Exception as e: + logging.error(f"Error removing stale lock {lock_file}: {e}") + except Exception as e: + logging.error(f"Error monitoring cache locks: {e}") + time.sleep(300) # Check every 5 minutes + +def log_detailed_memory_analysis(): + """Perform detailed memory analysis to identify potential leaks.""" + try: + import gc + gc.collect() + + logging.info("=== DETAILED MEMORY ANALYSIS ===") + + # Check database connections + db_connections = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'mysql' in str(obj.__class__).lower(): + db_connections += 1 + logging.info(f"Database connection objects: {db_connections}") + + # Check cache objects + cache_objects = 0 + cache_size = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'cache' in str(obj.__class__).lower(): + cache_objects += 1 + try: + cache_size += sys.getsizeof(obj) + except: + pass + logging.info(f"Cache objects: {cache_objects} ({cache_size / 1024 / 1024:.1f} MB)") + + # Check for Flask/WSGI objects + flask_objects = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'flask' in str(obj.__class__).lower(): + flask_objects += 1 + logging.info(f"Flask objects: {flask_objects}") + + # Check for template objects + template_objects = 0 + for obj in gc.get_objects(): + if hasattr(obj, '__class__') and 'template' in str(obj.__class__).lower(): + template_objects += 1 + logging.info(f"Template objects: {template_objects}") + + # Check for large dictionaries and lists + large_dicts = [] + large_lists = [] + for obj in gc.get_objects(): + try: + if isinstance(obj, dict) and len(obj) > 1000: + large_dicts.append((len(obj), str(obj)[:50])) + elif isinstance(obj, list) and len(obj) > 1000: + large_lists.append((len(obj), str(obj)[:50])) + except: + pass + + if large_dicts: + logging.info("Large dictionaries:") + for size, repr_str in sorted(large_dicts, 
reverse=True)[:5]: + logging.info(f" Dict with {size:,} items: {repr_str}") + + if large_lists: + logging.info("Large lists:") + for size, repr_str in sorted(large_lists, reverse=True)[:5]: + logging.info(f" List with {size:,} items: {repr_str}") + + # Check for circular references + circular_refs = gc.collect() + if circular_refs > 0: + logging.warning(f"Found {circular_refs} circular references") + + logging.info("=== END DETAILED ANALYSIS ===") + + except Exception as e: + logging.error(f"Error in detailed memory analysis: {e}") + +# Modify the memory watchdog to include detailed analysis +def memory_watchdog(): + """Monitor memory usage and take action if it gets too high.""" + while True: + try: + process = psutil.Process(os.getpid()) + memory_info = process.memory_info() + memory_mb = memory_info.rss / 1024 / 1024 + + if memory_mb > 1000: # If over 1GB (reduced from 2GB) + logging.warning(f"Memory usage high ({memory_mb:.2f} MB), performing detailed analysis") + log_detailed_memory_analysis() + logging.info("Cache stats before high memory cleanup:") + log_cache_stats() + with app.app_context(): + cache.clear() + # Clear nodes-related cache entries + clear_nodes_cache() + # Clear database query cache + clear_database_cache() + gc.collect() + logging.info("Cache stats after high memory cleanup:") + log_cache_stats() + + if memory_mb > 2000: # If over 2GB (reduced from 4GB) + logging.error(f"Memory usage critical ({memory_mb:.2f} MB), logging detailed memory info") + log_memory_usage(force=True) + log_detailed_memory_analysis() + logging.info("Cache stats at critical memory level:") + log_cache_stats() + # Force clear nodes cache at critical levels + clear_nodes_cache() + clear_database_cache() + gc.collect() + + except Exception as e: + logging.error(f"Error in memory watchdog: {e}") + + time.sleep(30) # Check every 30 seconds instead of 60 + +# Start monitoring threads +connection_monitor_thread = threading.Thread(target=monitor_connections, daemon=True) 
+connection_monitor_thread.start() + +lock_monitor_thread = threading.Thread(target=monitor_cache_locks, daemon=True) +lock_monitor_thread.start() + +watchdog_thread = threading.Thread(target=memory_watchdog, daemon=True) +watchdog_thread.start() + +# Schedule cache cleanup +def schedule_cache_cleanup(): + while True: + time.sleep(900) # Run every 15 minutes instead of hourly + cleanup_cache() + +cleanup_thread = threading.Thread(target=schedule_cache_cleanup, daemon=True) +cleanup_thread.start() + +def auth(): + jwt = request.cookies.get('jwt') + if not jwt: + return None + reg = Register() + decoded_jwt = reg.auth(jwt) + return decoded_jwt + + +@app.errorhandler(404) +def not_found(e): + return render_template( + "404.html.j2", + auth=auth, + config=config + ), 404 + + +# Data caching functions +def cache_key_prefix(): + """Generate a cache key prefix based on current time bucket.""" + # Round to nearest minute for 60-second cache + return datetime.datetime.now().replace(second=0, microsecond=0).timestamp() + +def get_cache_timeout(): + """Get cache timeout from config.""" + return int(config.get('server', 'app_cache_timeout_seconds', fallback=60)) + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_nodes(): + """Get nodes data with database-level caching.""" + md = get_meshdata() + if not md: + return None + + # Use the cached method to prevent duplicate dictionaries + nodes_data = md.get_nodes_cached() + logging.debug(f"Fetched {len(nodes_data)} nodes from application cache") + return nodes_data + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_active_nodes(): + """Cache the active nodes calculation.""" + nodes = get_cached_nodes() + if not nodes: + return {} + return utils.active_nodes(nodes) + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_latest_node(): + """Cache the latest node data.""" + md = get_meshdata() + if not md: + return None + return md.get_latest_node() + +@cache.memoize(timeout=get_cache_timeout()) +def 
get_cached_message_map_data(message_id): + """Cache the message map data for a specific message.""" + md = get_meshdata() + if not md: + return None + + # Get message and basic reception data + cursor = md.db.cursor(dictionary=True) + cursor.execute(""" + SELECT t.*, GROUP_CONCAT(r.received_by_id) as receiver_ids + FROM text t + LEFT JOIN message_reception r ON t.message_id = r.message_id + WHERE t.message_id = %s + GROUP BY t.message_id + """, (message_id,)) + + message_base = cursor.fetchone() + cursor.close() + + if not message_base: + return None + + # Get the precise message time + message_time = message_base['ts_created'].timestamp() + + # Batch load all positions at once + receiver_ids_list = [int(r_id) for r_id in message_base['receiver_ids'].split(',')] if message_base['receiver_ids'] else [] + node_ids = [message_base['from_id']] + receiver_ids_list + positions = md.get_positions_at_time(node_ids, message_time) + + # Fallback: If sender position is missing, fetch it directly + if message_base['from_id'] not in positions: + sender_fallback = md.get_position_at_time(message_base['from_id'], message_time) + if sender_fallback: + positions[message_base['from_id']] = sender_fallback + + # Batch load all reception details + reception_details = md.get_reception_details_batch(message_id, receiver_ids_list) + + # Ensure keys are int for lookups + receiver_positions = {int(k): v for k, v in positions.items() if k in receiver_ids_list} + receiver_details = {int(k): v for k, v in reception_details.items() if k in receiver_ids_list} + sender_position = positions.get(message_base['from_id']) + + # Calculate convex hull area in square km + points = [] + if sender_position and sender_position['latitude'] is not None and sender_position['longitude'] is not None: + points.append((sender_position['longitude'], sender_position['latitude'])) + for pos in receiver_positions.values(): + if pos and pos['latitude'] is not None and pos['longitude'] is not None: + 
points.append((pos['longitude'], pos['latitude'])) + convex_hull_area_km2 = None + if len(points) >= 3: + # Use shapely to calculate convex hull area + hull = MultiPoint(points).convex_hull + # Approximate area on Earth's surface (convert degrees to meters using haversine formula) + # We'll use a simple equirectangular projection for small areas + # Reference point for projection + avg_lat = sum(lat for lon, lat in points) / len(points) + earth_radius = 6371.0088 # km + def latlon_to_xy(lon, lat): + x = math.radians(lon) * earth_radius * math.cos(math.radians(avg_lat)) + y = math.radians(lat) * earth_radius + return (x, y) + # Handle both LineString and Polygon cases + if hasattr(hull, 'exterior'): + coords = hull.exterior.coords + else: + coords = hull.coords + xy_points = [latlon_to_xy(lon, lat) for lon, lat in coords] + hull_xy = MultiPoint(xy_points).convex_hull + convex_hull_area_km2 = hull_xy.area + + # Prepare message object for template + message = { + 'id': message_id, + 'from_id': message_base['from_id'], + 'to_id': message_base.get('to_id'), # Ensure to_id is included + 'channel': message_base.get('channel'), # Ensure channel is included + 'text': message_base['text'], + 'ts_created': message_time, + 'receiver_ids': receiver_ids_list + } + + return { + 'message': message, + 'sender_position': sender_position, + 'receiver_positions': receiver_positions, + 'receiver_details': receiver_details, + 'convex_hull_area_km2': convex_hull_area_km2 + } + +@app.route('/message_map.html') +def message_map(): + message_id = request.args.get('id') + if not message_id: + return redirect(url_for('chat')) + + # Get cached data + data = get_cached_message_map_data(message_id) + if not data: + return redirect(url_for('chat')) + + # Get nodes once and create a simplified version with only needed nodes + all_nodes = get_cached_nodes() + + # Create simplified nodes dict with only nodes used in this message + used_node_ids = set() + 
used_node_ids.add(utils.convert_node_id_from_int_to_hex(data['message']['from_id'])) + if data['message'].get('to_id') and data['message']['to_id'] != 4294967295: + used_node_ids.add(utils.convert_node_id_from_int_to_hex(data['message']['to_id'])) + for receiver_id in data['message']['receiver_ids']: + used_node_ids.add(utils.convert_node_id_from_int_to_hex(receiver_id)) + + simplified_nodes = {} + for node_id in used_node_ids: + if node_id in all_nodes: + node = all_nodes[node_id] + simplified_nodes[node_id] = { + 'long_name': node.get('long_name', ''), + 'short_name': node.get('short_name', ''), + 'position': node.get('position') + } + + # --- Provide zero_hop_links and position data for relay node inference --- + md = get_meshdata() + sender_id = data['message']['from_id'] + receiver_ids = data['message']['receiver_ids'] + # Get zero-hop links for the last 1 day (or configurable) + zero_hop_timeout = 86400 + cutoff_time = int(time.time()) - zero_hop_timeout + zero_hop_links, _ = md.get_zero_hop_links(cutoff_time) + # Get sender and receiver positions at message time + message_time = data['message']['ts_created'] + sender_pos = data['sender_position'] + receiver_positions = data['receiver_positions'] + # Pass the relay matcher and context to the template + return render_template( + "message_map.html.j2", + auth=auth(), + config=config, + nodes=simplified_nodes, + message=data['message'], + sender_position=sender_pos, + receiver_positions=receiver_positions, + receiver_details=data['receiver_details'], + convex_hull_area_km2=data['convex_hull_area_km2'], + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + find_relay_node_by_suffix=lambda relay_suffix, nodes, receiver_ids=None, sender_id=None: find_relay_node_by_suffix( + relay_suffix, nodes, receiver_ids, sender_id, zero_hop_links=zero_hop_links, sender_pos=sender_pos, receiver_pos=None + ) + ) + +@app.route('/traceroute_map.html') +def traceroute_map(): + traceroute_id = 
request.args.get('id') + if not traceroute_id: + abort(404) + + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + # Get traceroute attempt by unique id first + cursor = md.db.cursor(dictionary=True) + cursor.execute(""" + SELECT * FROM traceroute WHERE traceroute_id = %s + """, (traceroute_id,)) + traceroute_data = cursor.fetchone() + if not traceroute_data: + cursor.close() + abort(404) + + # Format the forward route data + route = [] + if traceroute_data['route']: + route = [int(hop) for hop in traceroute_data['route'].split(';') if hop] + + # Format the return route data + route_back = [] + if traceroute_data['route_back']: + route_back = [int(hop) for hop in traceroute_data['route_back'].split(';') if hop] + + # Format the forward SNR values and scale by dividing by 4 + snr_towards = [] + if traceroute_data['snr_towards']: + snr_towards = [float(s)/4.0 for s in traceroute_data['snr_towards'].split(';') if s] + + # Format the return SNR values and scale by dividing by 4 + snr_back = [] + if traceroute_data['snr_back']: + snr_back = [float(s)/4.0 for s in traceroute_data['snr_back'].split(';') if s] + + # Create a clean traceroute object for the template + traceroute = { + 'id': traceroute_data['traceroute_id'], + 'from_id': traceroute_data['from_id'], + 'from_id_hex': utils.convert_node_id_from_int_to_hex(traceroute_data['from_id']), + 'to_id': traceroute_data['to_id'], + 'to_id_hex': utils.convert_node_id_from_int_to_hex(traceroute_data['to_id']), + 'ts_created': traceroute_data['ts_created'], + 'route': route, + 'route_back': route_back, + 'snr_towards': snr_towards, + 'snr_back': snr_back, + 'success': traceroute_data['success'] + } + + cursor.close() + + # Get nodes and create simplified version with only needed nodes + all_nodes = get_cached_nodes() + used_node_ids = set([traceroute['from_id'], traceroute['to_id']] + traceroute['route'] + traceroute['route_back']) + + 
simplified_nodes = {} + for node_id in used_node_ids: + node_hex = utils.convert_node_id_from_int_to_hex(node_id) + if node_hex in all_nodes: + node = all_nodes[node_hex] + simplified_nodes[node_hex] = { + 'long_name': node.get('long_name', ''), + 'short_name': node.get('short_name', ''), + 'position': node.get('position'), + 'ts_seen': node.get('ts_seen'), + 'role': node.get('role'), + 'owner_username': node.get('owner_username'), + 'hw_model': node.get('hw_model'), + 'firmware_version': node.get('firmware_version') + } + + # --- Build traceroute_positions dict for historical accuracy --- + node_ids = set([traceroute['from_id'], traceroute['to_id']] + traceroute['route'] + traceroute['route_back']) + traceroute_positions = {} + ts_created = traceroute['ts_created'] + # If ts_created is a datetime, convert to timestamp + if hasattr(ts_created, 'timestamp'): + ts_created = ts_created.timestamp() + for node_id in node_ids: + pos = md.get_position_at_time(node_id, ts_created) + node_hex = utils.convert_node_id_from_int_to_hex(node_id) + if not pos and node_hex in simplified_nodes and simplified_nodes[node_hex].get('position'): + pos_obj = simplified_nodes[node_hex]['position'] + # Convert to dict if needed + if hasattr(pos_obj, '__dict__'): + pos = dict(pos_obj.__dict__) + else: + pos = dict(pos_obj) + # Ensure position_time is present and properly formatted + if 'position_time' not in pos or not pos['position_time']: + if hasattr(pos_obj, 'position_time') and pos_obj.position_time: + pt = pos_obj.position_time + if isinstance(pt, datetime.datetime): + pos['position_time'] = pt.timestamp() + else: + pos['position_time'] = pt + else: + pos['position_time'] = None + if pos: + traceroute_positions[node_id] = pos + + + return render_template( + "traceroute_map.html.j2", + auth=auth(), + config=config, + nodes=simplified_nodes, + traceroute=traceroute, + traceroute_positions=traceroute_positions, # <-- pass to template + utils=utils, + 
meshtastic_support=meshtastic_support, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_graph_data(view_type='merged', days=1, zero_hop_timeout=43200): + """Cache the graph data.""" + md = get_meshdata() + if not md: + return None + return md.get_graph_data(view_type, days, zero_hop_timeout) + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_neighbors_data(view_type='neighbor_info', days=1, zero_hop_timeout=43200): + """Cache the neighbors data.""" + md = get_meshdata() + if not md: + return None + return md.get_neighbors_data(view_type, days, zero_hop_timeout) + +@app.route('/graph.html') +def graph(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/graph2.html') +def graph2(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph2.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/graph3.html') +def graph3(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = 
int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph3.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/graph4.html') +def graph4(): + view_type = request.args.get('view_type', 'merged') + days = int(request.args.get('days', 1)) + zero_hop_timeout = int(request.args.get('zero_hop_timeout', 43200)) + + # Get cached data + data = get_cached_graph_data(view_type, days, zero_hop_timeout) + if not data: + abort(503, description="Database connection unavailable") + + return render_template( + "graph4.html.j2", + auth=auth(), + config=config, + graph=data, + view_type=view_type, + days=days, + zero_hop_timeout=zero_hop_timeout, + timestamp=datetime.datetime.now() + ) + +@app.route('/utilization-heatmap.html') +def utilization_heatmap(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + return render_template( + "utilization-heatmap.html.j2", + auth=auth(), + config=config, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + Channel=meshtastic_support.Channel # Add Channel enum to template context + ) + +@app.route('/utilization-hexmap.html') +def utilization_hexmap(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + return render_template( + "utilization-hexmap.html.j2", + auth=auth(), + config=config, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + Channel=meshtastic_support.Channel # Add Channel enum to template context + ) + +@app.route('/map.html') +def map(): + md = get_meshdata() + if not md: # 
Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() + + # Get timeout from config + zero_hop_timeout = int(config.get("server", "zero_hop_timeout", fallback=43200)) + cutoff_time = int(time.time()) - zero_hop_timeout + + # Get zero-hop data for all nodes + cursor = md.db.cursor(dictionary=True) + zero_hop_data = {} + + # Query for all zero-hop messages + cursor.execute(""" + SELECT + r.from_id, + r.received_by_id, + COUNT(*) AS count, + MAX(r.rx_snr) AS best_snr, + AVG(r.rx_snr) AS avg_snr, + MAX(r.rx_time) AS last_rx_time + FROM + message_reception r + WHERE + ( + (r.hop_limit IS NULL AND r.hop_start IS NULL) + OR + (r.hop_start - r.hop_limit = 0) + ) + AND r.rx_time > %s + GROUP BY + r.from_id, r.received_by_id + ORDER BY + last_rx_time DESC + """, (cutoff_time,)) + + for row in cursor.fetchall(): + from_id = utils.convert_node_id_from_int_to_hex(row['from_id']) + received_by_id = utils.convert_node_id_from_int_to_hex(row['received_by_id']) + + if from_id not in zero_hop_data: + zero_hop_data[from_id] = {'heard': [], 'heard_by': []} + if received_by_id not in zero_hop_data: + zero_hop_data[received_by_id] = {'heard': [], 'heard_by': []} + + # Add to heard_by list of sender + zero_hop_data[from_id]['heard_by'].append({ + 'node_id': received_by_id, + 'count': row['count'], + 'best_snr': row['best_snr'], + 'avg_snr': row['avg_snr'], + 'last_rx_time': row['last_rx_time'] + }) + + # Add to heard list of receiver + zero_hop_data[received_by_id]['heard'].append({ + 'node_id': from_id, + 'count': row['count'], + 'best_snr': row['best_snr'], + 'avg_snr': row['avg_snr'], + 'last_rx_time': row['last_rx_time'] + }) + + cursor.close() + + return render_template( + "map.html.j2", + auth=auth(), + config=config, + nodes=nodes, + zero_hop_data=zero_hop_data, + zero_hop_timeout=zero_hop_timeout, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + 
Channel=meshtastic_support.Channel # Add Channel enum to template context + ) + +@app.route('/neighbors.html') +def neighbors(): + view_type = request.args.get('view', 'neighbor_info') # Default to neighbor_info + md = get_meshdata() + if not md: + abort(503, description="Database connection unavailable") + + # Get base node data using singleton + nodes = get_cached_nodes() + if not nodes: + # Handle case with no nodes gracefully + return render_template( + "neighbors.html.j2", + auth=auth, config=config, nodes={}, + active_nodes_with_connections={}, view_type=view_type, + utils=utils, datetime=datetime.datetime, timestamp=datetime.datetime.now() + ) + + # Get neighbors data using the new method + active_nodes_data = md.get_neighbors_data(view_type=view_type) + + # Sort final results by last heard time + active_nodes_data = dict(sorted( + active_nodes_data.items(), + key=lambda item: item[1].get('last_heard', datetime.datetime.min), + reverse=True + )) + + return render_template( + "neighbors.html.j2", + auth=auth(), + config=config, + nodes=nodes, # Pass full nodes list for lookups in template + active_nodes_with_connections=active_nodes_data, # Pass the processed data + view_type=view_type, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + ) + +@app.route('/telemetry.html') +def telemetry(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() + telemetry = md.get_telemetry_all() + return render_template( + "telemetry.html.j2", + auth=auth(), + config=config, + nodes=nodes, + telemetry=telemetry, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + + +@app.route('/traceroutes.html') +def traceroutes(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + page = request.args.get('page', 1, type=int) + 
per_page = 100 + + nodes = get_cached_nodes() + traceroute_data = md.get_traceroutes(page=page, per_page=per_page) + + # Calculate pagination info + total = traceroute_data['total'] + start_item = (page - 1) * per_page + 1 if total > 0 else 0 + end_item = min(page * per_page, total) + + # Create pagination info + pagination = { + 'page': page, + 'per_page': per_page, + 'total': total, + 'items': traceroute_data['items'], + 'pages': (total + per_page - 1) // per_page, + 'has_prev': page > 1, + 'has_next': page * per_page < total, + 'prev_num': page - 1, + 'next_num': page + 1, + 'start_item': start_item, + 'end_item': end_item + } + + return render_template( + "traceroutes.html.j2", + auth=auth(), + config=config, + nodes=nodes, + traceroutes=traceroute_data['items'], + pagination=pagination, + utils=utils, + meshtastic_support=meshtastic_support, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + meshdata=md # Add meshdata to template context + ) + + +@app.route('/logs.html') +def logs(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + # Get node filter from query parameter + node_filter = request.args.get('node') + + logs = md.get_logs() + return render_template( + "logs.html.j2", + auth=auth(), + config=config, + logs=logs, + node_filter=node_filter, # Pass the node filter to template + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + json=json + ) + + +@app.route('/monday.html') +def monday(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() + chat = md.get_chat() + monday = MeshtasticMonday(chat["items"]).get_data() + return render_template( + "monday.html.j2", + auth=auth(), + config=config, + nodes=nodes, + monday=monday, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + 
) + + +@app.route('/mynodes.html') +def mynodes(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + nodes = get_cached_nodes() + owner = auth() + if not owner: + return redirect(url_for('login')) + mynodes = utils.get_owner_nodes(nodes, owner["email"]) + return render_template( + "mynodes.html.j2", + auth=owner, + config=config, + nodes=mynodes, + show_inactive=True, + hardware=meshtastic_support.HardwareModel, + meshtastic_support=meshtastic_support, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + ) + + +@app.route('/linknode.html') +def link_node(): + owner = auth() + if not owner: + return redirect(url_for('login')) + reg = Register() + otp = reg.get_otp( + owner["email"] + ) + return render_template( + "link_node.html.j2", + auth=owner, + otp=otp, + config=config + ) + + +@app.route('/register.html', methods=['GET', 'POST']) +def register(): + error_message = None + if request.method == 'POST': + email = request.form.get('email') + username = request.form.get('username') + password = request.form.get('password') + reg = Register() + res = reg.register(username, email, password) + if "error" in res: + error_message = res["error"] + elif "success" in res: + return serve_index(success_message=res["success"]) + + return render_template( + "register.html.j2", + auth=auth(), + config=config, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + error_message=error_message + ) + + +@app.route('/login.html', methods=['GET', 'POST']) +def login(success_message=None, error_message=None): + if request.method == 'POST': + email = request.form.get('email') + password = request.form.get('password') + reg = Register() + res = reg.authenticate(email, password) + if "error" in res: + error_message = res["error"] + elif "success" in res: + jwt = res["success"] + resp = make_response(redirect(url_for('mynodes'))) + 
resp.set_cookie("jwt", jwt) + return resp + return render_template( + "login.html.j2", + auth=auth(), + config=config, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + success_message=success_message, + error_message=error_message + ) + + +@app.route('/logout.html') +def logout(): + resp = make_response(redirect(url_for('serve_index'))) + resp.set_cookie('jwt', '', expires=0) + return resp + + +@app.route('/verify') +def verify(): + code = request.args.get('c') + reg = Register() + res = reg.verify_account(code) + if "error" in res: + return serve_index(error_message=res["error"]) + elif "success" in res: + return login(success_message=res["success"]) + return serve_index() + + +@app.route('/') +def serve_static(filename): + nodep = r"node\_(\w{8})\.html" + userp = r"user\_(\w+)\.html" + + if re.match(nodep, filename): + match = re.match(nodep, filename) + node_hex = match.group(1) + + # Get nodes once and reuse them + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + # Check if node exists first + if node_hex not in nodes: + abort(404) + + # Get all node page data directly, bypassing the leaky application cache + node_page_data = get_node_page_data(node_hex, nodes) + + # If data fetching fails, handle gracefully + if not node_page_data: + abort(503, description="Failed to retrieve node data. 
Please try again shortly.") + + # Render the template + response = make_response(render_template( + f"node.html.j2", + auth=auth(), + config=config, + node=node_page_data['node'], + linked_nodes_details=node_page_data['linked_nodes_details'], + hardware=meshtastic_support.HardwareModel, + meshtastic_support=meshtastic_support, + los_profiles=node_page_data['los_profiles'], + telemetry_graph=node_page_data['telemetry_graph'], + node_route=node_page_data['node_route'], + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + zero_hop_heard=node_page_data['zero_hop_heard'], + zero_hop_heard_by=node_page_data['zero_hop_heard_by'], + neighbor_heard_by=node_page_data['neighbor_heard_by'], + zero_hop_timeout=node_page_data['zero_hop_timeout'], + max_distance=node_page_data['max_distance_km'], + elsewhere_links=node_page_data['elsewhere_links'] + )) + + # Clean up node_page_data to help with memory management + del node_page_data + + # Force garbage collection to release memory immediately + gc.collect() + + # Set Cache-Control header for client-side caching + response.headers['Cache-Control'] = 'public, max-age=60' + + return response + + if re.match(userp, filename): + match = re.match(userp, filename) + username = match.group(1) + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + owner = md.get_user(username) + if not owner: + abort(404) + all_nodes = get_cached_nodes() + owner_nodes = utils.get_owner_nodes(all_nodes, owner["email"]) + return render_template( + "user.html.j2", + auth=auth(), + username=username, + config=config, + nodes=owner_nodes, + show_inactive=True, + hardware=meshtastic_support.HardwareModel, + meshtastic_support=meshtastic_support, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + ) + + return send_from_directory("www", filename) + + +@app.route('/metrics.html') +@cache.cached(timeout=60) # Cache for 
60 seconds +def metrics(): + return render_template( + "metrics.html.j2", + auth=auth(), + config=config, + Channel=meshtastic_support.Channel, + utils=utils + ) + +@app.route('/api/metrics') +def get_metrics(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + try: + # Get time range from request parameters + time_range = request.args.get('time_range', 'day') # day, week, month, year, all + channel = request.args.get('channel', 'all') # Get channel parameter + + # Set time range based on parameter + end_time = datetime.datetime.now() + if time_range == 'week': + start_time = end_time - datetime.timedelta(days=7) + bucket_size = 180 # 3 hours in minutes + elif time_range == 'month': + start_time = end_time - datetime.timedelta(days=30) + bucket_size = 720 # 12 hours in minutes + elif time_range == 'year': + start_time = end_time - datetime.timedelta(days=365) + bucket_size = 2880 # 2 days in minutes + elif time_range == 'all': + # For 'all', we'll first check the data range in the database + cursor = md.db.cursor(dictionary=True) + cursor.execute("SELECT MIN(ts_created) as min_time FROM telemetry") + min_time = cursor.fetchone()['min_time'] + cursor.close() + + if min_time: + start_time = min_time + else: + # Default to 1 year if no data + start_time = end_time - datetime.timedelta(days=365) + + bucket_size = 10080 # 7 days in minutes + else: # Default to 'day' + start_time = end_time - datetime.timedelta(hours=24) + bucket_size = 30 # 30 minutes + + # Convert timestamps to the correct format for MySQL + start_timestamp = start_time.strftime('%Y-%m-%d %H:%M:%S') + end_timestamp = end_time.strftime('%Y-%m-%d %H:%M:%S') + + # Format string for time buckets based on bucket size + if bucket_size >= 10080: # 7 days or more + time_format = '%Y-%m-%d' # Daily format + elif bucket_size >= 1440: # 1 day or more + time_format = '%Y-%m-%d %H:00' # Hourly format + else: + time_format = 
'%Y-%m-%d %H:%i' # Minute format + + cursor = md.db.cursor(dictionary=True) + + # First, generate a series of time slots + time_slots_query = f""" + WITH RECURSIVE time_slots AS ( + SELECT DATE_FORMAT( + DATE_ADD(%s, INTERVAL -MOD(MINUTE(%s), {bucket_size}) MINUTE), + '{time_format}' + ) as time_slot + UNION ALL + SELECT DATE_FORMAT( + DATE_ADD( + STR_TO_DATE(time_slot, '{time_format}'), + INTERVAL {bucket_size} MINUTE + ), + '{time_format}' + ) + FROM time_slots + WHERE DATE_ADD( + STR_TO_DATE(time_slot, '{time_format}'), + INTERVAL {bucket_size} MINUTE + ) <= %s + ) + SELECT time_slot FROM time_slots + """ + cursor.execute(time_slots_query, (start_timestamp, start_timestamp, end_timestamp)) + time_slots = [row['time_slot'] for row in cursor.fetchall()] + + # Add channel condition if specified + channel_condition = "" + if channel != 'all': + # Only apply channel condition to tables that have a channel column + channel_condition_text = f" AND channel = {channel}" + channel_condition_telemetry = f" AND channel = {channel}" + channel_condition_reception = f" AND EXISTS (SELECT 1 FROM text t WHERE t.message_id = message_reception.message_id AND t.channel = {channel})" + else: + channel_condition_text = "" + channel_condition_telemetry = "" + channel_condition_reception = "" + + # Nodes Online Query + nodes_online_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + COUNT(DISTINCT id) as node_count + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(nodes_online_query, (start_timestamp, end_timestamp)) + nodes_online_data = {row['time_slot']: row['node_count'] for row in cursor.fetchall()} + + # Message Traffic Query + message_traffic_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + 
), + '{time_format}' + ) as time_slot, + COUNT(*) as message_count + FROM text + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_text} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(message_traffic_query, (start_timestamp, end_timestamp)) + message_traffic_data = {row['time_slot']: row['message_count'] for row in cursor.fetchall()} + + # Channel Utilization Query + channel_util_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(channel_utilization) as avg_util + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(channel_util_query, (start_timestamp, end_timestamp)) + channel_util_data = {row['time_slot']: float(row['avg_util']) if row['avg_util'] is not None else 0.0 for row in cursor.fetchall()} + + # Battery Levels Query + battery_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(battery_level) as avg_battery + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(battery_query, (start_timestamp, end_timestamp)) + battery_data = {row['time_slot']: float(row['avg_battery']) if row['avg_battery'] is not None else 0.0 for row in cursor.fetchall()} + + # Temperature Query + temperature_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(temperature) as avg_temp + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(temperature_query, (start_timestamp, end_timestamp)) + temperature_data 
= {row['time_slot']: float(row['avg_temp']) if row['avg_temp'] is not None else 0.0 for row in cursor.fetchall()} + + # SNR Query + snr_query = f""" + SELECT + DATE_FORMAT( + DATE_ADD( + ts_created, + INTERVAL -MOD(MINUTE(ts_created), {bucket_size}) MINUTE + ), + '{time_format}' + ) as time_slot, + AVG(rx_snr) as avg_snr + FROM message_reception + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_reception} + GROUP BY time_slot + ORDER BY time_slot + """ + cursor.execute(snr_query, (start_timestamp, end_timestamp)) + snr_data = {row['time_slot']: float(row['avg_snr']) if row['avg_snr'] is not None else 0.0 for row in cursor.fetchall()} + + cursor.close() + + # --- Moving Average Helper --- + def moving_average_centered(data_list, window_minutes, bucket_size_minutes): + # data_list: list of floats (same order as time_slots) + # window_minutes: total window size (e.g., 120 for 2 hours) + # bucket_size_minutes: size of each bucket (e.g., 30 for 30 min) + n = len(data_list) + result = [] + half_window = window_minutes // 2 + buckets_per_window = max(1, window_minutes // bucket_size_minutes) + for i in range(n): + # Centered window: find all indices within window centered at i + center_time = i + window_indices = [] + for j in range(n): + if abs(j - center_time) * bucket_size_minutes <= half_window: + window_indices.append(j) + if window_indices: + avg = sum(data_list[j] for j in window_indices) / len(window_indices) + else: + avg = data_list[i] + result.append(avg) + return result + + # --- Get metrics_average_interval from config --- + metrics_avg_interval = int(config.get('server', 'metrics_average_interval', fallback=7200)) # seconds + metrics_avg_minutes = metrics_avg_interval // 60 + + # --- Calculate moving averages for relevant metrics --- + # Determine bucket_size_minutes from bucket_size + bucket_size_minutes = bucket_size + + # Prepare raw data lists + nodes_online_raw = [nodes_online_data.get(slot, 0) for slot in time_slots] + battery_levels_raw 
= [battery_data.get(slot, 0) for slot in time_slots] + temperature_raw = [temperature_data.get(slot, 0) for slot in time_slots] + snr_raw = [snr_data.get(slot, 0) for slot in time_slots] + + # --- Get node_activity_prune_threshold from config --- + node_activity_prune_threshold = int(config.get('server', 'node_activity_prune_threshold', fallback=7200)) # seconds + + # --- For each time slot, count unique nodes heard in the preceding activity window --- + # Fetch all telemetry records in the full time range + cursor = md.db.cursor(dictionary=True) + cursor.execute(f""" + SELECT id, ts_created + FROM telemetry + WHERE ts_created >= %s AND ts_created <= %s {channel_condition_telemetry} + ORDER BY ts_created + """, (start_timestamp, end_timestamp)) + all_telemetry = list(cursor.fetchall()) + # Convert ts_created to datetime for easier comparison + for row in all_telemetry: + if isinstance(row['ts_created'], str): + row['ts_created'] = datetime.datetime.strptime(row['ts_created'], '%Y-%m-%d %H:%M:%S') + # Precompute for each time slot + nodes_heard_per_slot = [] + for slot in time_slots: + # slot is a string, convert to datetime + if '%H:%M' in time_format or '%H:%i' in time_format: + slot_time = datetime.datetime.strptime(slot, '%Y-%m-%d %H:%M') + elif '%H:00' in time_format: + slot_time = datetime.datetime.strptime(slot, '%Y-%m-%d %H:%M') + else: + slot_time = datetime.datetime.strptime(slot, '%Y-%m-%d') + window_start = slot_time - datetime.timedelta(seconds=node_activity_prune_threshold) + # Find all node ids with telemetry in [window_start, slot_time] + active_nodes = set() + for row in all_telemetry: + if window_start < row['ts_created'] <= slot_time: + active_nodes.add(row['id']) + nodes_heard_per_slot.append(len(active_nodes)) + # Now apply moving average and round to nearest integer + nodes_online_smoothed = [round(x) for x in moving_average_centered(nodes_heard_per_slot, metrics_avg_minutes, bucket_size_minutes)] + + # Fill in missing time slots with zeros 
(for non-averaged metrics) + result = { + 'nodes_online': { + 'labels': time_slots, + 'data': nodes_online_smoothed + }, + 'message_traffic': { + 'labels': time_slots, + 'data': [message_traffic_data.get(slot, 0) for slot in time_slots] + }, + 'channel_util': { + 'labels': time_slots, + 'data': [channel_util_data.get(slot, 0) for slot in time_slots] + }, + 'battery_levels': { + 'labels': time_slots, + 'data': moving_average_centered(battery_levels_raw, metrics_avg_minutes, bucket_size_minutes) + }, + 'temperature': { + 'labels': time_slots, + 'data': moving_average_centered(temperature_raw, metrics_avg_minutes, bucket_size_minutes) + }, + 'snr': { + 'labels': time_slots, + 'data': moving_average_centered(snr_raw, metrics_avg_minutes, bucket_size_minutes) + } + } + + return jsonify(result) + + except Exception as e: + logging.error(f"Error fetching metrics data: {str(e)}", exc_info=True) + return jsonify({ + 'error': f'Error fetching metrics data: {str(e)}' + }), 500 + +@app.route('/api/chattiest-nodes') +def get_chattiest_nodes(): + md = get_meshdata() + if not md: # Check if MeshData failed to initialize + abort(503, description="Database connection unavailable") + + # Get filter parameters from request + time_frame = request.args.get('time_frame', 'day') # day, week, month, year, all + message_type = request.args.get('message_type', 'all') # all, text, position, telemetry + channel = request.args.get('channel', 'all') # all or specific channel number + + try: + cursor = md.db.cursor(dictionary=True) + + # Build the time frame condition + time_condition = "" + if time_frame == 'year': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 1 YEAR)" + elif time_frame == 'month': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 1 MONTH)" + elif time_frame == 'week': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 7 DAY)" + elif time_frame == 'day': + time_condition = "WHERE ts_created >= DATE_SUB(NOW(), INTERVAL 24 HOUR)" + 
+ # Add channel filter if specified - only for text and telemetry tables which have channel column + channel_condition_text = "" + channel_condition_telemetry = "" + if channel != 'all': + channel_condition_text = f" AND channel = {channel}" + channel_condition_telemetry = f" AND channel = {channel}" + if not time_condition: + channel_condition_text = f"WHERE channel = {channel}" + channel_condition_telemetry = f"WHERE channel = {channel}" + + # Build the message type query based on the selected type + if message_type == 'all': + # For text messages, we need to qualify the columns with table aliases + time_condition_with_prefix = time_condition.replace("WHERE", "WHERE t.").replace(" AND", " AND t.") + channel_condition_text_with_prefix = channel_condition_text.replace("WHERE", "WHERE t.").replace(" AND", " AND t.") + + message_query = ( + "SELECT t.from_id as node_id, t.ts_created, t.channel as channel " + "FROM text t " + + time_condition_with_prefix + + channel_condition_text_with_prefix + " " + "UNION ALL " + "SELECT id as node_id, ts_created, NULL as channel " + "FROM positionlog " + + time_condition + " " + "UNION ALL " + "SELECT id as node_id, ts_created, channel " + "FROM telemetry " + + time_condition + + channel_condition_telemetry + ) + elif message_type == 'text': + message_query = ( + "SELECT from_id as node_id, ts_created, channel " + "FROM text " + + time_condition + + channel_condition_text + ) + elif message_type == 'position': + message_query = ( + "SELECT id as node_id, ts_created, NULL as channel " + "FROM positionlog " + + time_condition + ) + elif message_type == 'telemetry': + message_query = ( + "SELECT id as node_id, ts_created, channel " + "FROM telemetry " + + time_condition + + channel_condition_telemetry + ) + else: + return jsonify({ + 'error': f'Invalid message type: {message_type}' + }), 400 + + # Query to get the top 20 nodes by message count, including node names and role + query = """ + WITH messages AS ({message_query}) + SELECT + 
m.node_id as from_id, + n.long_name, + n.short_name, + n.role, + COUNT(*) as message_count, + COUNT(DISTINCT DATE_FORMAT(m.ts_created, '%Y-%m-%d')) as active_days, + MIN(m.ts_created) as first_message, + MAX(m.ts_created) as last_message, + CASE + WHEN '{channel}' != 'all' THEN '{channel}' + ELSE GROUP_CONCAT(DISTINCT NULLIF(CAST(m.channel AS CHAR), 'NULL')) + END as channels, + CASE + WHEN '{channel}' != 'all' THEN 1 + ELSE COUNT(DISTINCT NULLIF(m.channel, 'NULL')) + END as channel_count + FROM + messages m + LEFT JOIN + nodeinfo n ON m.node_id = n.id + GROUP BY + m.node_id, n.long_name, n.short_name, n.role + ORDER BY + message_count DESC + LIMIT 20 + """.format(message_query=message_query, channel=channel) + + cursor.execute(query) + results = cursor.fetchall() + + # Process the results to format them for the frontend + chattiest_nodes = [] + for row in results: + # Convert node ID to hex format + node_id_hex = utils.convert_node_id_from_int_to_hex(row['from_id']) + + # Parse channels string into a list of channel objects + channels_str = row['channels'] + channels = [] + if channels_str: + # If we're filtering by channel, just use that channel + if channel != 'all': + channels.append({ + 'id': int(channel), + 'name': utils.get_channel_name(int(channel)), + 'color': utils.get_channel_color(int(channel)) + }) + else: + # Otherwise process the concatenated list of channels + channel_ids = [ch_id for ch_id in channels_str.split(',') if ch_id and ch_id != 'NULL'] + for ch_id in channel_ids: + try: + ch_id_int = int(ch_id) + channels.append({ + 'id': ch_id_int, + 'name': utils.get_channel_name(ch_id_int), + 'color': utils.get_channel_color(ch_id_int) + }) + except (ValueError, TypeError): + continue + + # Create node object + node = { + 'node_id': row['from_id'], + 'node_id_hex': node_id_hex, + 'long_name': row['long_name'] or f"Node {row['from_id']}", # Fallback if no long name + 'short_name': row['short_name'] or f"Node {row['from_id']}", # Fallback if no short 
name + 'role': utils.get_role_name(row['role']), # Convert role number to name + 'message_count': row['message_count'], + 'active_days': row['active_days'], + 'first_message': row['first_message'].isoformat() if row['first_message'] else None, + 'last_message': row['last_message'].isoformat() if row['last_message'] else None, + 'channels': channels, + 'channel_count': row['channel_count'] + } + chattiest_nodes.append(node) + + return jsonify({ + 'chattiest_nodes': chattiest_nodes + }) + + except Exception as e: + logging.error(f"Error fetching chattiest nodes: {str(e)}") + return jsonify({ + 'error': f'Error fetching chattiest nodes: {str(e)}' + }), 500 + finally: + if cursor: + cursor.close() + +@app.route('/api/telemetry/') +def api_telemetry(node_id): + md = get_meshdata() + telemetry = md.get_telemetry_for_node(node_id) + return jsonify(telemetry) + +@app.route('/api/environmental-telemetry/') +def api_environmental_telemetry(node_id): + md = get_meshdata() + days = request.args.get('days', 1, type=int) + # Limit days to reasonable range (1-30 days) + days = max(1, min(30, days)) + telemetry = md.get_environmental_telemetry_for_node(node_id, days) + return jsonify(telemetry) + +@cache.memoize(timeout=get_cache_timeout()) +def get_cached_chat_data(page=1, per_page=50): + """Cache the chat data with optimized query.""" + md = get_meshdata() + if not md: + return None + + # Get total count first (this is fast) + cur = md.db.cursor() + cur.execute("SELECT COUNT(DISTINCT t.message_id) FROM text t") + total = cur.fetchone()[0] + cur.close() + + # Get paginated chat messages (without reception data) + offset = (page - 1) * per_page + cur = md.db.cursor(dictionary=True) + cur.execute(""" + SELECT t.* FROM text t + ORDER BY t.ts_created DESC + LIMIT %s OFFSET %s + """, (per_page, offset)) + messages = cur.fetchall() + cur.close() + + # Get reception data for these messages in a separate query + if messages: + message_ids = [msg['message_id'] for msg in messages] + 
placeholders = ','.join(['%s'] * len(message_ids)) + cur = md.db.cursor(dictionary=True) + cur.execute(f""" + SELECT message_id, received_by_id, rx_snr, rx_rssi, hop_limit, hop_start, rx_time + FROM message_reception + WHERE message_id IN ({placeholders}) + """, message_ids) + receptions = cur.fetchall() + cur.close() + + # Group receptions by message_id + receptions_by_message = {} + for reception in receptions: + msg_id = reception['message_id'] + if msg_id not in receptions_by_message: + receptions_by_message[msg_id] = [] + receptions_by_message[msg_id].append({ + "node_id": reception['received_by_id'], + "rx_snr": float(reception['rx_snr']) if reception['rx_snr'] is not None else 0, + "rx_rssi": int(reception['rx_rssi']) if reception['rx_rssi'] is not None else 0, + "hop_limit": int(reception['hop_limit']) if reception['hop_limit'] is not None else None, + "hop_start": int(reception['hop_start']) if reception['hop_start'] is not None else None, + "rx_time": reception['rx_time'].timestamp() if isinstance(reception['rx_time'], datetime.datetime) else reception['rx_time'] + }) + else: + receptions_by_message = {} + + # Process messages + chats = [] + prev_key = "" + for row in messages: + record = {} + for key, value in row.items(): + if isinstance(value, datetime.datetime): + record[key] = value.timestamp() + else: + record[key] = value + + # Add reception data + record["receptions"] = receptions_by_message.get(record['message_id'], []) + + # Convert IDs to hex + record["from"] = utils.convert_node_id_from_int_to_hex(record["from_id"]) + record["to"] = utils.convert_node_id_from_int_to_hex(record["to_id"]) + + # Deduplicate messages + msg_key = f"{record['from']}{record['to']}{record['text']}{record['message_id']}" + if msg_key != prev_key: + chats.append(record) + prev_key = msg_key + + return { + "items": chats, + "total": total, + "page": page, + "per_page": per_page, + "pages": (total + per_page - 1) // per_page, + "has_prev": page > 1, + "has_next": page * 
per_page < total, + "prev_num": page - 1, + "next_num": page + 1 + } + +def get_node_page_data(node_hex, all_nodes=None): + """Fetch and process all data for the node page to prevent memory leaks.""" + md = get_meshdata() + if not md: return None + + # Use provided nodes or fetch them if not provided + if all_nodes is None: + all_nodes = get_cached_nodes() + if not all_nodes or node_hex not in all_nodes: + return None + + current_node = all_nodes[node_hex] + node_id = current_node['id'] + + # Get LOS configuration early + los_enabled = config.getboolean("los", "enabled", fallback=False) + zero_hop_timeout = int(config.get("server", "zero_hop_timeout", fallback=43200)) + max_distance_km = int(config.get("los", "max_distance", fallback=5000)) / 1000 + cutoff_time = int(time.time()) - zero_hop_timeout + + # --- Fetch all raw data --- + node_telemetry = md.get_node_telemetry(node_id) + node_route = md.get_route_coordinates(node_id) + telemetry_graph = draw_graph(node_telemetry) + neighbor_heard_by = md.get_heard_by_from_neighbors(node_id) + + # Only process LOS if enabled + los_profiles = {} + if los_enabled: + # Create a minimal nodes dict for LOSProfile with only the current node and its neighbors + los_nodes = {} + los_nodes[node_hex] = current_node + + # Add only the nodes that are within LOS distance and have positions + max_distance = int(config.get("los", "max_distance", fallback=5000)) + for other_hex, other_node in all_nodes.items(): + if other_hex == node_hex: + continue + if not other_node.get('position'): + continue + # Calculate distance and only include if within range + try: + my_pos = current_node.get('position', {}) + other_pos = other_node.get('position', {}) + if my_pos.get('latitude') and my_pos.get('longitude') and other_pos.get('latitude') and other_pos.get('longitude'): + dist = utils.distance_between_two_points( + my_pos['latitude'], my_pos['longitude'], + other_pos['latitude'], other_pos['longitude'] + ) * 1000 # Convert to meters + if dist < 
max_distance: + los_nodes[other_hex] = other_node + except: + continue + + lp = LOSProfile(los_nodes, node_id, config, cache) + + # Get LOS profiles and clean up the LOSProfile instance + try: + los_profiles = lp.get_profiles() + finally: + # Explicitly clean up the LOSProfile instance to release memory + if hasattr(lp, 'close_datasets'): + lp.close_datasets() + del lp + del los_nodes + + cursor = md.db.cursor(dictionary=True) + # Query for zero-hop messages heard by this node + cursor.execute(""" + SELECT r.from_id, COUNT(*) AS count, MAX(r.rx_snr) AS best_snr, + AVG(r.rx_snr) AS avg_snr, MAX(r.rx_time) AS last_rx_time + FROM message_reception r + WHERE r.received_by_id = %s AND ((r.hop_limit IS NULL AND r.hop_start IS NULL) OR (r.hop_start - r.hop_limit = 0)) + AND r.rx_time > %s + GROUP BY r.from_id ORDER BY last_rx_time DESC + """, (node_id, cutoff_time)) + zero_hop_heard = cursor.fetchall() + + # Query for zero-hop messages sent by this node + cursor.execute(""" + SELECT r.received_by_id, COUNT(*) AS count, MAX(r.rx_snr) AS best_snr, + AVG(r.rx_snr) AS avg_snr, MAX(r.rx_time) AS last_rx_time + FROM message_reception r + WHERE r.from_id = %s AND ((r.hop_limit IS NULL AND r.hop_start IS NULL) OR (r.hop_start - r.hop_limit = 0)) + AND r.rx_time > %s + GROUP BY r.received_by_id ORDER BY last_rx_time DESC + """, (node_id, cutoff_time)) + zero_hop_heard_by = cursor.fetchall() + cursor.close() + + # --- Create a lean dictionary of only the linked nodes needed by the template --- + linked_node_ids = set() + if 'neighbors' in current_node: + for neighbor in current_node.get('neighbors', []): + linked_node_ids.add(neighbor['neighbor_id']) + for heard in zero_hop_heard: + linked_node_ids.add(heard['from_id']) + for neighbor in neighbor_heard_by: + linked_node_ids.add(neighbor['id']) + for heard in zero_hop_heard_by: + linked_node_ids.add(heard['received_by_id']) + if current_node.get('updated_via'): + linked_node_ids.add(current_node.get('updated_via')) + + 
linked_nodes_details = {} + for linked_id_int in linked_node_ids: + if not linked_id_int: continue + nid_hex = utils.convert_node_id_from_int_to_hex(linked_id_int) + node_data = all_nodes.get(nid_hex) + if node_data: + # Copy only the fields required by the template + linked_nodes_details[nid_hex] = { + 'short_name': node_data.get('short_name'), + 'long_name': node_data.get('long_name'), + 'position': node_data.get('position') + } + + # Build elsewhere links + node_hex_id = utils.convert_node_id_from_int_to_hex(node_id) + elsewhere_links = get_elsewhere_links(node_id, node_hex_id) + + # Return a dictionary that does NOT include the full `all_nodes` object + return { + 'node': current_node, + 'linked_nodes_details': linked_nodes_details, + 'telemetry_graph': telemetry_graph, + 'node_route': node_route, + 'los_profiles': los_profiles, + 'neighbor_heard_by': neighbor_heard_by, + 'zero_hop_heard': zero_hop_heard, + 'zero_hop_heard_by': zero_hop_heard_by, + 'zero_hop_timeout': zero_hop_timeout, + 'max_distance_km': max_distance_km, + 'elsewhere_links': elsewhere_links, + } + +@app.route('/chat-classic.html') +def chat(): + page = request.args.get('page', 1, type=int) + per_page = 50 + + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + chat_data = get_cached_chat_data(page, per_page) + if not chat_data: + abort(503, description="Database connection unavailable") + + return render_template( + "chat.html.j2", + auth=auth(), + config=config, + nodes=nodes, + chat=chat_data["items"], + pagination=chat_data, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + debug=False, + ) + +@cache.memoize(timeout=get_cache_timeout()) # Cache for 5 minutes +def calculate_node_distance(node1_hex, node2_hex): + """Calculate distance between two nodes, cached to avoid repeated calculations.""" + nodes = get_cached_nodes() + if not nodes: + return None + + node1 = nodes.get(node1_hex) 
+ node2 = nodes.get(node2_hex) + + if not node1 or not node2: + return None + + if not node1.get("position") or not node2.get("position"): + return None + + return utils.calculate_distance_between_nodes(node1, node2) + +@app.route('/chat.html') +def chat2(): + page = request.args.get('page', 1, type=int) + per_page = 50 + + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + chat_data = get_cached_chat_data(page, per_page) + if not chat_data: + abort(503, description="Database connection unavailable") + + # Pre-process nodes to reduce template complexity + # Only include nodes that are actually used in the chat messages + used_node_ids = set() + for message in chat_data["items"]: + used_node_ids.add(message["from"]) + if message["to"] != "ffffffff": + used_node_ids.add(message["to"]) + for reception in message.get("receptions", []): + node_id = utils.convert_node_id_from_int_to_hex(reception["node_id"]) + used_node_ids.add(node_id) + + # Create simplified nodes dict with only needed data + simplified_nodes = {} + for node_id in used_node_ids: + if node_id in nodes: + node = nodes[node_id] + simplified_nodes[node_id] = { + 'long_name': node.get('long_name', ''), + 'short_name': node.get('short_name', ''), + 'hw_model': node.get('hw_model'), + 'hw_model_name': meshtastic_support.get_hardware_model_name(node.get('hw_model')) if node.get('hw_model') else None, + 'role': node.get('role'), + 'role_name': utils.get_role_name(node.get('role')) if node.get('role') is not None else None, + 'firmware_version': node.get('firmware_version'), + 'owner_username': node.get('owner_username'), + 'owner': node.get('owner'), + 'position': node.get('position'), + 'telemetry': node.get('telemetry'), + 'ts_seen': node.get('ts_seen') + } + + return render_template( + "chat2.html.j2", + auth=auth(), + config=config, + nodes=simplified_nodes, + chat=chat_data["items"], + pagination=chat_data, + utils=utils, + 
datetime=datetime.datetime, + timestamp=datetime.datetime.now(), + meshtastic_support=meshtastic_support, + debug=False, + ) + +@app.route('/') +def serve_index(success_message=None, error_message=None): + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + + active_nodes = get_cached_active_nodes() + + return render_template( + "index.html.j2", + auth=auth(), + config=config, + nodes=nodes, + active_nodes=active_nodes, + timestamp=datetime.datetime.now(), + success_message=success_message, + error_message=error_message + ) + +@app.route('/nodes.html') +def nodes(): + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + latest = get_cached_latest_node() + logging.info(f"/nodes.html: Loaded {len(nodes)} nodes.") + + # Get hardware model filter from query parameters + hw_model_filter = request.args.get('hw_model') + hw_name_filter = request.args.get('hw_name') + + return render_template( + "nodes.html.j2", + auth=auth(), + config=config, + nodes=nodes, + show_inactive=False, + latest=latest, + hw_model_filter=hw_model_filter, + hw_name_filter=hw_name_filter, + hardware=meshtastic_support.HardwareModel, + meshtastic_support=meshtastic_support, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@app.route('/allnodes.html') +def allnodes(): + # Get cached data + nodes = get_cached_nodes() + if not nodes: + abort(503, description="Database connection unavailable") + latest = get_cached_latest_node() + logging.info(f"/allnodes.html: Loaded {len(nodes)} nodes.") + + # Get hardware model filter from query parameters + hw_model_filter = request.args.get('hw_model') + hw_name_filter = request.args.get('hw_name') + + return render_template( + "allnodes.html.j2", + auth=auth(), + config=config, + nodes=nodes, + show_inactive=True, + latest=latest, + hw_model_filter=hw_model_filter, + 
hw_name_filter=hw_name_filter, + hardware=meshtastic_support.HardwareModel, + meshtastic_support=meshtastic_support, + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@app.route('/api/debug/memory') +def debug_memory(): + """Manual trigger for detailed memory analysis.""" + if not auth(): + abort(401) + + log_memory_usage(force=True) + log_detailed_memory_analysis() + + return jsonify({ + 'status': 'success', + 'message': 'Memory analysis completed. Check logs for details.' + }) + +@app.route('/api/debug/cache') +def debug_cache(): + """Manual trigger for cache analysis.""" + if not auth(): + abort(401) + + log_cache_stats() + + return jsonify({ + 'status': 'success', + 'message': 'Cache analysis completed. Check logs for details.' + }) + +@app.route('/api/debug/cleanup') +def debug_cleanup(): + """Manual trigger for cache cleanup.""" + if not auth(): + abort(401) + + try: + # Check database privileges first + config = configparser.ConfigParser() + config.read('config.ini') + db_cache = DatabaseCache(config) + privileges = db_cache.check_privileges() + + # Perform cleanup operations + cleanup_cache() + + # Also clear nodes cache and force garbage collection + clear_nodes_cache() + clear_database_cache() + gc.collect() + + # Prepare response message + if privileges['reload']: + message = 'Cache cleanup completed successfully. Database query cache cleared.' + else: + message = 'Cache cleanup completed. Note: Database query cache could not be cleared due to insufficient privileges (RELOAD required).' 
+ + return jsonify({ + 'status': 'success', + 'message': message, + 'database_privileges': privileges + }) + + except Exception as e: + logging.error(f"Error during debug cleanup: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Error during cache cleanup: {str(e)}' + }), 500 + +@app.route('/api/debug/clear-nodes') +def debug_clear_nodes(): + """Manual trigger to clear nodes cache.""" + if not auth(): + abort(401) + + clear_nodes_cache() + clear_database_cache() + gc.collect() + + return jsonify({ + 'status': 'success', + 'message': 'Nodes cache cleared. Check logs for details.' + }) + +@app.route('/api/debug/database-cache') +def debug_database_cache(): + """Manual trigger for database cache analysis.""" + if not auth(): + abort(401) + + try: + # Check database privileges + config = configparser.ConfigParser() + config.read('config.ini') + db_cache = DatabaseCache(config) + privileges = db_cache.check_privileges() + + md = get_meshdata() + if md and hasattr(md, 'db_cache'): + stats = md.db_cache.get_cache_stats() + + # Get application cache info + app_cache_info = {} + if hasattr(md, '_nodes_cache'): + app_cache_info = { + 'cache_entries': len(md._nodes_cache), + 'cache_keys': list(md._nodes_cache.keys()), + 'cache_timestamps': {k: v['timestamp'] for k, v in md._nodes_cache.items()} + } + + return jsonify({ + 'status': 'success', + 'database_cache_stats': stats, + 'application_cache_info': app_cache_info, + 'database_privileges': privileges + }) + else: + return jsonify({ + 'status': 'error', + 'message': 'Database cache not available', + 'database_privileges': privileges + }) + except Exception as e: + return jsonify({ + 'status': 'error', + 'message': f'Error getting database cache stats: {e}' + }) + +@app.route('/api/geocode') +def api_geocode(): + """API endpoint for reverse geocoding to avoid CORS issues.""" + try: + lat = request.args.get('lat', type=float) + lon = request.args.get('lon', type=float) + + if lat is None or lon is None: + return 
jsonify({'error': 'Missing lat or lon parameters'}), 400 + + # Use the existing geocoding function from utils + geocoded = utils.geocode_position( + config.get('geocoding', 'apikey', fallback=''), + lat, + lon + ) + + if geocoded: + return jsonify(geocoded) + else: + return jsonify({'error': 'Geocoding failed'}), 500 + + except Exception as e: + logging.error(f"Geocoding error: {e}") + return jsonify({'error': 'Internal server error'}), 500 + +@cache.memoize(timeout=get_cache_timeout()) # Cache for 5 minutes +def get_node_positions_batch(node_ids): + """Get position data for multiple nodes efficiently.""" + nodes = get_cached_nodes() + if not nodes: + return {} + + positions = {} + for node_id in node_ids: + if node_id in nodes: + node = nodes[node_id] + if node.get('position') and node['position'].get('latitude') and node['position'].get('longitude'): + positions[node_id] = { + 'latitude': node['position']['latitude'], + 'longitude': node['position']['longitude'] + } + + return positions + +@app.route('/api/node-positions') +def api_node_positions(): + """API endpoint to get position data for specific nodes for client-side distance calculations.""" + try: + # Get list of node IDs from query parameter + node_ids = request.args.get('nodes', '').split(',') + node_ids = [nid.strip() for nid in node_ids if nid.strip()] + + if not node_ids: + return jsonify({'positions': {}}) + + # Use the cached batch function + positions = get_node_positions_batch(tuple(node_ids)) # Convert to tuple for caching + + return jsonify({'positions': positions}) + + except Exception as e: + logging.error(f"Error fetching node positions: {e}") + return jsonify({'error': 'Internal server error'}), 500 + +def run(): + # Enable Waitress logging + config = configparser.ConfigParser() + config.read('config.ini') + port = int(config["webserver"]["port"]) + + waitress_logger = logging.getLogger("waitress") + waitress_logger.setLevel(logging.DEBUG) # Enable all logs from Waitress + + # Configure 
Waitress to trust proxy headers for real IP addresses + # This is needed when running behind Docker, nginx, or other reverse proxies + serve( + TransLogger( + app, + setup_console_handler=False, + logger=waitress_logger + ), + port=port, + trusted_proxy='127.0.0.1,::1,172.16.0.0/12,192.168.0.0/16,10.0.0.0/8', # Trust Docker and local networks + trusted_proxy_count=1, # Trust one level of proxy (Docker) + trusted_proxy_headers={ + 'x-forwarded-for': 'X-Forwarded-For', + 'x-forwarded-proto': 'X-Forwarded-Proto', + 'x-forwarded-host': 'X-Forwarded-Host', + 'x-forwarded-port': 'X-Forwarded-Port' + } + ) + +def clear_nodes_cache(): + """Clear nodes-related cache entries.""" + try: + cache.delete_memoized(get_cached_nodes) + cache.delete_memoized(get_cached_active_nodes) + cache.delete_memoized(get_cached_message_map_data) + cache.delete_memoized(get_cached_graph_data) + cache.delete_memoized(get_cached_neighbors_data) + logging.info("Cleared nodes-related cache entries") + except Exception as e: + logging.error(f"Error clearing nodes-related cache: {e}") + +def clear_database_cache(): + """Clear database query cache.""" + try: + # Try to get meshdata with app context first + try: + md = get_meshdata() + if md and hasattr(md, 'db_cache'): + md.db_cache.clear_query_cache() + logging.info("Cleared database query cache") + if md: + md.clear_nodes_cache() + logging.info("Cleared application nodes cache") + except RuntimeError as e: + # If we're outside app context, clear cache directly + if "application context" in str(e): + logging.info("Outside app context, clearing cache directly") + # Clear database cache directly without app context + config = configparser.ConfigParser() + config.read('config.ini') + db_cache = DatabaseCache(config) + db_cache.clear_query_cache() + logging.info("Cleared database query cache (direct)") + else: + raise + except Exception as e: + logging.error(f"Error clearing database cache: {e}") + +def find_relay_node_by_suffix(relay_suffix, nodes, 
receiver_ids=None, sender_id=None, zero_hop_links=None, sender_pos=None, receiver_pos=None, debug=False): + """ + Improved relay node matcher: prefer zero-hop/extended neighbors, then select the physically closest candidate to the sender (or receiver), using scoring only as a tiebreaker. + """ + import time + relay_suffix = relay_suffix.lower()[-2:] + candidates = [] + for node_id_hex, node_data in nodes.items(): + if len(node_id_hex) == 8 and node_id_hex.lower()[-2:] == relay_suffix: + candidates.append((node_id_hex, node_data)) + + if not candidates: + if debug: + print(f"[RelayMatch] No candidates for suffix {relay_suffix}") + return None + if len(candidates) == 1: + if debug: + print(f"[RelayMatch] Only one candidate for suffix {relay_suffix}: {candidates[0][0]}") + return candidates[0][0] + + # --- Zero-hop filter: only consider zero-hop neighbors if any exist --- + zero_hop_candidates = [] + if zero_hop_links: + for node_id_hex, node_data in candidates: + is_zero_hop = False + if sender_id and node_id_hex in zero_hop_links.get(sender_id, {}).get('heard', {}): + is_zero_hop = True + if receiver_ids: + for rid in receiver_ids: + if node_id_hex in zero_hop_links.get(rid, {}).get('heard', {}): + is_zero_hop = True + if is_zero_hop: + zero_hop_candidates.append((node_id_hex, node_data)) + if zero_hop_candidates: + if debug: + print(f"[RelayMatch] Restricting to zero-hop candidates: {[c[0] for c in zero_hop_candidates]}") + candidates = zero_hop_candidates + else: + # --- Extended neighbor filter: only consider candidates that have ever been heard by or heard from sender/receivers --- + extended_candidates = [] + if zero_hop_links: + local_set = set() + if sender_id and sender_id in zero_hop_links: + local_set.update(zero_hop_links[sender_id].get('heard', {}).keys()) + local_set.update(zero_hop_links[sender_id].get('heard_by', {}).keys()) + if receiver_ids: + for rid in receiver_ids: + if rid in zero_hop_links: + local_set.update(zero_hop_links[rid].get('heard', 
{}).keys()) + local_set.update(zero_hop_links[rid].get('heard_by', {}).keys()) + local_set_hex = set() + for n in local_set: + try: + if isinstance(n, int): + local_set_hex.add(utils.convert_node_id_from_int_to_hex(n)) + elif isinstance(n, str) and len(n) == 8: + local_set_hex.add(n) + except Exception: + continue + for node_id_hex, node_data in candidates: + if node_id_hex in local_set_hex: + extended_candidates.append((node_id_hex, node_data)) + if extended_candidates: + if debug: + print(f"[RelayMatch] Restricting to extended neighbor candidates: {[c[0] for c in extended_candidates]}") + candidates = extended_candidates + else: + if debug: + print(f"[RelayMatch] No local/extended candidates, using all: {[c[0] for c in candidates]}") + + # --- Distance-first selection among remaining candidates --- + def get_distance(node_data, ref_pos): + npos = node_data.get('position') + if not npos or not ref_pos: + return float('inf') + nlat = npos.get('latitude') if isinstance(npos, dict) else getattr(npos, 'latitude', None) + nlon = npos.get('longitude') if isinstance(npos, dict) else getattr(npos, 'longitude', None) + if nlat is None or nlon is None: + return float('inf') + # Fix: Use 'latitude' and 'longitude' keys, not 'lat' and 'lon' + ref_lat = ref_pos.get('latitude') if isinstance(ref_pos, dict) else getattr(ref_pos, 'latitude', None) + ref_lon = ref_pos.get('longitude') if isinstance(ref_pos, dict) else getattr(ref_pos, 'longitude', None) + if ref_lat is None or ref_lon is None: + return float('inf') + return utils.distance_between_two_points(ref_lat, ref_lon, nlat, nlon) + + ref_pos = sender_pos if sender_pos else receiver_pos + if ref_pos: + # Compute distances + distances = [(node_id_hex, node_data, get_distance(node_data, ref_pos)) for node_id_hex, node_data in candidates] + min_dist = min(d[2] for d in distances) + closest = [d for d in distances if abs(d[2] - min_dist) < 1e-3] # Allow for float rounding + if debug: + print(f"[RelayMatch] Closest candidates by 
distance: {[(c[0], c[2]) for c in closest]}") + if len(closest) == 1: + return closest[0][0] + # If tie, fall back to scoring among closest + candidates = [(c[0], c[1]) for c in closest] + + # --- Scoring system as tiebreaker --- + scores = {} + now = time.time() + for node_id_hex, node_data in candidates: + score = 0 + reasons = [] + if zero_hop_links: + if sender_id and node_id_hex in zero_hop_links.get(sender_id, {}).get('heard', {}): + score += 100 + reasons.append('zero-hop-sender') + if receiver_ids: + for rid in receiver_ids: + if node_id_hex in zero_hop_links.get(rid, {}).get('heard', {}): + score += 100 + reasons.append(f'zero-hop-receiver-{rid}') + proximity_score = 0 + pos_fresh = False + if sender_pos and node_data.get('position'): + npos = node_data['position'] + nlat = npos.get('latitude') if isinstance(npos, dict) else getattr(npos, 'latitude', None) + nlon = npos.get('longitude') if isinstance(npos, dict) else getattr(npos, 'longitude', None) + ntime = npos.get('position_time') if isinstance(npos, dict) else getattr(npos, 'position_time', None) + if nlat is not None and nlon is not None and ntime is not None: + # Convert datetime to timestamp if needed + if isinstance(ntime, datetime.datetime): + ntime = ntime.timestamp() + if now - ntime > 21600: + score -= 50 + reasons.append('stale-position') + else: + pos_fresh = True + # Fix: Use 'latitude' and 'longitude' keys, not 'lat' and 'lon' + sender_lat = sender_pos.get('latitude') if isinstance(sender_pos, dict) else getattr(sender_pos, 'latitude', None) + sender_lon = sender_pos.get('longitude') if isinstance(sender_pos, dict) else getattr(sender_pos, 'longitude', None) + if sender_lat is not None and sender_lon is not None: + dist = utils.distance_between_two_points(sender_lat, sender_lon, nlat, nlon) + proximity_score = max(0, 100 - dist * 2) + score += proximity_score + reasons.append(f'proximity:{dist:.1f}km(+{proximity_score:.1f})') + else: + score -= 50 + 
reasons.append('missing-sender-position') + else: + score -= 100 + reasons.append('missing-position') + ts_seen = node_data.get('ts_seen') + if ts_seen: + # Convert datetime to timestamp if needed + if isinstance(ts_seen, datetime.datetime): + ts_seen = ts_seen.timestamp() + if now - ts_seen < 3600: + score += 10 + reasons.append('recently-seen') + if node_data.get('role') not in [1, 8]: + score += 5 + reasons.append('relay-capable') + scores[node_id_hex] = (score, reasons) + if debug: + print(f"[RelayMatch] Candidates for suffix {relay_suffix}:") + for nid, (score, reasons) in scores.items(): + print(f" {nid}: score={score}, reasons={reasons}") + if not scores: + return None + best = max(scores.items(), key=lambda x: x[1][0]) + if debug: + print(f"[RelayMatch] Selected {best[0]} for suffix {relay_suffix} (score={best[1][0]})") + return best[0] + +@app.route('/message-paths.html') +def message_paths(): + days = float(request.args.get('days', 0.167)) # Default to 4 hours if not provided + + md = get_meshdata() + if not md: + abort(503, description="Database connection unavailable") + + # Get relay network data + relay_data = md.get_relay_network_data(days) + + return render_template( + "message-paths.html.j2", + auth=auth(), + config=config, + relay_data=relay_data, + stats=relay_data['stats'], + utils=utils, + datetime=datetime.datetime, + timestamp=datetime.datetime.now() + ) + +@cache.memoize(timeout=get_cache_timeout()) # Cache for 5 minutes +def get_cached_hardware_models(): + """Get hardware model statistics for the most and least common models.""" + try: + md = get_meshdata() + if not md: + return {'error': 'Database connection unavailable'} + + # Get hardware model statistics + cur = md.db.cursor(dictionary=True) + + # Query to get hardware model counts with model names + sql = """ + SELECT + hw_model, + COUNT(*) as node_count, + GROUP_CONCAT(DISTINCT short_name ORDER BY short_name SEPARATOR ', ') as sample_names + FROM nodeinfo + WHERE hw_model IS NOT NULL 
+ GROUP BY hw_model + ORDER BY node_count DESC + """ + + cur.execute(sql) + results = cur.fetchall() + cur.close() + + # Process results and get hardware model names - use tuples to reduce memory + hardware_stats = [] + for row in results: + hw_model_id = row['hw_model'] + hw_model_name = meshtastic_support.get_hardware_model_name(hw_model_id) + + # Get a sample node for icon + sample_node = row['sample_names'].split(', ')[0] if row['sample_names'] else f"Model {hw_model_id}" + + # Use tuple instead of dict to reduce memory overhead + hardware_stats.append(( + hw_model_id, + hw_model_name or f"Unknown Model {hw_model_id}", + row['node_count'], + row['sample_names'], + utils.graph_icon(sample_node) + )) + + # Get top 15 most common + most_common = hardware_stats[:15] + + # Get bottom 15 least common (but only if we have more than 15 total models) + # Sort in ascending order (lowest count first) + least_common = hardware_stats[-15:] if len(hardware_stats) > 15 else hardware_stats + least_common = sorted(least_common, key=lambda x: x[2]) # Sort by node_count (index 2) + + # Convert tuples to dicts only for JSON serialization + def tuple_to_dict(hw_tuple): + return { + 'model_id': hw_tuple[0], + 'model_name': hw_tuple[1], + 'node_count': hw_tuple[2], + 'sample_names': hw_tuple[3], + 'icon_url': hw_tuple[4] + } + + return { + 'most_common': [tuple_to_dict(hw) for hw in most_common], + 'least_common': [tuple_to_dict(hw) for hw in least_common], + 'total_models': len(hardware_stats) + } + + except Exception as e: + logging.error(f"Error fetching hardware models: {e}") + return {'error': 'Failed to fetch hardware model data'} + +@app.route('/api/utilization-data') +def get_utilization_data(): + md = get_meshdata() + if not md: + abort(503, description="Database connection unavailable") + + try: + # Get parameters from request + time_range = request.args.get('time_range', '24') # hours + channel = request.args.get('channel', 'all') + + # Calculate time window + hours = 
int(time_range) + cutoff_time = datetime.datetime.now() - datetime.timedelta(hours=hours) + + cursor = md.db.cursor(dictionary=True) + + # Build channel condition + channel_condition = "" + if channel != 'all': + channel_condition = f" AND channel = {channel}" + + # Get active nodes from cache (much faster than complex DB queries) + nodes = get_cached_nodes() + if not nodes: + return jsonify({'error': 'No node data available'}), 503 + + # Get most recent telemetry for active nodes only + sql = f""" + SELECT + t.id, + t.channel_utilization, + t.ts_created + FROM telemetry t + WHERE t.ts_created >= NOW() - INTERVAL {hours} HOUR + AND t.channel_utilization IS NOT NULL + AND t.channel_utilization > 0 + {channel_condition} + ORDER BY t.id, t.ts_created DESC + """ + + cursor.execute(sql) + telemetry_rows = cursor.fetchall() + + # Get only the most recent utilization per node + node_utilization = {} + for row in telemetry_rows: + node_id = row['id'] + if node_id not in node_utilization: + node_utilization[node_id] = { + 'utilization': row['channel_utilization'], + 'ts_created': row['ts_created'] + } + + # Get contact data for active nodes in one efficient query + active_node_ids = list(node_utilization.keys()) + contact_data = {} + + if active_node_ids: + # Use placeholders for the IN clause + placeholders = ','.join(['%s'] * len(active_node_ids)) + contact_sql = f""" + SELECT + from_id, + received_by_id, + p1.latitude_i as from_lat_i, + p1.longitude_i as from_lon_i, + p2.latitude_i as to_lat_i, + p2.longitude_i as to_lon_i + FROM message_reception r + LEFT JOIN position p1 ON p1.id = r.from_id + LEFT JOIN position p2 ON p2.id = r.received_by_id + WHERE (r.hop_limit IS NULL AND r.hop_start IS NULL) + OR (r.hop_start - r.hop_limit = 0) + AND r.rx_time >= NOW() - INTERVAL {hours} HOUR + AND r.from_id IN ({placeholders}) + AND p1.latitude_i IS NOT NULL + AND p1.longitude_i IS NOT NULL + AND p2.latitude_i IS NOT NULL + AND p2.longitude_i IS NOT NULL + """ + + 
cursor.execute(contact_sql, active_node_ids) + contact_rows = cursor.fetchall() + + # Build contact distance lookup + for row in contact_rows: + from_id = row['from_id'] + to_id = row['received_by_id'] + + # Check for null coordinates before calculating distance + if (row['from_lat_i'] is None or row['from_lon_i'] is None or + row['to_lat_i'] is None or row['to_lon_i'] is None): + continue + + # Calculate distance using Haversine formula + lat1 = row['from_lat_i'] / 10000000.0 + lon1 = row['from_lon_i'] / 10000000.0 + lat2 = row['to_lat_i'] / 10000000.0 + lon2 = row['to_lon_i'] / 10000000.0 + + # Haversine distance calculation + import math + R = 6371 # Earth's radius in km + dlat = math.radians(lat2 - lat1) + dlon = math.radians(lon2 - lon1) + a = (math.sin(dlat/2) * math.sin(dlat/2) + + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * + math.sin(dlon/2) * math.sin(dlon/2)) + c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a)) + distance = R * c + + # Sanity check: skip distances over 150km + if distance > 150: + continue + + # Store contact data + if from_id not in contact_data: + contact_data[from_id] = {'distances': [], 'contacts': set()} + contact_data[from_id]['distances'].append(distance) + contact_data[from_id]['contacts'].add(to_id) + + # Build result using cached node data + result = [] + for node_id, telemetry_data in node_utilization.items(): + node_hex = utils.convert_node_id_from_int_to_hex(node_id) + node_data = nodes.get(node_hex) + + if node_data and node_data.get('position'): + position = node_data['position'] + if position and position.get('latitude_i') and position.get('longitude_i'): + # Calculate contact distance + node_contacts = contact_data.get(node_id, {'distances': [], 'contacts': set()}) + mean_distance = 2.0 # Default + contact_count = len(node_contacts['contacts']) + + if node_contacts['distances']: + mean_distance = sum(node_contacts['distances']) / len(node_contacts['distances']) + mean_distance = max(2.0, mean_distance) # 
Minimum 2km + + # Use cached node data for position and names + result.append({ + 'id': node_id, + 'utilization': round(telemetry_data['utilization'], 2), + 'position': { + 'latitude_i': position['latitude_i'], + 'longitude_i': position['longitude_i'], + 'altitude': position.get('altitude') + }, + 'short_name': node_data.get('short_name', ''), + 'long_name': node_data.get('long_name', ''), + 'mean_contact_distance': round(mean_distance, 2), + 'contact_count': contact_count + }) + + cursor.close() + + return jsonify({ + 'nodes': result, + 'time_range': time_range, + 'channel': channel + }) + + except Exception as e: + logging.error(f"Error fetching utilization data: {str(e)}", exc_info=True) + return jsonify({ + 'error': f'Error fetching utilization data: {str(e)}' + }), 500 + +@app.route('/api/hardware-models') +def get_hardware_models(): + """Get hardware model statistics for the most and least common models.""" + result = get_cached_hardware_models() + + if 'error' in result: + return jsonify(result), 503 if result['error'] == 'Database connection unavailable' else 500 + + return jsonify(result) + +def get_elsewhere_links(node_id, node_hex_id): + """ + Build Elsewhere links for a node based on config.ini [tools] section. 
+ + Args: + node_id: The node ID as integer + node_hex_id: The node ID as hex string + + Returns: + List of (label, url, icon) tuples for the Elsewhere section + """ + elsewhere_links = [] + + def get_icon_for_tool(label, url): + """Determine appropriate icon based on tool name and URL.""" + label_lower = label.lower() + url_lower = url.lower() + + # Map-related tools + if 'map' in label_lower or 'map' in url_lower: + return '🗺️' + + # Logs/Logging tools + if 'log' in label_lower or 'log' in url_lower: + return '📋' + + # Dashboard/Monitoring tools + if 'dashboard' in label_lower or 'monitor' in label_lower: + return '📊' + + # Network/Graph tools + if 'graph' in label_lower or 'network' in label_lower: + return '🕸️' + + # Chat/Message tools + if 'chat' in label_lower or 'message' in label_lower: + return '💬' + + # Settings/Config tools + if 'config' in label_lower or 'setting' in label_lower: + return '⚙️' + + # Default icon for external links + return '🔗' + + # Process keys ending with _node_link + for key, value in config.items('tools'): + if key.endswith('_node_link'): + # Extract the base key (remove _node_link suffix) + base_key = key[:-10] # Remove '_node_link' + + # Get the label from the corresponding _label key + label_key = base_key + '_label' + label = config.get('tools', label_key, fallback=None) + if not label: + # Fallback to a generated label if no _label is found + label = base_key.replace('_', ' ').title() + + # Replace placeholders in URL and strip any extra quotes + url = value.replace('{{ node.id }}', str(node_id)).replace('{{ node.hex_id }}', node_hex_id).strip('"') + + # Get appropriate icon + icon = get_icon_for_tool(label, url) + + elsewhere_links.append((label, url, icon)) + + return elsewhere_links + +if __name__ == '__main__': + config = configparser.ConfigParser() + config.read('config.ini') + port = int(config["webserver"]["port"]) + app.run(debug=True, port=port) diff --git a/meshtastic_support.py b/meshtastic_support.py index 
04106e0d..c753a914 100644 --- a/meshtastic_support.py +++ b/meshtastic_support.py @@ -33,6 +33,7 @@ class HardwareModel(Enum): WIO_WM1110 = 21 RAK2560 = 22 HELTEC_HRU_3601 = 23 + HELTEC_WIRELESS_BRIDGE = 24 STATION_G1 = 25 RAK11310 = 26 SENSELORA_RP2040 = 27 @@ -74,44 +75,774 @@ class HardwareModel(Enum): NRF52_PROMICRO_DIY = 63 RADIOMASTER_900_BANDIT_NANO = 64 HELTEC_CAPSULE_SENSOR_V3 = 65 + HELTEC_VISION_MASTER_T190 = 66 + HELTEC_VISION_MASTER_E213 = 67 + HELTEC_VISION_MASTER_E290 = 68 HELTEC_MESH_NODE_T114 = 69 + SENSECAP_INDICATOR = 70 TRACKER_T1000_E = 71 + RAK3172 = 72 + WIO_E5 = 73 + RADIOMASTER_900_BANDIT = 74 + ME25LS01_4Y10TD = 75 + RP2040_FEATHER_RFM95 = 76 + M5STACK_COREBASIC = 77 + M5STACK_CORE2 = 78 RPI_PICO2 = 79 + M5STACK_CORES3 = 80 + SEEED_XIAO_S3 = 81 + MS24SF1 = 82 + TLORA_C6 = 83 + WISMESH_TAP = 84 + ROUTASTIC = 85 + MESH_TAB = 86 + MESHLINK = 87 + XIAO_NRF52_KIT = 88 + THINKNODE_M1 = 89 + THINKNODE_M2 = 90 + T_ETH_ELITE = 91 + HELTEC_SENSOR_HUB = 92 + RESERVED_FRIED_CHICKEN = 93 + HELTEC_MESH_POCKET = 94 + SEEED_SOLAR_NODE = 95 + NOMADSTAR_METEOR_PRO = 96 + CROWPANEL = 97 + LINK_32 = 98 + SEEED_WIO_TRACKER_L1 = 99 + SEEED_WIO_TRACKER_L1_EINK = 100 + QWANTZ_TINY_ARMS = 101 + T_DECK_PRO = 102 + T_LORA_PAGER = 103 + GAT562_MESH_TRIAL_TRACKER = 104 PRIVATE_HW = 255 - XIAO = 81 + + @classmethod + def _missing_(cls, value): + return HardwareModel.UNSET + +class Role(Enum): + """ + Meshtastic node roles + from https://buf.build/meshtastic/protobufs/docs/main:meshtastic#meshtastic.Config.DeviceConfig.Role + """ + CLIENT = 0 + CLIENT_MUTE = 1 + ROUTER = 2 + ROUTER_CLIENT = 3 # Deprecated in v2.3.15 + REPEATER = 4 # Deprecated in v2.7.11 + TRACKER = 5 + SENSOR = 6 + TAK = 7 + CLIENT_HIDDEN = 8 + LOST_AND_FOUND = 9 + TAK_TRACKER = 10 + ROUTER_LATE = 11 + CLIENT_BASE = 12 + +class ShortRole(Enum): + """ + Meshtastic node short roles + """ + C = 0 + CM = 1 + R = 2 + RC = 3 + RE = 4 + T = 5 + S = 6 + A = 7 + CH = 8 + LF = 9 + AT = 10 + RL = 11 + CB = 12 + + 
+class Channel(Enum): + """ + Meshtastic channel mapping + Maps channel numbers to their descriptive names + """ + LONG_FAST = 8 + MEDIUM_FAST = 31 + SHORT_FAST = 112 + LONG_MODERATE = 88 + MEDIUM_SLOW = 24 + # Additional channels will be added as they are discovered + +class ShortChannel(Enum): + """ + Meshtastic channel mapping + Maps channel numbers to their descriptive names + """ + LF = 8 + MF = 31 + SF = 112 + LM = 88 + MS = 24 + # Additional channels will be added as they are discovered + + +class ModemPreset(Enum): + """ + Meshtastic modem preset configuration + from https://buf.build/meshtastic/protobufs/docs/main:meshtastic#meshtastic.ModemPreset + """ + LONG_FAST = 0 + LONG_SLOW = 1 + VERY_LONG_SLOW = 2 # Deprecated in 2.5: Works only with txco and is unusably slow + MEDIUM_SLOW = 3 + MEDIUM_FAST = 4 + SHORT_SLOW = 5 + SHORT_FAST = 6 + LONG_MODERATE = 7 + SHORT_TURBO = 8 # Fastest preset with 500kHz bandwidth, not legal in all regions + +class RebroadcastMode(Enum): + """ + Meshtastic rebroadcast mode configuration + from https://buf.build/meshtastic/protobufs/docs/main:meshtastic#meshtastic.Config.DeviceConfig.RebroadcastMode + """ + ALL = 0 + ALL_SKIP_DECODING = 1 + LOCAL_ONLY = 2 + KNOWN_ONLY = 3 + NONE = 4 + CORE_PORTNUMS_ONLY = 5 + +class BuzzerMode(Enum): + """ + Meshtastic buzzer mode configuration + from https://buf.build/meshtastic/protobufs/docs/main:meshtastic#meshtastic.Config.DeviceConfig.BuzzerMode + """ + ALL_ENABLED = 0 + DISABLED = 1 + NOTIFICATIONS_ONLY = 2 + SYSTEM_ONLY = 3 + DIRECT_MSG_ONLY = 4 + +class RoutingError(Enum): + """ + Meshtastic routing error codes + from https://buf.build/meshtastic/protobufs/docs/main:meshtastic#meshtastic.RoutingError + """ + NONE = 0 + NO_ROUTE = 1 + GOT_NAK = 2 + TIMEOUT = 3 + NO_INTERFACE = 4 + MAX_RETRANSMIT = 5 + NO_CHANNEL = 6 + TOO_LARGE = 7 + NO_RESPONSE = 8 + DUTY_CYCLE_LIMIT = 9 + BAD_REQUEST = 32 + NOT_AUTHORIZED = 33 + PKI_FAILED = 34 + PKI_UNKNOWN_PUBKEY = 35 + ADMIN_BAD_SESSION_KEY = 36 + 
ADMIN_PUBLIC_KEY_UNAUTHORIZED = 37 + RATE_LIMIT_EXCEEDED = 38 + +def get_channel_name(channel_value, use_short_names=False): + """ + Convert a channel number to a human-readable name. + + Args: + channel_value: The numeric channel value + use_short_names: If True, return short channel names (e.g., "LF" instead of "LongFast") + + Returns: + A human-readable channel name or "Unknown (value)" if not recognized + """ + if channel_value is None: + return "Default" + + try: + # Try to find the channel in our enum + if use_short_names: + for channel in ShortChannel: + if channel.value == channel_value: + return channel.name + else: + for channel in Channel: + if channel.value == channel_value: + # Convert the enum name to a more readable format + # Keep the underscores but capitalize each word + words = channel.name.split('_') + formatted_words = [word.capitalize() for word in words] + return ''.join(formatted_words) + + # If not found in our enum, return unknown with the value + return f"Unknown ({channel_value})" + except Exception: + return f"Unknown ({channel_value})" HARDWARE_PHOTOS = { - HardwareModel.HELTEC_HT62: "HELTEC_HT62.png", - HardwareModel.HELTEC_V2_0: "HELTEC_V2_0.png", - HardwareModel.HELTEC_V2_1: "HELTEC_V2_1.png", - HardwareModel.HELTEC_V3: "HELTEC_V3.png", - HardwareModel.HELTEC_WIRELESS_PAPER: "HELTEC_WIRELESS_PAPER.png", - HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.png", - HardwareModel.HELTEC_WIRELESS_TRACKER: "HELTEC_WIRELESS_TRACKER.png", - HardwareModel.HELTEC_WIRELESS_TRACKER_V1_0: "HELTEC_WIRELESS_TRACKER_V1_0.png", - HardwareModel.HELTEC_WSL_V3: "HELTEC_WSL_V3.png", - HardwareModel.LILYGO_TBEAM_S3_CORE: "LILYGO_TBEAM_S3_CORE.png", - HardwareModel.NANO_G1_EXPLORER: "NANO_G1_EXPLORER.png", - HardwareModel.NANO_G2_ULTRA: "NANO_G2_ULTRA.png", - HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.png", - HardwareModel.RAK11310: "RAK11310.png", - HardwareModel.RAK4631: "RAK4631.png", - HardwareModel.RP2040_LORA: 
"RP2040_LORA.png", - HardwareModel.RPI_PICO: "RPI_PICO.png", - HardwareModel.TBEAM: "TBEAM.png", - HardwareModel.TLORA_T3_S3: "TLORA_T3_S3.png", - HardwareModel.TLORA_V2_1_1P6: "TLORA_V2_1_1P6.png", - HardwareModel.T_DECK: "T_DECK.png", - HardwareModel.T_ECHO: "T_ECHO.png", - HardwareModel.T_WATCH_S3: "T_WATCH_S3.png", - HardwareModel.PRIVATE_HW: "PRIVATE_HW.png", - HardwareModel.PORTDUINO: "PORTDUINO.png", - HardwareModel.XIAO: "XIAO.png", - HardwareModel.TBEAM_V0P7: "TBEAM_V0P7.png", - HardwareModel.HELTEC_MESH_NODE_T114: "HELTEC_MESH_NODE_T114.png", - HardwareModel.HELTEC_CAPSULE_SENSOR_V3: "HELTEC_CAPSULE_SENSOR_V3.png", - HardwareModel.TRACKER_T1000_E: "TRACKER_T1000_E.png", - HardwareModel.RPI_PICO2: "RPI_PICO.png", - HardwareModel.NRF52840DK: "NRF52840DK.png" + HardwareModel.HELTEC_HT62: "HELTEC_HT62.webp", + HardwareModel.HELTEC_V2_0: "HELTEC_V2_0.webp", + HardwareModel.HELTEC_V2_1: "HELTEC_V2_1.webp", + HardwareModel.HELTEC_V3: "HELTEC_V3.webp", + HardwareModel.HELTEC_WIRELESS_PAPER: "HELTEC_WIRELESS_PAPER.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER: "HELTEC_WIRELESS_TRACKER.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER_V1_0: "HELTEC_WIRELESS_TRACKER_V1_0.webp", + HardwareModel.HELTEC_WSL_V3: "HELTEC_WSL_V3.webp", + HardwareModel.LILYGO_TBEAM_S3_CORE: "LILYGO_TBEAM_S3_CORE.webp", + HardwareModel.NANO_G1_EXPLORER: "NANO_G1_EXPLORER.webp", + HardwareModel.NANO_G2_ULTRA: "NANO_G2_ULTRA.webp", + HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + HardwareModel.RAK11310: "RAK11310.webp", + HardwareModel.RAK4631: "RAK4631.webp", + HardwareModel.RP2040_LORA: "RP2040_LORA.webp", + HardwareModel.RPI_PICO: "RPI_PICO.webp", + HardwareModel.TBEAM: "TBEAM.webp", + HardwareModel.TLORA_T3_S3: "TLORA_T3_S3.webp", + HardwareModel.TLORA_V2_1_1P6: "TLORA_V2_1_1P6.webp", + HardwareModel.T_DECK: "T_DECK.webp", + HardwareModel.T_ECHO: "T_ECHO.webp", + HardwareModel.T_WATCH_S3: 
"T_WATCH_S3.webp", + HardwareModel.PRIVATE_HW: "PRIVATE_HW.webp", + HardwareModel.PORTDUINO: "PORTDUINO.webp", + HardwareModel.SEEED_XIAO_S3: "SEEED_XIAO_S3.webp", + HardwareModel.TBEAM_V0P7: "TBEAM_V0P7.webp", + HardwareModel.HELTEC_MESH_NODE_T114: "HELTEC_MESH_NODE_T114.webp", + HardwareModel.HELTEC_CAPSULE_SENSOR_V3: "HELTEC_CAPSULE_SENSOR_V3.webp", + HardwareModel.TRACKER_T1000_E: "TRACKER_T1000_E.webp", + HardwareModel.RPI_PICO2: "RPI_PICO.webp", + HardwareModel.NRF52840DK: "NRF52840DK.webp", + # Placeholders for all other models: + # HardwareModel.UNSET: "UNSET.webp", + HardwareModel.TLORA_V2: "TLORA_V2.webp", + HardwareModel.TLORA_V1: "TLORA_V1.webp", + HardwareModel.TLORA_V1_1P3: "TLORA_V1_1P3.webp", + HardwareModel.TLORA_V2_1_1P8: "TLORA_V2_1_1P8.webp", + HardwareModel.RAK11200: "RAK11200.webp", + HardwareModel.NANO_G1: "NANO_G1.webp", + HardwareModel.LORA_TYPE: "LORA_TYPE.webp", + HardwareModel.WIPHONE: "WIPHONE.webp", + HardwareModel.WIO_WM1110: "WIO_WM1110.webp", + HardwareModel.RAK2560: "RAK2560.webp", + HardwareModel.HELTEC_HRU_3601: "HELTEC_HRU_3601.webp", + HardwareModel.HELTEC_WIRELESS_BRIDGE: "HELTEC_WIRELESS_BRIDGE.webp", + HardwareModel.STATION_G1: "STATION_G1.webp", + ## HardwareModel.SENSELORA_RP2040: "SENSELORA_RP2040.webp", + HardwareModel.SENSELORA_S3: "SENSELORA_S3.webp", + HardwareModel.CANARYONE: "CANARYONE.webp", + HardwareModel.STATION_G2: "STATION_G2.webp", + HardwareModel.LORA_RELAY_V1: "LORA_RELAY_V1.webp", + ## HardwareModel.PPR: "PPR.webp", + ## HardwareModel.GENIEBLOCKS: "GENIEBLOCKS.webp", + ## HardwareModel.NRF52_UNKNOWN: "NRF52_UNKNOWN.webp", + ## HardwareModel.ANDROID_SIM: "ANDROID_SIM.webp", + ## HardwareModel.DIY_V1: "DIY_V1.webp", + HardwareModel.NRF52840_PCA10059: "NRF52840_PCA10059.webp", + ## HardwareModel.DR_DEV: "DR_DEV.webp", + ## HardwareModel.M5STACK: "M5STACK.webp", + ## HardwareModel.BETAFPV_2400_TX: "BETAFPV_2400_TX.webp", + ## HardwareModel.BETAFPV_900_NANO_TX: "BETAFPV_900_NANO_TX.webp", + 
HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.UNPHONE: "UNPHONE.webp", + ## HardwareModel.TD_LORAC: "TD_LORAC.webp", + # HardwareModel.CDEBYTE_EORA_S3: "CDEBYTE_EORA_S3.webp", + ## HardwareModel.TWC_MESH_V4: "TWC_MESH_V4.webp", + ## HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + # HardwareModel.RADIOMASTER_900_BANDIT_NANO: "RADIOMASTER_900_BANDIT_NANO.webp", + HardwareModel.HELTEC_VISION_MASTER_T190: "HELTEC_VISION_MASTER_T190.webp", + HardwareModel.HELTEC_VISION_MASTER_E213: "HELTEC_VISION_MASTER_E213.webp", + HardwareModel.HELTEC_VISION_MASTER_E290: "HELTEC_VISION_MASTER_E290.webp", + HardwareModel.SENSECAP_INDICATOR: "SENSECAP_INDICATOR.webp", + HardwareModel.RAK3172: "RAK3172.webp", + HardwareModel.WIO_E5: "WIO_E5.webp", + ## HardwareModel.RADIOMASTER_900_BANDIT: "RADIOMASTER_900_BANDIT.webp", + HardwareModel.ME25LS01_4Y10TD: "ME25LS01_4Y10TD.webp", + HardwareModel.RP2040_FEATHER_RFM95: "RP2040_FEATHER_RFM95.webp", + HardwareModel.M5STACK_COREBASIC: "M5STACK_COREBASIC.webp", + HardwareModel.M5STACK_CORE2: "M5STACK_CORE2.webp", + HardwareModel.M5STACK_CORES3: "M5STACK_CORES3.webp", + HardwareModel.MS24SF1: "MS24SF1.webp", + HardwareModel.TLORA_C6: "TLORA_C6.webp", + HardwareModel.WISMESH_TAP: "WISMESH_TAP.webp", + HardwareModel.ROUTASTIC: "ROUTASTIC.webp", + ## HardwareModel.MESH_TAB: "MESH_TAB.webp", + ## HardwareModel.MESHLINK: "MESHLINK.webp", + HardwareModel.XIAO_NRF52_KIT: "XIAO_NRF52_KIT.webp", + HardwareModel.THINKNODE_M1: "THINKNODE_M1.webp", + HardwareModel.THINKNODE_M2: "THINKNODE_M2.webp", + HardwareModel.T_ETH_ELITE: "T_ETH_ELITE.webp", + HardwareModel.HELTEC_SENSOR_HUB: "HELTEC_SENSOR_HUB.webp", + HardwareModel.RESERVED_FRIED_CHICKEN: "RESERVED_FRIED_CHICKEN.webp", + HardwareModel.HELTEC_MESH_POCKET: "HELTEC_MESH_POCKET.webp", + HardwareModel.HELTEC_HT62: "HELTEC_HT62.webp", + HardwareModel.HELTEC_V2_0: "HELTEC_V2_0.webp", + HardwareModel.HELTEC_V2_1: "HELTEC_V2_1.webp", + 
HardwareModel.HELTEC_V3: "HELTEC_V3.webp", + HardwareModel.HELTEC_WIRELESS_PAPER: "HELTEC_WIRELESS_PAPER.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER: "HELTEC_WIRELESS_TRACKER.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER_V1_0: "HELTEC_WIRELESS_TRACKER_V1_0.webp", + HardwareModel.HELTEC_WSL_V3: "HELTEC_WSL_V3.webp", + HardwareModel.LILYGO_TBEAM_S3_CORE: "LILYGO_TBEAM_S3_CORE.webp", + HardwareModel.NANO_G1_EXPLORER: "NANO_G1_EXPLORER.webp", + HardwareModel.NANO_G2_ULTRA: "NANO_G2_ULTRA.webp", + HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + HardwareModel.RAK11310: "RAK11310.webp", + HardwareModel.RAK4631: "RAK4631.webp", + HardwareModel.RP2040_LORA: "RP2040_LORA.webp", + HardwareModel.RPI_PICO: "RPI_PICO.webp", + HardwareModel.TBEAM: "TBEAM.webp", + HardwareModel.TLORA_T3_S3: "TLORA_T3_S3.webp", + HardwareModel.TLORA_V2_1_1P6: "TLORA_V2_1_1P6.webp", + HardwareModel.T_DECK: "T_DECK.webp", + HardwareModel.T_ECHO: "T_ECHO.webp", + HardwareModel.T_WATCH_S3: "T_WATCH_S3.webp", + HardwareModel.PRIVATE_HW: "PRIVATE_HW.webp", + HardwareModel.PORTDUINO: "PORTDUINO.webp", + HardwareModel.SEEED_XIAO_S3: "SEEED_XIAO_S3.webp", + HardwareModel.TBEAM_V0P7: "TBEAM_V0P7.webp", + HardwareModel.HELTEC_MESH_NODE_T114: "HELTEC_MESH_NODE_T114.webp", + HardwareModel.HELTEC_CAPSULE_SENSOR_V3: "HELTEC_CAPSULE_SENSOR_V3.webp", + HardwareModel.TRACKER_T1000_E: "TRACKER_T1000_E.webp", + HardwareModel.RPI_PICO2: "RPI_PICO.webp", + HardwareModel.NRF52840DK: "NRF52840DK.webp", + # Placeholders for all other models: + # HardwareModel.UNSET: "UNSET.webp", + HardwareModel.TLORA_V2: "TLORA_V2.webp", + HardwareModel.TLORA_V1: "TLORA_V1.webp", + HardwareModel.TLORA_V1_1P3: "TLORA_V1_1P3.webp", + HardwareModel.TLORA_V2_1_1P8: "TLORA_V2_1_1P8.webp", + HardwareModel.RAK11200: "RAK11200.webp", + HardwareModel.NANO_G1: "NANO_G1.webp", + HardwareModel.LORA_TYPE: "LORA_TYPE.webp", + HardwareModel.WIPHONE: 
"WIPHONE.webp", + HardwareModel.WIO_WM1110: "WIO_WM1110.webp", + HardwareModel.RAK2560: "RAK2560.webp", + HardwareModel.HELTEC_HRU_3601: "HELTEC_HRU_3601.webp", + HardwareModel.HELTEC_WIRELESS_BRIDGE: "HELTEC_WIRELESS_BRIDGE.webp", + HardwareModel.STATION_G1: "STATION_G1.webp", + ## HardwareModel.SENSELORA_RP2040: "SENSELORA_RP2040.webp", + HardwareModel.SENSELORA_S3: "SENSELORA_S3.webp", + HardwareModel.CANARYONE: "CANARYONE.webp", + HardwareModel.STATION_G2: "STATION_G2.webp", + HardwareModel.LORA_RELAY_V1: "LORA_RELAY_V1.webp", + ## HardwareModel.PPR: "PPR.webp", + ## HardwareModel.GENIEBLOCKS: "GENIEBLOCKS.webp", + ## HardwareModel.NRF52_UNKNOWN: "NRF52_UNKNOWN.webp", + ## HardwareModel.ANDROID_SIM: "ANDROID_SIM.webp", + ## HardwareModel.DIY_V1: "DIY_V1.webp", + HardwareModel.NRF52840_PCA10059: "NRF52840_PCA10059.webp", + ## HardwareModel.DR_DEV: "DR_DEV.webp", + ## HardwareModel.M5STACK: "M5STACK.webp", + ## HardwareModel.BETAFPV_2400_TX: "BETAFPV_2400_TX.webp", + ## HardwareModel.BETAFPV_900_NANO_TX: "BETAFPV_900_NANO_TX.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.UNPHONE: "UNPHONE.webp", + ## HardwareModel.TD_LORAC: "TD_LORAC.webp", + # HardwareModel.CDEBYTE_EORA_S3: "CDEBYTE_EORA_S3.webp", + ## HardwareModel.TWC_MESH_V4: "TWC_MESH_V4.webp", + ## HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + # HardwareModel.RADIOMASTER_900_BANDIT_NANO: "RADIOMASTER_900_BANDIT_NANO.webp", + HardwareModel.HELTEC_VISION_MASTER_T190: "HELTEC_VISION_MASTER_T190.webp", + HardwareModel.HELTEC_VISION_MASTER_E213: "HELTEC_VISION_MASTER_E213.webp", + HardwareModel.HELTEC_VISION_MASTER_E290: "HELTEC_VISION_MASTER_E290.webp", + HardwareModel.SENSECAP_INDICATOR: "SENSECAP_INDICATOR.webp", + HardwareModel.RAK3172: "RAK3172.webp", + HardwareModel.WIO_E5: "WIO_E5.webp", + ## HardwareModel.RADIOMASTER_900_BANDIT: "RADIOMASTER_900_BANDIT.webp", + HardwareModel.ME25LS01_4Y10TD: "ME25LS01_4Y10TD.webp", + 
HardwareModel.RP2040_FEATHER_RFM95: "RP2040_FEATHER_RFM95.webp", + HardwareModel.M5STACK_COREBASIC: "M5STACK_COREBASIC.webp", + HardwareModel.M5STACK_CORE2: "M5STACK_CORE2.webp", + HardwareModel.M5STACK_CORES3: "M5STACK_CORES3.webp", + HardwareModel.MS24SF1: "MS24SF1.webp", + HardwareModel.TLORA_C6: "TLORA_C6.webp", + HardwareModel.WISMESH_TAP: "WISMESH_TAP.webp", + HardwareModel.ROUTASTIC: "ROUTASTIC.webp", + ## HardwareModel.MESH_TAB: "MESH_TAB.webp", + ## HardwareModel.MESHLINK: "MESHLINK.webp", + HardwareModel.XIAO_NRF52_KIT: "XIAO_NRF52_KIT.webp", + HardwareModel.THINKNODE_M1: "THINKNODE_M1.webp", + HardwareModel.THINKNODE_M2: "THINKNODE_M2.webp", + HardwareModel.T_ETH_ELITE: "T_ETH_ELITE.webp", + HardwareModel.HELTEC_SENSOR_HUB: "HELTEC_SENSOR_HUB.webp", + HardwareModel.RESERVED_FRIED_CHICKEN: "RESERVED_FRIED_CHICKEN.webp", + HardwareModel.HELTEC_MESH_POCKET: "HELTEC_MESH_POCKET.webp", + HardwareModel.HELTEC_HT62: "HELTEC_HT62.webp", + HardwareModel.HELTEC_V2_0: "HELTEC_V2_0.webp", + HardwareModel.HELTEC_V2_1: "HELTEC_V2_1.webp", + HardwareModel.HELTEC_V3: "HELTEC_V3.webp", + HardwareModel.HELTEC_WIRELESS_PAPER: "HELTEC_WIRELESS_PAPER.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER: "HELTEC_WIRELESS_TRACKER.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER_V1_0: "HELTEC_WIRELESS_TRACKER_V1_0.webp", + HardwareModel.HELTEC_WSL_V3: "HELTEC_WSL_V3.webp", + HardwareModel.LILYGO_TBEAM_S3_CORE: "LILYGO_TBEAM_S3_CORE.webp", + HardwareModel.NANO_G1_EXPLORER: "NANO_G1_EXPLORER.webp", + HardwareModel.NANO_G2_ULTRA: "NANO_G2_ULTRA.webp", + HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + HardwareModel.RAK11310: "RAK11310.webp", + HardwareModel.RAK4631: "RAK4631.webp", + HardwareModel.RP2040_LORA: "RP2040_LORA.webp", + HardwareModel.RPI_PICO: "RPI_PICO.webp", + HardwareModel.TBEAM: "TBEAM.webp", + HardwareModel.TLORA_T3_S3: "TLORA_T3_S3.webp", + HardwareModel.TLORA_V2_1_1P6: 
"TLORA_V2_1_1P6.webp", + HardwareModel.T_DECK: "T_DECK.webp", + HardwareModel.T_ECHO: "T_ECHO.webp", + HardwareModel.T_WATCH_S3: "T_WATCH_S3.webp", + HardwareModel.PRIVATE_HW: "PRIVATE_HW.webp", + HardwareModel.PORTDUINO: "PORTDUINO.webp", + HardwareModel.SEEED_XIAO_S3: "SEEED_XIAO_S3.webp", + HardwareModel.TBEAM_V0P7: "TBEAM_V0P7.webp", + HardwareModel.HELTEC_MESH_NODE_T114: "HELTEC_MESH_NODE_T114.webp", + HardwareModel.HELTEC_CAPSULE_SENSOR_V3: "HELTEC_CAPSULE_SENSOR_V3.webp", + HardwareModel.TRACKER_T1000_E: "TRACKER_T1000_E.webp", + HardwareModel.RPI_PICO2: "RPI_PICO.webp", + HardwareModel.NRF52840DK: "NRF52840DK.webp", + # Placeholders for all other models: + # HardwareModel.UNSET: "UNSET.webp", + HardwareModel.TLORA_V2: "TLORA_V2.webp", + HardwareModel.TLORA_V1: "TLORA_V1.webp", + HardwareModel.TLORA_V1_1P3: "TLORA_V1_1P3.webp", + HardwareModel.TLORA_V2_1_1P8: "TLORA_V2_1_1P8.webp", + HardwareModel.RAK11200: "RAK11200.webp", + HardwareModel.NANO_G1: "NANO_G1.webp", + HardwareModel.LORA_TYPE: "LORA_TYPE.webp", + HardwareModel.WIPHONE: "WIPHONE.webp", + HardwareModel.WIO_WM1110: "WIO_WM1110.webp", + HardwareModel.RAK2560: "RAK2560.webp", + HardwareModel.HELTEC_HRU_3601: "HELTEC_HRU_3601.webp", + HardwareModel.HELTEC_WIRELESS_BRIDGE: "HELTEC_WIRELESS_BRIDGE.webp", + HardwareModel.STATION_G1: "STATION_G1.webp", + ## HardwareModel.SENSELORA_RP2040: "SENSELORA_RP2040.webp", + HardwareModel.SENSELORA_S3: "SENSELORA_S3.webp", + HardwareModel.CANARYONE: "CANARYONE.webp", + HardwareModel.STATION_G2: "STATION_G2.webp", + HardwareModel.LORA_RELAY_V1: "LORA_RELAY_V1.webp", + ## HardwareModel.PPR: "PPR.webp", + ## HardwareModel.GENIEBLOCKS: "GENIEBLOCKS.webp", + ## HardwareModel.NRF52_UNKNOWN: "NRF52_UNKNOWN.webp", + ## HardwareModel.ANDROID_SIM: "ANDROID_SIM.webp", + ## HardwareModel.DIY_V1: "DIY_V1.webp", + HardwareModel.NRF52840_PCA10059: "NRF52840_PCA10059.webp", + ## HardwareModel.DR_DEV: "DR_DEV.webp", + ## HardwareModel.M5STACK: "M5STACK.webp", + ## 
HardwareModel.BETAFPV_2400_TX: "BETAFPV_2400_TX.webp", + ## HardwareModel.BETAFPV_900_NANO_TX: "BETAFPV_900_NANO_TX.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.UNPHONE: "UNPHONE.webp", + ## HardwareModel.TD_LORAC: "TD_LORAC.webp", + # HardwareModel.CDEBYTE_EORA_S3: "CDEBYTE_EORA_S3.webp", + ## HardwareModel.TWC_MESH_V4: "TWC_MESH_V4.webp", + ## HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + # HardwareModel.RADIOMASTER_900_BANDIT_NANO: "RADIOMASTER_900_BANDIT_NANO.webp", + HardwareModel.HELTEC_VISION_MASTER_T190: "HELTEC_VISION_MASTER_T190.webp", + HardwareModel.HELTEC_VISION_MASTER_E213: "HELTEC_VISION_MASTER_E213.webp", + HardwareModel.HELTEC_VISION_MASTER_E290: "HELTEC_VISION_MASTER_E290.webp", + HardwareModel.SENSECAP_INDICATOR: "SENSECAP_INDICATOR.webp", + HardwareModel.RAK3172: "RAK3172.webp", + HardwareModel.WIO_E5: "WIO_E5.webp", + ## HardwareModel.RADIOMASTER_900_BANDIT: "RADIOMASTER_900_BANDIT.webp", + HardwareModel.ME25LS01_4Y10TD: "ME25LS01_4Y10TD.webp", + HardwareModel.RP2040_FEATHER_RFM95: "RP2040_FEATHER_RFM95.webp", + HardwareModel.M5STACK_COREBASIC: "M5STACK_COREBASIC.webp", + HardwareModel.M5STACK_CORE2: "M5STACK_CORE2.webp", + HardwareModel.M5STACK_CORES3: "M5STACK_CORES3.webp", + HardwareModel.MS24SF1: "MS24SF1.webp", + HardwareModel.TLORA_C6: "TLORA_C6.webp", + HardwareModel.WISMESH_TAP: "WISMESH_TAP.webp", + HardwareModel.ROUTASTIC: "ROUTASTIC.webp", + ## HardwareModel.MESH_TAB: "MESH_TAB.webp", + ## HardwareModel.MESHLINK: "MESHLINK.webp", + HardwareModel.XIAO_NRF52_KIT: "XIAO_NRF52_KIT.webp", + HardwareModel.THINKNODE_M1: "THINKNODE_M1.webp", + HardwareModel.THINKNODE_M2: "THINKNODE_M2.webp", + HardwareModel.T_ETH_ELITE: "T_ETH_ELITE.webp", + HardwareModel.HELTEC_SENSOR_HUB: "HELTEC_SENSOR_HUB.webp", + HardwareModel.RESERVED_FRIED_CHICKEN: "RESERVED_FRIED_CHICKEN.webp", + HardwareModel.HELTEC_MESH_POCKET: "HELTEC_MESH_POCKET.webp", + HardwareModel.HELTEC_HT62: 
"HELTEC_HT62.webp", + HardwareModel.HELTEC_V2_0: "HELTEC_V2_0.webp", + HardwareModel.HELTEC_V2_1: "HELTEC_V2_1.webp", + HardwareModel.HELTEC_V3: "HELTEC_V3.webp", + HardwareModel.HELTEC_WIRELESS_PAPER: "HELTEC_WIRELESS_PAPER.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER: "HELTEC_WIRELESS_TRACKER.webp", + HardwareModel.HELTEC_WIRELESS_TRACKER_V1_0: "HELTEC_WIRELESS_TRACKER_V1_0.webp", + HardwareModel.HELTEC_WSL_V3: "HELTEC_WSL_V3.webp", + HardwareModel.LILYGO_TBEAM_S3_CORE: "LILYGO_TBEAM_S3_CORE.webp", + HardwareModel.NANO_G1_EXPLORER: "NANO_G1_EXPLORER.webp", + HardwareModel.NANO_G2_ULTRA: "NANO_G2_ULTRA.webp", + HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + HardwareModel.RAK11310: "RAK11310.webp", + HardwareModel.RAK4631: "RAK4631.webp", + HardwareModel.RP2040_LORA: "RP2040_LORA.webp", + HardwareModel.RPI_PICO: "RPI_PICO.webp", + HardwareModel.TBEAM: "TBEAM.webp", + HardwareModel.TLORA_T3_S3: "TLORA_T3_S3.webp", + HardwareModel.TLORA_V2_1_1P6: "TLORA_V2_1_1P6.webp", + HardwareModel.T_DECK: "T_DECK.webp", + HardwareModel.T_ECHO: "T_ECHO.webp", + HardwareModel.T_WATCH_S3: "T_WATCH_S3.webp", + HardwareModel.PRIVATE_HW: "PRIVATE_HW.webp", + HardwareModel.PORTDUINO: "PORTDUINO.webp", + HardwareModel.SEEED_XIAO_S3: "SEEED_XIAO_S3.webp", + HardwareModel.TBEAM_V0P7: "TBEAM_V0P7.webp", + HardwareModel.HELTEC_MESH_NODE_T114: "HELTEC_MESH_NODE_T114.webp", + HardwareModel.HELTEC_CAPSULE_SENSOR_V3: "HELTEC_CAPSULE_SENSOR_V3.webp", + HardwareModel.TRACKER_T1000_E: "TRACKER_T1000_E.webp", + HardwareModel.RPI_PICO2: "RPI_PICO.webp", + HardwareModel.NRF52840DK: "NRF52840DK.webp", + # Placeholders for all other models: + # HardwareModel.UNSET: "UNSET.webp", + HardwareModel.TLORA_V2: "TLORA_V2.webp", + HardwareModel.TLORA_V1: "TLORA_V1.webp", + HardwareModel.TLORA_V1_1P3: "TLORA_V1_1P3.webp", + HardwareModel.TLORA_V2_1_1P8: "TLORA_V2_1_1P8.webp", + HardwareModel.RAK11200: 
"RAK11200.webp", + HardwareModel.NANO_G1: "NANO_G1.webp", + HardwareModel.LORA_TYPE: "LORA_TYPE.webp", + HardwareModel.WIPHONE: "WIPHONE.webp", + HardwareModel.WIO_WM1110: "WIO_WM1110.webp", + HardwareModel.RAK2560: "RAK2560.webp", + HardwareModel.HELTEC_HRU_3601: "HELTEC_HRU_3601.webp", + HardwareModel.HELTEC_WIRELESS_BRIDGE: "HELTEC_WIRELESS_BRIDGE.webp", + HardwareModel.STATION_G1: "STATION_G1.webp", + ## HardwareModel.SENSELORA_RP2040: "SENSELORA_RP2040.webp", + HardwareModel.SENSELORA_S3: "SENSELORA_S3.webp", + HardwareModel.CANARYONE: "CANARYONE.webp", + HardwareModel.STATION_G2: "STATION_G2.webp", + HardwareModel.LORA_RELAY_V1: "LORA_RELAY_V1.webp", + ## HardwareModel.PPR: "PPR.webp", + ## HardwareModel.GENIEBLOCKS: "GENIEBLOCKS.webp", + ## HardwareModel.NRF52_UNKNOWN: "NRF52_UNKNOWN.webp", + ## HardwareModel.ANDROID_SIM: "ANDROID_SIM.webp", + ## HardwareModel.DIY_V1: "DIY_V1.webp", + HardwareModel.NRF52840_PCA10059: "NRF52840_PCA10059.webp", + ## HardwareModel.DR_DEV: "DR_DEV.webp", + ## HardwareModel.M5STACK: "M5STACK.webp", + ## HardwareModel.BETAFPV_2400_TX: "BETAFPV_2400_TX.webp", + ## HardwareModel.BETAFPV_900_NANO_TX: "BETAFPV_900_NANO_TX.webp", + HardwareModel.HELTEC_WIRELESS_PAPER_V1_0: "HELTEC_WIRELESS_PAPER_V1_0.webp", + HardwareModel.UNPHONE: "UNPHONE.webp", + ## HardwareModel.TD_LORAC: "TD_LORAC.webp", + HardwareModel.CDEBYTE_EORA_S3: "CDEBYTE_EORA_S3.webp", + ## HardwareModel.TWC_MESH_V4: "TWC_MESH_V4.webp", + ## HardwareModel.NRF52_PROMICRO_DIY: "NRF52_PROMICRO_DIY.webp", + # HardwareModel.RADIOMASTER_900_BANDIT_NANO: "RADIOMASTER_900_BANDIT_NANO.webp", + HardwareModel.HELTEC_VISION_MASTER_T190: "HELTEC_VISION_MASTER_T190.webp", + HardwareModel.HELTEC_VISION_MASTER_E213: "HELTEC_VISION_MASTER_E213.webp", + HardwareModel.HELTEC_VISION_MASTER_E290: "HELTEC_VISION_MASTER_E290.webp", + HardwareModel.SENSECAP_INDICATOR: "SENSECAP_INDICATOR.webp", + HardwareModel.RAK3172: "RAK3172.webp", + HardwareModel.WIO_E5: "WIO_E5.webp", + ## 
HardwareModel.RADIOMASTER_900_BANDIT: "RADIOMASTER_900_BANDIT.webp", + HardwareModel.ME25LS01_4Y10TD: "ME25LS01_4Y10TD.webp", + HardwareModel.RP2040_FEATHER_RFM95: "RP2040_FEATHER_RFM95.webp", + HardwareModel.M5STACK_COREBASIC: "M5STACK_COREBASIC.webp", + HardwareModel.M5STACK_CORE2: "M5STACK_CORE2.webp", + HardwareModel.M5STACK_CORES3: "M5STACK_CORES3.webp", + HardwareModel.MS24SF1: "MS24SF1.webp", + HardwareModel.TLORA_C6: "TLORA_C6.webp", + HardwareModel.WISMESH_TAP: "WISMESH_TAP.webp", + HardwareModel.ROUTASTIC: "ROUTASTIC.webp", + ## HardwareModel.MESH_TAB: "MESH_TAB.webp", + ## HardwareModel.MESHLINK: "MESHLINK.webp", + HardwareModel.XIAO_NRF52_KIT: "XIAO_NRF52_KIT.webp", + HardwareModel.THINKNODE_M1: "THINKNODE_M1.webp", + HardwareModel.THINKNODE_M2: "THINKNODE_M2.webp", + HardwareModel.T_ETH_ELITE: "T_ETH_ELITE.webp", + HardwareModel.HELTEC_SENSOR_HUB: "HELTEC_SENSOR_HUB.webp", + HardwareModel.RESERVED_FRIED_CHICKEN: "RESERVED_FRIED_CHICKEN.webp", + HardwareModel.HELTEC_MESH_POCKET: "HELTEC_MESH_POCKET.webp", + HardwareModel.SEEED_SOLAR_NODE: "SEEED_SOLAR_NODE.webp", + HardwareModel.NOMADSTAR_METEOR_PRO: "NOMADSTAR_METEOR_PRO.webp", + # HardwareModel.CROWPANEL: "CROWPANEL.webp", + ## HardwareModel.LINK_32: "LINK_32.webp", + HardwareModel.SEEED_WIO_TRACKER_L1: "SEEED_WIO_TRACKER_L1.webp", + ## HardwareModel.SEEED_WIO_TRACKER_L1_EINK: "SEEED_WIO_TRACKER_L1_EINK.webp", + ## HardwareModel.QWANTZ_TINY_ARMS: "QWANTZ_TINY_ARMS.webp", + HardwareModel.T_DECK_PRO: "T_DECK_PRO.webp", + HardwareModel.T_LORA_PAGER: "T_LORA_PAGER.webp", + ## HardwareModel.GAT562_MESH_TRIAL_TRACKER: "GAT562_MESH_TRIAL_TRACKER.webp", } + +def validate_hardware_model(hw_model_value): + """ + Strictly validate a hardware model value against the HardwareModel enum. 
+ + Args: + hw_model_value: The numeric hardware model value + + Returns: + The matching HardwareModel enum value + + Raises: + ValueError: If the hardware model value is not in the enum + """ + if hw_model_value is None: + raise ValueError("Hardware model value cannot be None") + + for model in HardwareModel: + if model.value == hw_model_value: + return model + + raise ValueError(f"Invalid hardware model value: {hw_model_value}") + +def get_hardware_model_name(hw_model_value): + """ + Convert a hardware model value to a human-readable name. + + Args: + hw_model_value: The numeric hardware model value + + Returns: + A human-readable hardware model name or "Unknown (value)" if not recognized + """ + try: + model = validate_hardware_model(hw_model_value) + return model.name.replace('_', ' ') + except ValueError: + return f"Unknown ({hw_model_value})" + + +def get_modem_preset_name(modem_preset_value): + """ + Convert a modem preset value to a human-readable name. + + Args: + modem_preset_value: The numeric modem preset value + + Returns: + A human-readable modem preset name or "Unknown (value)" if not recognized + """ + if modem_preset_value is None: + return "Unknown" + + try: + for preset in ModemPreset: + if preset.value == modem_preset_value: + # Convert the enum name to a more readable format + words = preset.name.split('_') + formatted_words = [word.capitalize() for word in words] + return ' '.join(formatted_words) + + # If not found in our enum, return unknown with the value + return f"Unknown ({modem_preset_value})" + except Exception: + return f"Unknown ({modem_preset_value})" + +def get_routing_error_name(error_value): + """ + Convert a routing error value to a human-readable name. 
+ + Args: + error_value: The numeric routing error value + + Returns: + A human-readable routing error name or "Unknown (value)" if not recognized + """ + if error_value is None: + return "None" + + try: + for error in RoutingError: + if error.value == error_value: + return error.name + return f"Unknown ({error_value})" + except Exception: + return f"Unknown ({error_value})" + +def get_routing_error_description(error_value): + """ + Get a human-readable description of a routing error. + + Args: + error_value: The numeric routing error value + + Returns: + A human-readable description of the routing error + """ + if error_value is None: + return "No error" + + error_descriptions = { + RoutingError.NONE: "Message sent successfully", + RoutingError.NO_ROUTE: "No path available to reach the destination", + RoutingError.GOT_NAK: "Message was rejected by a relay node", + RoutingError.TIMEOUT: "Message timed out during transmission", + RoutingError.NO_INTERFACE: "No communication interface available for delivery", + RoutingError.MAX_RETRANSMIT: "Maximum retry attempts exceeded", + RoutingError.NO_CHANNEL: "Requested channel is not available or disabled", + RoutingError.TOO_LARGE: "Message is too large to send", + RoutingError.NO_RESPONSE: "Destination received the message but no service responded", + RoutingError.DUTY_CYCLE_LIMIT: "Cannot send due to radio duty cycle restrictions", + RoutingError.BAD_REQUEST: "Destination received the message but considered it invalid", + RoutingError.NOT_AUTHORIZED: "Message sent on wrong channel or lacks proper authorization", + RoutingError.PKI_FAILED: "Encrypted transport failed - message not sent", + RoutingError.PKI_UNKNOWN_PUBKEY: "Destination has no encryption key available", + RoutingError.ADMIN_BAD_SESSION_KEY: "Admin message uses invalid or expired session key", + RoutingError.ADMIN_PUBLIC_KEY_UNAUTHORIZED: "Admin message from unauthorized sender", + RoutingError.RATE_LIMIT_EXCEEDED: "Message blocked by airtime fairness limits" 
+ } + + try: + for error in RoutingError: + if error.value == error_value: + return error_descriptions.get(error, f"Unknown error ({error_value})") + return f"Unknown error ({error_value})" + except Exception: + return f"Unknown error ({error_value})" + +def get_rebroadcast_mode_name(rebroadcast_mode_value): + """ + Convert a rebroadcast mode value to a human-readable name. + + Args: + rebroadcast_mode_value: The numeric rebroadcast mode value + + Returns: + A human-readable rebroadcast mode name or "Unknown (value)" if not recognized + """ + if rebroadcast_mode_value is None: + return "Unknown" + + try: + for mode in RebroadcastMode: + if mode.value == rebroadcast_mode_value: + # Convert the enum name to a more readable format + words = mode.name.split('_') + formatted_words = [word.capitalize() for word in words] + return ' '.join(formatted_words) + + # If not found in our enum, return unknown with the value + return f"Unknown ({rebroadcast_mode_value})" + except Exception: + return f"Unknown ({rebroadcast_mode_value})" + +def get_buzzer_mode_name(buzzer_mode_value): + """ + Convert a buzzer mode value to a human-readable name. 
+ + Args: + buzzer_mode_value: The numeric buzzer mode value + + Returns: + A human-readable buzzer mode name or "Unknown (value)" if not recognized + """ + if buzzer_mode_value is None: + return "Unknown" + + try: + for mode in BuzzerMode: + if mode.value == buzzer_mode_value: + # Convert the enum name to a more readable format + words = mode.name.split('_') + formatted_words = [word.capitalize() for word in words] + return ' '.join(formatted_words) + + # If not found in our enum, return unknown with the value + return f"Unknown ({buzzer_mode_value})" + except Exception: + return f"Unknown ({buzzer_mode_value})" diff --git a/migrations/__init__.py b/migrations/__init__.py index e69de29b..82088024 100644 --- a/migrations/__init__.py +++ b/migrations/__init__.py @@ -0,0 +1,33 @@ +# Import all migrations here +from .add_traceroute_improvements import migrate as add_traceroute_improvements +from .add_ts_uplink import migrate as add_ts_uplink +from .add_traceroute_snr import migrate as add_traceroute_snr +from .add_channel_info import migrate as add_channel_info +from .add_message_reception import migrate as add_message_reception +from .add_traceroute_id import migrate as add_traceroute_id +from .add_positionlog_log_id import migrate as add_positionlog_log_id +from .add_message_map_indexes import migrate as add_message_map_indexes +from .add_relay_node_to_reception import migrate as add_relay_node_to_reception +from .add_relay_edges_table import migrate as add_relay_edges_table +from .add_message_reception_ts_created import migrate as add_message_reception_ts_created +from .add_mapreport_fields import migrate as add_mapreport_fields +from .add_telemetry_packet_id import migrate as add_telemetry_packet_id +from .add_routing_messages_table import migrate as add_routing_messages_table + +# List of migrations to run in order +MIGRATIONS = [ + add_traceroute_improvements, + add_ts_uplink, + add_traceroute_snr, + add_channel_info, + add_message_reception, + 
add_traceroute_id, + add_positionlog_log_id, + add_message_map_indexes, + add_relay_node_to_reception, + add_relay_edges_table, + add_message_reception_ts_created, + add_mapreport_fields, + add_telemetry_packet_id, + add_routing_messages_table, +] diff --git a/migrations/add_channel_info.py b/migrations/add_channel_info.py new file mode 100644 index 00000000..a1e84290 --- /dev/null +++ b/migrations/add_channel_info.py @@ -0,0 +1,191 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # List of tables that should have channel information + tables_to_update = [ + 'telemetry', + 'position', + 'neighborinfo', + 'meshlog', + 'traceroute' + ] + + for table_name in tables_to_update: + logging.info(f"Processing table: {table_name}") + + # Check if table exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.TABLES + WHERE TABLE_NAME = %s + """, (table_name,)) + table_exists = cursor.fetchone()[0] > 0 + + if not table_exists: + logging.info(f"Table {table_name} does not exist, skipping...") + continue + + # Check if channel column exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = %s + AND COLUMN_NAME = 'channel' + """, (table_name,)) + has_channel = cursor.fetchone()[0] > 0 + + if not has_channel: + logging.info(f"Adding channel column to {table_name} table...") + try: + cursor.execute(f""" + ALTER TABLE {table_name} + ADD COLUMN channel INT UNSIGNED NULL, + ADD INDEX idx_{table_name}_channel (channel) + """) + db.commit() + logging.info(f"Added channel column to {table_name} successfully") + except Exception as e: + logging.error(f"Error adding channel column to {table_name}: {e}") + raise + else: + logging.info(f"Channel column already exists in {table_name} table") + # Check if index exists + 
index_name = f"idx_{table_name}_channel" + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.STATISTICS + WHERE TABLE_NAME = %s + AND INDEX_NAME = %s + """, (table_name, index_name)) + has_index = cursor.fetchone()[0] > 0 + + if not has_index: + logging.info(f"Adding index on channel column for {table_name}...") + try: + cursor.execute(f""" + ALTER TABLE {table_name} + ADD INDEX {index_name} (channel) + """) + db.commit() + logging.info(f"Added channel index to {table_name} successfully") + except Exception as e: + logging.error(f"Error adding channel index to {table_name}: {e}") + raise + else: + logging.info(f"Channel index already exists in {table_name} table") + + # Special handling for meshlog table - extract channel from JSON if possible + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.TABLES + WHERE TABLE_NAME = 'meshlog' + """) + meshlog_exists = cursor.fetchone()[0] > 0 + + if meshlog_exists: + # Ensure meshlog table has channel column (in case it wasn't added in the loop) + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = 'meshlog' + AND COLUMN_NAME = 'channel' + """, ()) + has_channel_column = cursor.fetchone()[0] > 0 + + if not has_channel_column: + logging.info("Adding channel column to meshlog table...") + cursor.execute(""" + ALTER TABLE meshlog + ADD COLUMN channel INT UNSIGNED NULL, + ADD INDEX idx_meshlog_channel (channel) + """) + db.commit() + logging.info("Added channel column to meshlog table successfully") + has_channel_column = True + + if has_channel_column: + # Double-check that channel column actually exists before using it + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = 'meshlog' + AND COLUMN_NAME = 'channel' + """) + channel_exists = cursor.fetchone()[0] > 0 + + # Additional check: try to actually query the channel column + if channel_exists: + try: + cursor.execute("SELECT COUNT(*) FROM meshlog WHERE channel IS NULL") + 
result = cursor.fetchone() + logging.info("Channel column is queryable in meshlog table") + except Exception as query_error: + logging.warning(f"Channel column exists in schema but not queryable: {query_error}") + channel_exists = False + + if channel_exists: + logging.info("Channel column confirmed to exist in meshlog table") + + # Clear any unread results before proceeding + clear_unread_results(cursor) + + # Check if we have any messages with channel information + try: + logging.info("Checking for messages with channel information...") + cursor.execute(""" + SELECT COUNT(*) + FROM meshlog + WHERE JSON_EXTRACT(message, '$.channel') IS NOT NULL + AND channel IS NULL + """) + result = cursor.fetchone() + has_channel_data = result[0] > 0 + logging.info(f"Found {has_channel_data} messages with channel information to extract") + + if has_channel_data: + logging.info("Extracting channel information from meshlog messages...") + cursor.execute(""" + UPDATE meshlog + SET channel = CAST(JSON_EXTRACT(message, '$.channel') AS UNSIGNED) + WHERE JSON_EXTRACT(message, '$.channel') IS NOT NULL + AND channel IS NULL + """) + db.commit() + logging.info("Extracted channel information successfully") + except Exception as e: + logging.error(f"Error during channel extraction from meshlog: {e}") + # Try to get more information about the table structure + try: + cursor.execute("DESCRIBE meshlog") + columns = cursor.fetchall() + logging.info(f"Meshlog table structure: {columns}") + except Exception as desc_error: + logging.error(f"Error describing meshlog table: {desc_error}") + raise + else: + logging.warning("Channel column was not found in meshlog table, skipping channel extraction") + + logging.info("Channel information migration completed successfully") + + except Exception as e: + logging.error(f"Error performing channel information migration: {e}") + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git 
a/migrations/add_mapreport_fields.py b/migrations/add_mapreport_fields.py new file mode 100644 index 00000000..6ba31b4f --- /dev/null +++ b/migrations/add_mapreport_fields.py @@ -0,0 +1,106 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # Check if columns already exist + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.columns + WHERE table_schema = DATABASE() + AND table_name = 'nodeinfo' + AND column_name = 'has_default_channel' + """) + has_default_channel_exists = cursor.fetchone()[0] > 0 + + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.columns + WHERE table_schema = DATABASE() + AND table_name = 'nodeinfo' + AND column_name = 'num_online_local_nodes' + """) + num_online_local_nodes_exists = cursor.fetchone()[0] > 0 + + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.columns + WHERE table_schema = DATABASE() + AND table_name = 'nodeinfo' + AND column_name = 'region' + """) + region_exists = cursor.fetchone()[0] > 0 + + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.columns + WHERE table_schema = DATABASE() + AND table_name = 'nodeinfo' + AND column_name = 'modem_preset' + """) + modem_preset_exists = cursor.fetchone()[0] > 0 + + # Add has_default_channel column if it doesn't exist + if not has_default_channel_exists: + cursor.execute(""" + ALTER TABLE nodeinfo + ADD COLUMN has_default_channel BOOLEAN NULL + """) + logging.info("Added has_default_channel column to nodeinfo table") + else: + logging.info("has_default_channel column already exists in nodeinfo table") + + # Add num_online_local_nodes column if it doesn't exist + if not num_online_local_nodes_exists: + cursor.execute(""" + ALTER TABLE nodeinfo + ADD COLUMN num_online_local_nodes INT UNSIGNED NULL + """) + logging.info("Added 
num_online_local_nodes column to nodeinfo table") + else: + logging.info("num_online_local_nodes column already exists in nodeinfo table") + + # Add region column if it doesn't exist + if not region_exists: + cursor.execute(""" + ALTER TABLE nodeinfo + ADD COLUMN region INT UNSIGNED NULL + """) + logging.info("Added region column to nodeinfo table") + else: + logging.info("region column already exists in nodeinfo table") + + # Add modem_preset column if it doesn't exist + if not modem_preset_exists: + cursor.execute(""" + ALTER TABLE nodeinfo + ADD COLUMN modem_preset INT UNSIGNED NULL + """) + logging.info("Added modem_preset column to nodeinfo table") + else: + logging.info("modem_preset column already exists in nodeinfo table") + + db.commit() + logging.info("Successfully added mapreport fields to nodeinfo table") + + except Exception as e: + logging.error(f"Failed to add mapreport fields to nodeinfo table: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_message_map_indexes.py b/migrations/add_message_map_indexes.py new file mode 100644 index 00000000..5b892fd4 --- /dev/null +++ b/migrations/add_message_map_indexes.py @@ -0,0 +1,90 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # Check if positionlog index already exists (might have been added by add_positionlog_log_id) + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.statistics + WHERE table_schema = DATABASE() + AND table_name = 'positionlog' + AND index_name = 'idx_positionlog_id_ts' + """) + if cursor.fetchone()[0] == 0: + cursor.execute(""" + CREATE INDEX idx_positionlog_id_ts + ON positionlog(id, ts_created) + """) + logging.info("Added index idx_positionlog_id_ts to 
positionlog table") + else: + logging.info("Index idx_positionlog_id_ts already exists on positionlog table") + + # Check if message_reception table exists before adding indexes + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.TABLES + WHERE TABLE_NAME = 'message_reception' + """) + message_reception_exists = cursor.fetchone()[0] > 0 + logging.info(f"message_reception table exists (info_schema): {message_reception_exists}") + + if message_reception_exists: + # Double-check that we can actually access the table + try: + cursor.execute("SELECT COUNT(*) FROM message_reception LIMIT 1") + result = cursor.fetchone() + logging.info("message_reception table is accessible") + except Exception as table_error: + logging.warning(f"message_reception table exists in schema but not accessible: {table_error}") + message_reception_exists = False + + if message_reception_exists: + # Clear any unread results before proceeding + clear_unread_results(cursor) + + # Add index for reception lookups if it doesn't exist + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.statistics + WHERE table_schema = DATABASE() + AND table_name = 'message_reception' + AND index_name = 'idx_messagereception_message_receiver' + """) + if cursor.fetchone()[0] == 0: + cursor.execute(""" + CREATE INDEX idx_messagereception_message_receiver + ON message_reception(message_id, received_by_id) + """) + logging.info("Added index idx_messagereception_message_receiver to message_reception table") + else: + logging.info("Index idx_messagereception_message_receiver already exists on message_reception table") + else: + logging.info("message_reception table not accessible, skipping index creation") + else: + logging.info("message_reception table does not exist, skipping index creation") + + db.commit() + logging.info("Successfully added message map performance indexes") + + except Exception as e: + db.rollback() + logging.error(f"Failed to add message map indexes: {str(e)}") + raise + 
finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_message_reception.py b/migrations/add_message_reception.py index 698c9db7..00c43f70 100644 --- a/migrations/add_message_reception.py +++ b/migrations/add_message_reception.py @@ -1,12 +1,19 @@ import logging import configparser +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + def migrate(db): - """ - Migrate database to add message_id and message_reception tracking - """ + cursor = None try: cursor = db.cursor() + clear_unread_results(cursor) # Ensure we're in the correct database cursor.execute("SELECT DATABASE()") @@ -40,6 +47,19 @@ def migrate(db): WHERE TABLE_NAME = 'message_reception' """) has_reception_table = cursor.fetchone()[0] > 0 + logging.info(f"message_reception table exists: {has_reception_table}") + + # Double-check that we can actually access the table + if has_reception_table: + # Clear any unread results before checking table accessibility + clear_unread_results(cursor) + try: + cursor.execute("SELECT COUNT(*) FROM message_reception LIMIT 1") + result = cursor.fetchone() + logging.info("message_reception table is accessible") + except Exception as table_error: + logging.warning(f"message_reception table exists in schema but not accessible: {table_error}") + has_reception_table = False if not has_reception_table: logging.info("Creating message_reception table...") @@ -54,7 +74,10 @@ def migrate(db): rx_rssi INTEGER, hop_limit INTEGER DEFAULT NULL, hop_start INTEGER DEFAULT NULL, - UNIQUE KEY unique_reception (message_id, received_by_id) + relay_node VARCHAR(4) DEFAULT NULL, + UNIQUE KEY unique_reception (message_id, received_by_id), + INDEX idx_messagereception_message_receiver (message_id, received_by_id), + INDEX idx_message_reception_relay_node (relay_node) ) """) db.commit() @@ -109,9 +132,39 @@ def migrate(db): logging.info("Migrating 
from hop_count to hop_limit/hop_start...") # No need to remove the hop_count column, just leave it for backward compatibility logging.info("Migration complete") + + # Check if the performance index exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.statistics + WHERE table_schema = DATABASE() + AND table_name = 'message_reception' + AND index_name = 'idx_messagereception_message_receiver' + """) + has_performance_index = cursor.fetchone()[0] > 0 + + if not has_performance_index: + logging.info("Adding performance index to message_reception table...") + try: + cursor.execute(""" + CREATE INDEX idx_messagereception_message_receiver + ON message_reception(message_id, received_by_id) + """) + db.commit() + logging.info("Added performance index to message_reception table successfully") + except Exception as index_error: + logging.warning(f"Could not add performance index to message_reception table: {index_error}") + # Don't raise the error, just log it and continue + else: + logging.info("Performance index already exists on message_reception table") except Exception as e: logging.error(f"Error performing migration: {e}") + db.rollback() raise finally: - cursor.close() \ No newline at end of file + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_message_reception_ts_created.py b/migrations/add_message_reception_ts_created.py new file mode 100644 index 00000000..237d3fd5 --- /dev/null +++ b/migrations/add_message_reception_ts_created.py @@ -0,0 +1,61 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + """ + Add ts_created column to message_reception table for compatibility + """ + cursor = None + try: + cursor = db.cursor() + + # Clear any unread results before proceeding + clear_unread_results(cursor) + + # Check if ts_created column exists in message_reception 
table + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = 'message_reception' + AND COLUMN_NAME = 'ts_created' + """) + has_ts_created = cursor.fetchone()[0] > 0 + + if not has_ts_created: + logging.info("Adding ts_created column to message_reception table...") + cursor.execute(""" + ALTER TABLE message_reception + ADD COLUMN ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP + COMMENT 'Timestamp when the reception was recorded' + """) + + # Update existing records to use rx_time converted to timestamp + logging.info("Updating existing records with rx_time converted to ts_created...") + cursor.execute(""" + UPDATE message_reception + SET ts_created = FROM_UNIXTIME(rx_time) + WHERE rx_time IS NOT NULL AND ts_created IS NULL + """) + + db.commit() + logging.info("Added ts_created column successfully") + else: + logging.info("ts_created column already exists in message_reception table") + + except Exception as e: + logging.error(f"Error during ts_created migration: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_positionlog_log_id.py b/migrations/add_positionlog_log_id.py new file mode 100644 index 00000000..7730e118 --- /dev/null +++ b/migrations/add_positionlog_log_id.py @@ -0,0 +1,62 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + # Check if log_id column already exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = 'positionlog' + AND COLUMN_NAME = 'log_id' + """) + column_exists = cursor.fetchone()[0] > 0 + + if not column_exists: + logging.info("Adding log_id column to positionlog table...") + # Drop old primary key + cursor.execute("ALTER TABLE positionlog DROP 
PRIMARY KEY") + # Add log_id as auto-increment primary key + cursor.execute(""" + ALTER TABLE positionlog + ADD COLUMN log_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST + """) + logging.info("Added log_id column as primary key to positionlog table.") + else: + logging.info("log_id column already exists in positionlog table.") + + # Add secondary index for (id, ts_created) if not exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.STATISTICS + WHERE TABLE_NAME = 'positionlog' + AND INDEX_NAME = 'idx_positionlog_id_ts' + """) + index_exists = cursor.fetchone()[0] > 0 + if not index_exists: + cursor.execute("CREATE INDEX idx_positionlog_id_ts ON positionlog(id, ts_created)") + logging.info("Added index idx_positionlog_id_ts on (id, ts_created) to positionlog table.") + else: + logging.info("Index idx_positionlog_id_ts already exists on positionlog table.") + + db.commit() + except Exception as e: + logging.error(f"Error during positionlog log_id migration: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_relay_edges_table.py b/migrations/add_relay_edges_table.py new file mode 100644 index 00000000..c2e92959 --- /dev/null +++ b/migrations/add_relay_edges_table.py @@ -0,0 +1,31 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + try: + cursor = db.cursor() + clear_unread_results(cursor) + cursor.execute(""" + CREATE TABLE IF NOT EXISTS relay_edges ( + from_node VARCHAR(8) NOT NULL, + relay_suffix VARCHAR(2) NOT NULL, + to_node VARCHAR(8) NOT NULL, + first_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + count INT DEFAULT 1, + PRIMARY KEY (from_node, relay_suffix, to_node) + ) + """) + db.commit() + logging.info("relay_edges table 
created successfully.") + cursor.close() + except Exception as e: + logging.error(f"Error creating relay_edges table: {e}") + logging.info("Continuing despite relay_edges table creation error") \ No newline at end of file diff --git a/migrations/add_relay_node_to_reception.py b/migrations/add_relay_node_to_reception.py new file mode 100644 index 00000000..f5ef770f --- /dev/null +++ b/migrations/add_relay_node_to_reception.py @@ -0,0 +1,123 @@ +import logging +import configparser + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + cursor = None + try: + cursor = db.cursor() + + # Clear any unread results before proceeding + clear_unread_results(cursor) + + # Ensure we're in the correct database + cursor.execute("SELECT DATABASE()") + current_db = cursor.fetchone()[0] + if not current_db: + raise Exception("No database selected") + + # Check if message_reception table exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.TABLES + WHERE TABLE_NAME = 'message_reception' + """) + has_reception_table = cursor.fetchone()[0] > 0 + logging.info(f"message_reception table exists: {has_reception_table}") + + if not has_reception_table: + logging.info("message_reception table does not exist, skipping relay_node column addition") + return + + # Double-check that we can actually access the table + try: + cursor.execute("SELECT COUNT(*) FROM message_reception LIMIT 1") + result = cursor.fetchone() + logging.info("message_reception table is accessible") + except Exception as table_error: + logging.warning(f"message_reception table exists in schema but not accessible: {table_error}") + return + + # Check if relay_node column exists in message_reception table + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = 'message_reception' + AND COLUMN_NAME = 'relay_node' + """) + has_relay_node = cursor.fetchone()[0] > 0 + 
logging.info(f"relay_node column exists: {has_relay_node}") + + if not has_relay_node: + logging.info("Adding relay_node column to message_reception table...") + try: + cursor.execute(""" + ALTER TABLE message_reception + ADD COLUMN relay_node VARCHAR(4) DEFAULT NULL, + ADD INDEX idx_message_reception_relay_node (relay_node) + """) + db.commit() + logging.info("Added relay_node column successfully") + except Exception as alter_error: + logging.error(f"Error adding relay_node column: {alter_error}") + # Try adding just the column first, then the index + try: + cursor.execute(""" + ALTER TABLE message_reception + ADD COLUMN relay_node VARCHAR(4) DEFAULT NULL + """) + db.commit() + logging.info("Added relay_node column (without index)") + + # Now try to add the index + try: + cursor.execute(""" + ALTER TABLE message_reception + ADD INDEX idx_message_reception_relay_node (relay_node) + """) + db.commit() + logging.info("Added relay_node index successfully") + except Exception as index_error: + logging.warning(f"Could not add relay_node index: {index_error}") + + except Exception as column_error: + logging.error(f"Could not add relay_node column: {column_error}") + raise + else: + logging.info("relay_node column already exists in message_reception table") + + # Verify the column exists by trying to query it + try: + cursor.execute("SELECT relay_node FROM message_reception LIMIT 1") + logging.info("relay_node column is queryable - migration successful") + except Exception as verify_error: + logging.error(f"relay_node column is not queryable: {verify_error}") + # Try to add the column again as a last resort + try: + cursor.execute(""" + ALTER TABLE message_reception + ADD COLUMN relay_node VARCHAR(4) DEFAULT NULL + """) + db.commit() + logging.info("Added relay_node column as fallback") + except Exception as fallback_error: + logging.error(f"Failed to add relay_node column as fallback: {fallback_error}") + raise + + except Exception as e: + logging.error(f"Error during 
relay_node migration: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_routing_messages_table.py b/migrations/add_routing_messages_table.py new file mode 100644 index 00000000..a4b44f8a --- /dev/null +++ b/migrations/add_routing_messages_table.py @@ -0,0 +1,80 @@ +import logging +import mysql.connector +from mysql.connector import Error + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + """ + Add routing_messages table to store routing packet information. + This table captures routing errors, hop counts, relay nodes, and other routing metadata. + """ + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # Check if table already exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.tables + WHERE table_schema = DATABASE() + AND table_name = 'routing_messages' + """) + + if cursor.fetchone()[0] > 0: + logging.info("routing_messages table already exists, skipping creation") + return + + # Create the routing_messages table + cursor.execute(""" + CREATE TABLE routing_messages ( + routing_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, + from_id INT UNSIGNED NOT NULL, + to_id INT UNSIGNED, + message_id BIGINT, + request_id BIGINT, + relay_node VARCHAR(10), + hop_limit TINYINT UNSIGNED, + hop_start TINYINT UNSIGNED, + hops_taken TINYINT UNSIGNED, + error_reason INT, + error_description VARCHAR(50), + is_error BOOLEAN DEFAULT FALSE, + success BOOLEAN DEFAULT FALSE, + channel TINYINT UNSIGNED, + rx_snr FLOAT, + rx_rssi FLOAT, + rx_time BIGINT, + routing_data JSON, + uplink_node INT UNSIGNED, + ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + INDEX idx_routing_from (from_id), + INDEX idx_routing_to (to_id), + INDEX idx_routing_time (ts_created), + INDEX idx_routing_error (is_error), + INDEX idx_routing_relay 
(relay_node), + INDEX idx_routing_request (request_id), + INDEX idx_routing_uplink (uplink_node) + ) + """) + + db.commit() + logging.info("Successfully created routing_messages table") + + except Error as e: + logging.error(f"Error during routing_messages table creation: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_telemetry_packet_id.py b/migrations/add_telemetry_packet_id.py new file mode 100644 index 00000000..f636bc8b --- /dev/null +++ b/migrations/add_telemetry_packet_id.py @@ -0,0 +1,157 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def ensure_packet_id_column(cur, table_name='telemetry'): + cur.execute(f"SHOW COLUMNS FROM {table_name} LIKE 'packet_id';") + if not cur.fetchone(): + cur.execute(f"ALTER TABLE {table_name} ADD COLUMN packet_id BIGINT;") + +def check_migration_completed(cur): + """Check if this migration has already been completed""" + # Check if unique constraint exists + cur.execute(""" + SELECT COUNT(*) + FROM information_schema.STATISTICS + WHERE TABLE_NAME = 'telemetry' + AND INDEX_NAME = 'unique_telemetry' + """) + has_unique_constraint = cur.fetchone()[0] > 0 + + # Check if all rows have packet_id values + cur.execute("SELECT COUNT(*) FROM telemetry WHERE packet_id IS NULL") + has_null_packet_ids = cur.fetchone()[0] > 0 + + return has_unique_constraint and not has_null_packet_ids + +def migrate(db): + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # Check if migration has already been completed + if check_migration_completed(cursor): + logging.info("Telemetry packet_id migration already completed, skipping...") + return + + logging.info("Starting telemetry packet_id migration...") + + # Ensure packet_id column exists before any operation + ensure_packet_id_column(cursor, 'telemetry') + 
+ # Get column names for telemetry table, excluding 'rn' if present + cursor.execute("SHOW COLUMNS FROM telemetry;") + columns = [row[0] for row in cursor.fetchall() if row[0] != 'rn'] + colnames = ', '.join(columns) + + # Check if we need to deduplicate by (id, telemetry_time) + cursor.execute(""" + SELECT COUNT(*) FROM ( + SELECT id, telemetry_time, COUNT(*) as cnt + FROM telemetry + GROUP BY id, telemetry_time + HAVING cnt > 1 + ) t + """) + has_duplicates_by_time = cursor.fetchone()[0] > 0 + + if has_duplicates_by_time: + logging.info("Deduplicating telemetry by (id, telemetry_time)...") + # Drop intermediate tables if they exist (idempotency) + cursor.execute("DROP TABLE IF EXISTS telemetry_new;") + # Create intermediate table with same schema + cursor.execute("CREATE TABLE telemetry_new LIKE telemetry;") + ensure_packet_id_column(cursor, 'telemetry_new') + # Deduplicate by (id, telemetry_time) + cursor.execute(f""" + INSERT INTO telemetry_new ({colnames}) + SELECT {colnames} FROM ( + SELECT {colnames}, ROW_NUMBER() OVER (PARTITION BY id, telemetry_time ORDER BY ts_created ASC) AS rn + FROM telemetry + ) t + WHERE rn = 1; + """) + cursor.execute("TRUNCATE telemetry;") + ensure_packet_id_column(cursor, 'telemetry') + cursor.execute(f"INSERT INTO telemetry ({colnames}) SELECT {colnames} FROM telemetry_new;") + cursor.execute("DROP TABLE telemetry_new;") + logging.info("Deduplication by (id, telemetry_time) completed") + + # Set packet_id to a synthetic value with randomness for legacy rows + cursor.execute("SELECT COUNT(*) FROM telemetry WHERE packet_id IS NULL") + null_packet_ids = cursor.fetchone()[0] + + if null_packet_ids > 0: + logging.info(f"Setting packet_id for {null_packet_ids} rows...") + cursor.execute(""" + UPDATE telemetry + SET packet_id = id + UNIX_TIMESTAMP(telemetry_time) + FLOOR(RAND() * 1000000) + WHERE packet_id IS NULL; + """) + logging.info("packet_id values set successfully") + + # Check if we need to deduplicate by (id, packet_id) + 
cursor.execute(""" + SELECT COUNT(*) FROM ( + SELECT id, packet_id, COUNT(*) as cnt + FROM telemetry + WHERE packet_id IS NOT NULL + GROUP BY id, packet_id + HAVING cnt > 1 + ) t + """) + has_duplicates_by_packet_id = cursor.fetchone()[0] > 0 + + if has_duplicates_by_packet_id: + logging.info("Deduplicating telemetry by (id, packet_id)...") + # Drop intermediate tables if they exist (idempotency) + cursor.execute("DROP TABLE IF EXISTS telemetry_new2;") + # Create intermediate table with same schema + cursor.execute("CREATE TABLE telemetry_new2 LIKE telemetry;") + ensure_packet_id_column(cursor, 'telemetry_new2') + # Final deduplication by (id, packet_id) + cursor.execute(f""" + INSERT INTO telemetry_new2 ({colnames}) + SELECT {colnames} FROM ( + SELECT {colnames}, ROW_NUMBER() OVER (PARTITION BY id, packet_id ORDER BY ts_created ASC) AS rn + FROM telemetry + ) t + WHERE rn = 1; + """) + cursor.execute("TRUNCATE telemetry;") + ensure_packet_id_column(cursor, 'telemetry') + cursor.execute(f"INSERT INTO telemetry ({colnames}) SELECT {colnames} FROM telemetry_new2;") + cursor.execute("DROP TABLE telemetry_new2;") + logging.info("Deduplication by (id, packet_id) completed") + + # Add unique constraint on (id, packet_id) + try: + cursor.execute(""" + ALTER TABLE telemetry + ADD UNIQUE KEY unique_telemetry (id, packet_id); + """) + logging.info("Added unique constraint on (id, packet_id)") + except Exception as e: + # If the unique key already exists, ignore + if 'Duplicate key name' not in str(e): + raise + + logging.info("Telemetry packet_id migration completed successfully") + except Exception as e: + logging.error(f"Error during telemetry packet_id migration: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass + \ No newline at end of file diff --git a/migrations/add_traceroute_id.py b/migrations/add_traceroute_id.py index d91a38b3..70f712b5 100644 --- a/migrations/add_traceroute_id.py +++ b/migrations/add_traceroute_id.py @@ 
-1,6 +1,18 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + def migrate(db): - cursor = db.cursor() + cursor = None try: + cursor = db.cursor() + clear_unread_results(cursor) # Start transaction cursor.execute("START TRANSACTION") @@ -24,6 +36,7 @@ def migrate(db): WHERE TABLE_NAME = 'traceroute' """) existing_columns = [row[0] for row in cursor.fetchall()] + logging.info(f"Existing columns in traceroute: {existing_columns}") # Build ALTER TABLE statement alter_statements = [] @@ -34,41 +47,30 @@ def migrate(db): existing_columns.remove('snr') existing_columns.append('snr_towards') - # Add missing columns + # Add missing columns (always add at the end for robustness) for col, type_def in needed_columns.items(): if col not in existing_columns: - if col == 'traceroute_id': - alter_statements.append(f"ADD COLUMN {col} {type_def} FIRST") - elif col == 'request_id': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER traceroute_id") - elif col == 'channel': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER to_id") - elif col == 'hop_limit': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER channel") - elif col == 'success': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER hop_limit") - elif col == 'time': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER success") - elif col == 'snr_back': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER snr_towards") - elif col == 'route_back': - alter_statements.append(f"ADD COLUMN {col} {type_def} AFTER route") - else: - alter_statements.append(f"ADD COLUMN {col} {type_def}") + alter_statements.append(f"ADD COLUMN {col} {type_def}") # Execute ALTER TABLE if there are changes needed if alter_statements: alter_sql = "ALTER TABLE traceroute " + ", ".join(alter_statements) - print(f"Executing: {alter_sql}") + logging.info(f"Executing ALTER TABLE: 
{alter_sql}") cursor.execute(alter_sql) + else: + logging.info("No ALTER TABLE needed for traceroute.") # Commit transaction db.commit() - print("Migration completed successfully") + logging.info("Migration completed successfully") except Exception as e: - print(f"Migration error: {str(e)}") + logging.error(f"Error during traceroute id migration: {e}") db.rollback() raise finally: - cursor.close() \ No newline at end of file + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_traceroute_improvements.py b/migrations/add_traceroute_improvements.py new file mode 100644 index 00000000..12ec6567 --- /dev/null +++ b/migrations/add_traceroute_improvements.py @@ -0,0 +1,87 @@ +import logging +import mysql.connector +from mysql.connector import Error + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + """ + Add improvements to traceroute table: + - request_id to group related attempts + - is_reply to distinguish requests from replies + - error_reason to track failure reasons + - attempt_number to track attempt sequence + """ + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # Check which columns already exist + cursor.execute("SHOW COLUMNS FROM traceroute") + existing_columns = [column[0] for column in cursor.fetchall()] + + # Build ALTER TABLE statement only for missing columns + alter_statements = [] + if 'request_id' not in existing_columns: + alter_statements.append("ADD COLUMN request_id BIGINT") + if 'is_reply' not in existing_columns: + alter_statements.append("ADD COLUMN is_reply BOOLEAN DEFAULT FALSE") + if 'error_reason' not in existing_columns: + alter_statements.append("ADD COLUMN error_reason INT") + if 'attempt_number' not in existing_columns: + alter_statements.append("ADD COLUMN attempt_number INT") + + # Only execute ALTER TABLE if we have columns to add + if 
alter_statements: + alter_sql = f"ALTER TABLE traceroute {', '.join(alter_statements)}" + cursor.execute(alter_sql) + + # Check if index exists before creating it + cursor.execute("SHOW INDEX FROM traceroute WHERE Key_name = 'idx_traceroute_request_id'") + if not cursor.fetchone(): + cursor.execute(""" + CREATE INDEX idx_traceroute_request_id + ON traceroute(request_id) + """) + + # Update existing records to use message_id as request_id if needed + if 'request_id' not in existing_columns: + cursor.execute(""" + UPDATE traceroute + SET request_id = message_id + WHERE request_id IS NULL + """) + + # Calculate attempt numbers for existing records if needed + if 'attempt_number' not in existing_columns: + cursor.execute(""" + UPDATE traceroute t1 + JOIN ( + SELECT request_id, COUNT(*) as attempt_count + FROM traceroute + GROUP BY request_id + ) t2 ON t1.request_id = t2.request_id + SET t1.attempt_number = t2.attempt_count + WHERE t1.attempt_number IS NULL + """) + + db.commit() + logging.info("Successfully added traceroute improvements") + + except Exception as e: + logging.error(f"Error during traceroute improvements migration: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_traceroute_snr.py b/migrations/add_traceroute_snr.py index beebef1a..11d5cf0b 100644 --- a/migrations/add_traceroute_snr.py +++ b/migrations/add_traceroute_snr.py @@ -1,11 +1,21 @@ import logging +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + def migrate(db): """ - Improve SNR storage in traceroute table + Update traceroute table to store separate SNR values for forward and return paths """ + cursor = None try: cursor = db.cursor() + clear_unread_results(cursor) # First check if the table exists cursor.execute(""" @@ -19,19 +29,26 @@ def migrate(db): logging.info("Creating traceroute table...") 
cursor.execute(""" CREATE TABLE traceroute ( + traceroute_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, from_id INT UNSIGNED NOT NULL, to_id INT UNSIGNED NOT NULL, - route VARCHAR(255), - snr TEXT COMMENT 'Semicolon-separated SNR values, stored as integers (actual_value * 4)', + route TEXT, + route_back TEXT, + snr_towards TEXT COMMENT 'Semicolon-separated SNR values for forward path', + snr_back TEXT COMMENT 'Semicolon-separated SNR values for return path', + success BOOLEAN DEFAULT FALSE, + channel TINYINT UNSIGNED, + hop_limit TINYINT UNSIGNED, ts_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - INDEX idx_traceroute_nodes (from_id, to_id) + INDEX idx_traceroute_nodes (from_id, to_id), + INDEX idx_traceroute_time (ts_created) ) """) db.commit() logging.info("Created traceroute table successfully") return - # Check if SNR column exists + # Check if old SNR column exists cursor.execute(""" SELECT COLUMN_TYPE FROM information_schema.COLUMNS @@ -40,78 +57,83 @@ def migrate(db): """) result = cursor.fetchone() - if not result: - # SNR column doesn't exist, add it - logging.info("Adding SNR column to traceroute table...") - cursor.execute(""" - ALTER TABLE traceroute - ADD COLUMN snr TEXT COMMENT 'Semicolon-separated SNR values, stored as integers (actual_value * 4)' - """) - db.commit() - logging.info("Added SNR column successfully") - return - - current_type = result[0] - - if current_type.upper() == 'VARCHAR(255)': - logging.info("Converting traceroute SNR column to more efficient format...") + if result: + # Old SNR column exists, need to migrate to new format + logging.info("Migrating from single SNR column to separate forward/return SNR columns...") - # Create temporary column with same type to preserve data + # Add new columns if they don't exist cursor.execute(""" ALTER TABLE traceroute - ADD COLUMN snr_temp VARCHAR(255) + ADD COLUMN IF NOT EXISTS snr_towards TEXT COMMENT 'Semicolon-separated SNR values for forward path', + ADD COLUMN IF NOT EXISTS 
snr_back TEXT COMMENT 'Semicolon-separated SNR values for return path' """) - # Copy existing data + # Copy existing SNR data to snr_towards (since historically it was forward path only) cursor.execute(""" UPDATE traceroute - SET snr_temp = snr + SET snr_towards = snr WHERE snr IS NOT NULL """) - # Drop old column and create new one with better type - cursor.execute(""" - ALTER TABLE traceroute - DROP COLUMN snr, - ADD COLUMN snr TEXT COMMENT 'Semicolon-separated SNR values, stored as integers (actual_value * 4)' - """) - - # Copy data back - cursor.execute(""" - UPDATE traceroute - SET snr = snr_temp - WHERE snr_temp IS NOT NULL - """) - - # Drop temporary column + # Drop old column cursor.execute(""" ALTER TABLE traceroute - DROP COLUMN snr_temp + DROP COLUMN snr """) db.commit() - logging.info("Converted SNR column successfully") + logging.info("Migrated SNR columns successfully") + + # Add other necessary columns if they don't exist + cursor.execute(""" + ALTER TABLE traceroute + ADD COLUMN IF NOT EXISTS traceroute_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST, + ADD COLUMN IF NOT EXISTS route_back TEXT AFTER route, + ADD COLUMN IF NOT EXISTS success BOOLEAN DEFAULT FALSE AFTER snr_back, + ADD COLUMN IF NOT EXISTS channel TINYINT UNSIGNED AFTER success, + ADD COLUMN IF NOT EXISTS hop_limit TINYINT UNSIGNED AFTER channel + """) - # Add index if it doesn't exist + # Add indices if they don't exist cursor.execute(""" SELECT COUNT(*) FROM information_schema.STATISTICS WHERE TABLE_NAME = 'traceroute' AND INDEX_NAME = 'idx_traceroute_nodes' """) - has_index = cursor.fetchone()[0] > 0 + has_node_index = cursor.fetchone()[0] > 0 - if not has_index: + if not has_node_index: logging.info("Adding index on from_id, to_id...") cursor.execute(""" ALTER TABLE traceroute ADD INDEX idx_traceroute_nodes (from_id, to_id) """) - db.commit() - logging.info("Added index successfully") + + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.STATISTICS + 
WHERE TABLE_NAME = 'traceroute' + AND INDEX_NAME = 'idx_traceroute_time' + """) + has_time_index = cursor.fetchone()[0] > 0 + + if not has_time_index: + logging.info("Adding index on ts_created...") + cursor.execute(""" + ALTER TABLE traceroute + ADD INDEX idx_traceroute_time (ts_created) + """) + + db.commit() + logging.info("Migration completed successfully") except Exception as e: logging.error(f"Error performing traceroute SNR migration: {e}") raise finally: - cursor.close() \ No newline at end of file + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/migrations/add_ts_uplink.py b/migrations/add_ts_uplink.py new file mode 100644 index 00000000..cd60cdc5 --- /dev/null +++ b/migrations/add_ts_uplink.py @@ -0,0 +1,50 @@ +import logging + +def clear_unread_results(cursor): + """Clear any unread results from the cursor""" + try: + while cursor.nextset(): + pass + except: + pass + +def migrate(db): + """ + Add ts_uplink column to nodeinfo table to track MQTT uplink status + """ + cursor = None + try: + cursor = db.cursor() + clear_unread_results(cursor) + + # Check if column exists + cursor.execute(""" + SELECT COUNT(*) + FROM information_schema.COLUMNS + WHERE TABLE_NAME = 'nodeinfo' + AND COLUMN_NAME = 'ts_uplink' + """) + column_exists = cursor.fetchone()[0] > 0 + + if not column_exists: + logging.info("Adding ts_uplink column to nodeinfo table...") + cursor.execute(""" + ALTER TABLE nodeinfo + ADD COLUMN ts_uplink TIMESTAMP NULL DEFAULT NULL + COMMENT 'Last time node connected via MQTT' + """) + db.commit() + logging.info("Added ts_uplink column successfully") + else: + logging.info("ts_uplink column already exists") + + except Exception as e: + logging.error(f"Error during ts_uplink migration: {e}") + db.rollback() + raise + finally: + if cursor: + try: + cursor.close() + except: + pass \ No newline at end of file diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..748bda1a --- 
/dev/null +++ b/package-lock.json @@ -0,0 +1,623 @@ +{ + "name": "meshinfo-lite", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "dependencies": { + "color-parse": "^2.0.2", + "color-rgba": "^3.0.0", + "color-space": "^2.3.2", + "earcut": "^3.0.1", + "ol": "^10.5.0", + "ol-ext": "^4.0.0", + "quickselect": "^3.0.0", + "rbush": "^3.0.1" + }, + "devDependencies": { + "copyfiles": "^2.4.1" + } + }, + "node_modules/@petamoriken/float16": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@petamoriken/float16/-/float16-3.9.2.tgz", + "integrity": "sha512-VgffxawQde93xKxT3qap3OH+meZf7VaSB5Sqd4Rqc+FP5alWbpOyan/7tRbOAvynjpG3GpdtAuGU/NdhQpmrog==" + }, + "node_modules/@types/rbush": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/rbush/-/rbush-4.0.0.tgz", + "integrity": "sha512-+N+2H39P8X+Hy1I5mC6awlTX54k3FhiUmvt7HWzGJZvF+syUAAxP/stwppS8JE84YHqFgRMv6fCy31202CMFxQ==" + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": 
"1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-convert/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/color-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-2.0.0.tgz", + "integrity": "sha512-SbtvAMWvASO5TE2QP07jHBMXKafgdZz8Vrsrn96fiL+O92/FN/PLARzUW5sKt013fjAprK2d2iCn2hk2Xb5oow==", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/color-parse": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/color-parse/-/color-parse-2.0.2.tgz", + "integrity": "sha512-eCtOz5w5ttWIUcaKLiktF+DxZO1R9KLNY/xhbV6CkhM7sR3GhVghmt6X6yOnzeaM24po+Z9/S1apbXMwA3Iepw==", + "dependencies": { + "color-name": "^2.0.0" + } + }, + "node_modules/color-rgba": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/color-rgba/-/color-rgba-3.0.0.tgz", + "integrity": "sha512-PPwZYkEY3M2THEHHV6Y95sGUie77S7X8v+h1r6LSAPF3/LL2xJ8duUXSrkic31Nzc4odPwHgUbiX/XuTYzQHQg==", + "dependencies": { + "color-parse": "^2.0.0", + "color-space": "^2.0.0" + } + }, + "node_modules/color-space": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/color-space/-/color-space-2.3.2.tgz", + "integrity": "sha512-BcKnbOEsOarCwyoLstcoEztwT0IJxqqQkNwDuA3a65sICvvHL2yoeV13psoDFh5IuiOMnIOKdQDwB4Mk3BypiA==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/copyfiles": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/copyfiles/-/copyfiles-2.4.1.tgz", + "integrity": "sha512-fereAvAvxDrQDOXybk3Qu3dPbOoKoysFMWtkY3mv5BsL8//OSZVL5DCLYqgRfY5cWirgRzlC+WSrxp6Bo3eNZg==", + "dev": true, + "dependencies": { + "glob": "^7.0.5", + "minimatch": "^3.0.3", + "mkdirp": "^1.0.4", + "noms": "0.0.0", + "through2": "^2.0.1", + "untildify": "^4.0.0", + "yargs": "^16.1.0" + }, + "bin": { + "copyfiles": "copyfiles", + "copyup": "copyfiles" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true + }, + "node_modules/earcut": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/earcut/-/earcut-3.0.1.tgz", + "integrity": "sha512-0l1/0gOjESMeQyYaK5IDiPNvFeu93Z/cO0TjZh9eZ1vyCtZnA7KMZ8rQggpsJHIbGSdrqYq9OhuveadOVHCshw==" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/geotiff": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/geotiff/-/geotiff-2.1.3.tgz", + "integrity": "sha512-PT6uoF5a1+kbC3tHmZSUsLHBp2QJlHasxxxxPW47QIY1VBKpFB+FcDvX+MxER6UzgLQZ0xDzJ9s48B9JbOCTqA==", + "dependencies": { + "@petamoriken/float16": "^3.4.7", + "lerc": "^3.0.0", + "pako": "^2.0.4", + "parse-headers": "^2.0.2", + "quick-lru": "^6.1.1", + "web-worker": "^1.2.0", + "xml-utils": "^1.0.2", + "zstddec": "^0.1.0" + }, + "engines": { + "node": ">=10.19" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + 
}, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==", + "dev": true + }, + "node_modules/lerc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lerc/-/lerc-3.0.0.tgz", + "integrity": "sha512-Rm4J/WaHhRa93nCN2mwWDZFoRVF18G1f47C+kvQWyHGEZxFpTUi73p7lMVSAndyxGt6lJ2/CFbOcf9ra5p8aww==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + 
"dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "dev": true, + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/noms": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/noms/-/noms-0.0.0.tgz", + "integrity": "sha512-lNDU9VJaOPxUmXcLb+HQFeUgQQPtMI24Gt6hgfuMHRJgMRHMF/qZ4HJD3GDru4sSw9IQl2jPjAYnQrdIeLbwow==", + "dev": true, + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "~1.0.31" + } + }, + "node_modules/ol": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/ol/-/ol-10.5.0.tgz", + "integrity": "sha512-nHFx8gkGmvYImsa7iKkwUnZidd5gn1XbMZd9GNOorvm9orjW9gQvT3Naw/MjIasVJ3cB9EJUdCGR2EFAulMHsQ==", + "dependencies": { + "@types/rbush": "4.0.0", + "earcut": "^3.0.0", + "geotiff": "^2.1.3", + "pbf": "4.0.1", + "rbush": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/openlayers" + } + }, + "node_modules/ol-ext": { + "version": "4.0.33", + "resolved": "https://registry.npmjs.org/ol-ext/-/ol-ext-4.0.33.tgz", + "integrity": "sha512-aCbgYe4tScqBMMGnheK2eM9ArjHE1u5g2LzfHKVht/Ko1ylvFNFDkzEHSwPWEVSTrrgiDqy/vtDnrqLJ+OPozw==", + "peerDependencies": { + "ol": ">= 5.3.0" + } + }, + "node_modules/ol/node_modules/rbush": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/rbush/-/rbush-4.0.1.tgz", + "integrity": "sha512-IP0UpfeWQujYC8Jg162rMNc01Rf0gWMMAb2Uxus/Q0qOFw4lCcq6ZnQEZwUoJqWyUGJ9th7JjwI4yIWo+uvoAQ==", + "dependencies": { + "quickselect": "^3.0.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": 
"sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/pako": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pako/-/pako-2.1.0.tgz", + "integrity": "sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug==" + }, + "node_modules/parse-headers": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/parse-headers/-/parse-headers-2.0.6.tgz", + "integrity": "sha512-Tz11t3uKztEW5FEVZnj1ox8GKblWn+PvHY9TmJV5Mll2uHEwRdR/5Li1OlXoECjLYkApdhWy44ocONwXLiKO5A==" + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pbf": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pbf/-/pbf-4.0.1.tgz", + "integrity": "sha512-SuLdBvS42z33m8ejRbInMapQe8n0D3vN/Xd5fmWM3tufNgRQFBpaW2YVJxQZV4iPNqb0vEFvssMEo5w9c6BTIA==", + "dependencies": { + "resolve-protobuf-schema": "^2.1.0" + }, + "bin": { + "pbf": "bin/pbf" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "node_modules/protocol-buffers-schema": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/protocol-buffers-schema/-/protocol-buffers-schema-3.6.0.tgz", + "integrity": "sha512-TdDRD+/QNdrCGCE7v8340QyuXd4kIWIgapsE2+n/SaGiSSbomYl4TjHlvIoCWRpE7wFt02EpB35VVA2ImcBVqw==" + }, + "node_modules/quick-lru": { + "version": "6.1.2", + "resolved": 
"https://registry.npmjs.org/quick-lru/-/quick-lru-6.1.2.tgz", + "integrity": "sha512-AAFUA5O1d83pIHEhJwWCq/RQcRukCkn/NSm2QsTEMle5f2hP0ChI2+3Xb051PZCkLryI/Ir1MVKviT2FIloaTQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/quickselect": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/quickselect/-/quickselect-3.0.0.tgz", + "integrity": "sha512-XdjUArbK4Bm5fLLvlm5KpTFOiOThgfWWI4axAZDWg4E/0mKdZyI9tNEfds27qCi1ze/vwTR16kvmmGhRra3c2g==" + }, + "node_modules/rbush": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/rbush/-/rbush-3.0.1.tgz", + "integrity": "sha512-XRaVO0YecOpEuIvbhbpTrZgoiI6xBlz6hnlr6EHhd+0x9ase6EmeN+hdwwUaJvLcsFFQ8iWVF1GAK1yB0BWi0w==", + "dependencies": { + "quickselect": "^2.0.0" + } + }, + "node_modules/rbush/node_modules/quickselect": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/quickselect/-/quickselect-2.0.0.tgz", + "integrity": "sha512-RKJ22hX8mHe3Y6wH/N3wCM6BWtjaxIyyUIkpHOvfFnxdI4yD4tBXEBKSbriGujF6jnSVkJrffuo6vxACiSSxIw==" + }, + "node_modules/readable-stream": { + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-protobuf-schema": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/resolve-protobuf-schema/-/resolve-protobuf-schema-2.1.0.tgz", + "integrity": "sha512-kI5ffTiZWmJaS/huM8wZfEMer1eRd7oJQhDuxeCLe3t7N7mX3z94CN0xPxBQxFYQTSNz9T0i+v6inKqSdK8xrQ==", + "dependencies": { + "protocol-buffers-schema": "^3.3.1" + } + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==", + "dev": true + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/through2/node_modules/isarray": { + "version": "1.0.0", + 
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "dev": true + }, + "node_modules/through2/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/through2/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/untildify": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz", + "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true + }, + "node_modules/web-worker": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.5.0.tgz", + "integrity": "sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw==" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/xml-utils": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/xml-utils/-/xml-utils-1.10.2.tgz", + "integrity": "sha512-RqM+2o1RYs6T8+3DzDSoTRAUfrvaejbVHcp3+thnAtDKo8LskR+HomLajEy5UjTz24rpka7AxVBRR3g2wTUkJA==" + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true, + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/zstddec": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/zstddec/-/zstddec-0.1.0.tgz", + "integrity": "sha512-w2NTI8+3l3eeltKAdK8QpiLo/flRAr2p8AGeakfMZOXBxOg9HIu4LVDxBi81sYgVhFhdJjv1OrB5ssI8uFPoLg==" + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..d641557a --- /dev/null +++ b/package.json @@ -0,0 +1,18 @@ +{ + "dependencies": { + "color-parse": "^2.0.2", + "color-rgba": "^3.0.0", + "color-space": "^2.3.2", + "earcut": "^3.0.1", + "ol": "^10.5.0", + "ol-ext": "^4.0.0", + "quickselect": "^3.0.0", + "rbush": "^3.0.1" + }, + "devDependencies": { + "copyfiles": "^2.4.1" + }, + "scripts": { + "copy-vendor": "copyfiles -u 2 \"node_modules/ol/**/*.js\" \"node_modules/ol/ol.css\" www/js/vendor/ol/ && cp node_modules/rbush/index.js www/js/vendor/rbush.js && cp node_modules/quickselect/index.js www/js/vendor/quickselect.js && cp node_modules/earcut/dist/earcut.min.js www/js/vendor/earcut.js && copyfiles -u 2 \"node_modules/color-space/**/*.js\" www/js/vendor/color-space/ && cp node_modules/color-rgba/index.js www/js/vendor/color-rgba.js && cp node_modules/color-parse/index.js www/js/vendor/color-parse.js && copyfiles -u 2 \"node_modules/ol-ext/**/*.js\" \"node_modules/ol-ext/ol-ext.css\" www/js/vendor/ol-ext/" + } +} diff --git a/process_payload.py b/process_payload.py index 6e591ce4..a8de9452 100644 --- a/process_payload.py +++ b/process_payload.py @@ -119,16 +119,51 @@ def get_data(msg): ) elif portnum == portnums_pb2.ROUTING_APP: j["type"] = "routing" - j["decoded"]["json_payload"] = to_json( - mesh_pb2.Routing().FromString(msg.decoded.payload) - ) + + # Parse the routing message + routing_msg = 
mesh_pb2.Routing().FromString(msg.decoded.payload) + routing_data = to_json(routing_msg) + + # Extract routing information based on actual packet structure + routing_info = { + "routing_data": routing_data, + "error_reason": routing_data.get("error_reason", None), + "request_id": j.get("request_id", None), + "relay_node": j.get("relay_node", None), + "hop_limit": j.get("hop_limit", None), + "hop_start": j.get("hop_start", None), + "hops_taken": (j.get("hop_start", 0) - j.get("hop_limit", 0)) if j.get("hop_start") is not None and j.get("hop_limit") is not None else None, + "is_error": routing_data.get("error_reason") is not None and routing_data.get("error_reason") > 0, + "success": routing_data.get("error_reason") is None or routing_data.get("error_reason") == 0 + } + + # Add error reason descriptions + error_reason = routing_data.get("error_reason") + if error_reason is not None: + error_descriptions = { + 0: "None", + 1: "No Interface", + 2: "No Route", + 3: "Got Nak", + 4: "Timeout", + 5: "No Interface", + 6: "No Route", + 7: "Got Nak", + 8: "Timeout", + 9: "No Interface", + 10: "No Route", + 11: "Got Nak", + 12: "Timeout" + } + routing_info["error_description"] = error_descriptions.get(error_reason, f"Unknown Error {error_reason}") + + j["decoded"]["json_payload"] = routing_info elif portnum == portnums_pb2.TRACEROUTE_APP: j["type"] = "traceroute" + route_discovery = mesh_pb2.RouteDiscovery().FromString(msg.decoded.payload) - route_data = to_json(route_discovery) - # Log the raw route data for debugging - logging.debug(f"Raw traceroute data: {json.dumps(route_data, indent=2)}") + route_data = to_json(route_discovery) # Ensure we have all required fields with proper defaults route_data.setdefault("route", []) @@ -146,8 +181,8 @@ def get_data(msg): j["decoded"]["json_payload"] = route_data - # Log the processed payload - logging.debug(f"Processed traceroute payload: {json.dumps(j['decoded']['json_payload'], indent=2)}") + # Log the final data that will be stored 
+ #logging.info(f"Final traceroute data to be stored: {json.dumps(j['decoded']['json_payload'], indent=2)}") elif portnum == portnums_pb2.POSITION_APP: j["type"] = "position" @@ -159,6 +194,49 @@ def get_data(msg): j["decoded"]["json_payload"] = to_json( telemetry_pb2.Telemetry().FromString(msg.decoded.payload) ) + elif portnum == portnums_pb2.STORE_FORWARD_APP: + j["type"] = "store_forward" + # Store & Forward messages contain routing information for delayed message delivery + # We'll log them but not store them in the database as they're internal routing messages + j["decoded"]["json_payload"] = { + "message": "Store & Forward routing message" + } + logging.debug(f"Received Store & Forward message from {j['from']} - internal routing message") + elif portnum == portnums_pb2.RANGE_TEST_APP: + j["type"] = "range_test" + # Range test messages are used for testing radio range + j["decoded"]["json_payload"] = { + "message": "Range test message" + } + logging.debug(f"Received Range Test message from {j['from']}") + elif portnum == portnums_pb2.SIMULATOR_APP: + j["type"] = "simulator" + # Simulator messages are used for testing + j["decoded"]["json_payload"] = { + "message": "Simulator message" + } + logging.debug(f"Received Simulator message from {j['from']}") + elif portnum == portnums_pb2.ZPS_APP: + j["type"] = "zps" + # ZPS (Zero Power Sensor) messages + j["decoded"]["json_payload"] = { + "message": "ZPS message" + } + logging.debug(f"Received ZPS message from {j['from']}") + elif portnum == portnums_pb2.POWERSTRESS_APP: + j["type"] = "powerstress" + # Power stress test messages + j["decoded"]["json_payload"] = { + "message": "Power stress test message" + } + logging.debug(f"Received Power Stress message from {j['from']}") + elif portnum == portnums_pb2.RETICULUM_TUNNEL_APP: + j["type"] = "reticulum_tunnel" + # Reticulum tunnel messages + j["decoded"]["json_payload"] = { + "message": "Reticulum tunnel message" + } + logging.debug(f"Received Reticulum Tunnel message 
from {j['from']}") if j["type"]: # Only log if we successfully determined the type msg_type = j["type"] @@ -169,6 +247,14 @@ def get_data(msg): forward_hops = len(route_info.get("route", [])) return_hops = len(route_info.get("route_back", [])) logging.info(f"Received traceroute from {msg_from} with {forward_hops} forward hops and {return_hops} return hops") + elif msg_type == "routing": + routing_info = j["decoded"]["json_payload"] + error_reason = routing_info.get("error_reason", 0) + error_desc = routing_info.get("error_description", "Unknown") + hops_taken = routing_info.get("hops_taken", 0) + relay_node = routing_info.get("relay_node", "None") + success = routing_info.get("success", False) + logging.info(f"Received routing from {msg_from} via relay {relay_node} with {hops_taken} hops (error: {error_desc}, success: {success})") elif msg_type == "text" and j.get("hop_limit") is not None and j.get("hop_start") is not None: hop_count = j["hop_start"] - j["hop_limit"] logging.info(f"Received {msg_type} from {msg_from} with {hop_count} hops ({j['hop_limit']}/{j['hop_start']})") @@ -183,17 +269,37 @@ def get_data(msg): return None -def process_payload(payload, topic): - md = MeshData() +def process_payload(payload, topic, md: MeshData): + # --- Add log at the start --- + logger = logging.getLogger(__name__) # Get logger instance + logger.debug(f"process_payload: Entered function for topic: {topic}") + + # Check if this is an ignored channel + if "/2/e/" in topic: + channel_name = topic.split("/")[-2] # Get channel name from topic + ignored_channels = config.get("channels", "ignored_channels", fallback="").split(",") + if channel_name in ignored_channels: + logger.debug(f"Ignoring message from channel: {channel_name}") + return + + # --- End log --- mp = get_packet(payload) if mp: try: data = get_data(mp) if data: # Only store if we got valid data + logger.debug(f"process_payload: Calling md.store() for topic {topic}") + # Use the passed-in MeshData instance 
md.store(data, topic) else: - logging.warning(f"Received invalid or unsupported message type on topic {topic}") + # Log topic only if debug is enabled or if it's an unsupported type + if config.get("server", "debug") == "true": + logging.warning(f"Received invalid or unsupported message type on topic {topic}. Payload: {payload[:100]}...") # Log partial payload for debug + else: + logger.warning(f"process_payload: get_packet returned None for topic {topic}") + except KeyError as e: logging.warning(f"Failed to process message: Missing key {str(e)} in payload on topic {topic}") except Exception as e: - logging.error(f"Unexpected error processing message: {str(e)}") \ No newline at end of file + # Log the full traceback for unexpected errors + logging.exception(f"Unexpected error processing message on topic {topic}: {str(e)}") # Use logging.exception \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 6e2830a5..72209309 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,8 +9,16 @@ colorlog bcrypt pyjwt matplotlib +cairocffi +cairosvg +Pillow rasterio scipy geopy boto3 -pytz \ No newline at end of file +pytz +Flask-Caching +pandas +psutil>=7.0.0 +shapely +py-staticmaps \ No newline at end of file diff --git a/run.sh b/run.sh index 1fa797ce..8ce43081 100755 --- a/run.sh +++ b/run.sh @@ -1,3 +1,5 @@ #!/usr/bin/env bash +python generate_css.py +python generate_favicon.py python main.py diff --git a/runtime_cache/2029240f6d1128be89ddc32729463129 b/runtime_cache/2029240f6d1128be89ddc32729463129 new file mode 100644 index 00000000..60b84f8b Binary files /dev/null and b/runtime_cache/2029240f6d1128be89ddc32729463129 differ diff --git a/scripts/cleanup_corrupted_timestamps.sh b/scripts/cleanup_corrupted_timestamps.sh new file mode 100755 index 00000000..081778de --- /dev/null +++ b/scripts/cleanup_corrupted_timestamps.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +# Cleanup corrupted timestamp data in message_reception table +# This script removes 
records with future timestamps or very old timestamps that are likely corrupted + +set -e + +echo "=== MeshInfo Database Timestamp Cleanup ===" +echo "This script will clean up corrupted timestamp data in the message_reception table" +echo "" + +# Check if we're in the right directory +if [ ! -f "docker-compose.yml" ]; then + echo "Error: docker-compose.yml not found. Please run this script from the project root directory." + exit 1 +fi + +# Check if containers are running +if ! docker compose ps | grep -q "mariadb.*Up"; then + echo "Error: MariaDB container is not running. Please start the services first:" + echo " docker compose up -d" + exit 1 +fi + +echo "Checking for corrupted timestamp data..." + +# Count bad records before cleanup +BAD_RECORDS=$(docker compose exec -T mariadb mariadb -u root -ppassw0rd meshdata -e " +SELECT COUNT(*) FROM message_reception WHERE rx_time > UNIX_TIMESTAMP() + 86400; +" 2>/dev/null | tail -n 1) + +OLD_RECORDS=$(docker compose exec -T mariadb mariadb -u root -ppassw0rd meshdata -e " +SELECT COUNT(*) FROM message_reception WHERE rx_time < UNIX_TIMESTAMP() - (5 * 365 * 24 * 3600); +" 2>/dev/null | tail -n 1) + +echo "Found $BAD_RECORDS records with future timestamps (>24 hours ahead)" +echo "Found $OLD_RECORDS records with very old timestamps (>5 years ago)" + +if [ "$BAD_RECORDS" -eq 0 ] && [ "$OLD_RECORDS" -eq 0 ]; then + echo "No corrupted timestamp data found. Database is clean!" + exit 0 +fi + +echo "" +echo "Cleaning up corrupted timestamp data..." + +# Clean up future timestamps +if [ "$BAD_RECORDS" -gt 0 ]; then + echo "Removing $BAD_RECORDS records with future timestamps..." + docker compose exec -T mariadb mariadb -u root -ppassw0rd meshdata -e " + DELETE FROM message_reception WHERE rx_time > UNIX_TIMESTAMP() + 86400; + " +fi + +# Clean up very old timestamps +if [ "$OLD_RECORDS" -gt 0 ]; then + echo "Removing $OLD_RECORDS records with very old timestamps..." 
+ docker compose exec -T mariadb mariadb -u root -ppassw0rd meshdata -e " + DELETE FROM message_reception WHERE rx_time < UNIX_TIMESTAMP() - (5 * 365 * 24 * 3600); + " +fi + +# Verify cleanup +REMAINING_RECORDS=$(docker compose exec -T mariadb mariadb -u root -ppassw0rd meshdata -e " +SELECT COUNT(*) FROM message_reception; +" 2>/dev/null | tail -n 1) + +echo "" +echo "=== Cleanup Complete ===" +echo "Remaining records in message_reception table: $REMAINING_RECORDS" +echo "" +echo "The corrupted timestamp data has been removed." +echo "This should fix the 'negative minutes ago' display issues in the map interface." \ No newline at end of file diff --git a/scripts/docker-build.sh b/scripts/docker-build.sh index 76cbb358..2e6bca89 100644 --- a/scripts/docker-build.sh +++ b/scripts/docker-build.sh @@ -2,15 +2,19 @@ # build -# set version from args if not, exit -#if [ -z "$1" ] -# then -# echo "No version supplied (e.g. 1.0.0)" -# exit 1 -#fi +# Get version from git if not provided +if [ -z "$1" ]; then + # Get the latest git tag + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") + # Get current commit hash + COMMIT_HASH=$(git rev-parse --short HEAD) + VERSION="${LATEST_TAG}-${COMMIT_HASH}" + echo "Using git-based version: $VERSION" +else + VERSION=$1 + echo "Using provided version: $VERSION" +fi -REPO=dadecoza/meshinfo -# VERSION=$1 -VERSION=latest +REPO=agessaman/meshinfo docker build -t $REPO:$VERSION --platform=linux/amd64 . 
diff --git a/scripts/release.sh b/scripts/release.sh index b0743030..11af03ee 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -9,7 +9,7 @@ if [ -z "$1" ] exit 1 fi -REPO=dadecoza/meshinfo +REPO=agessaman/meshinfo VERSION=$1 # echo $CR_PAT | docker login ghcr.io -u USERNAME --password-stdin git tag -a $VERSION -m "Version $VERSION" && git push --tags && \ diff --git a/setup_database.py b/setup_database.py new file mode 100644 index 00000000..2bff0c15 --- /dev/null +++ b/setup_database.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 +""" +Database Setup Script for MeshInfo-Lite + +This script creates the database and user with proper privileges for new installations. +It should be run once during initial setup. + +Usage: + python setup_database.py + +Requirements: + - MariaDB/MySQL server running + - Root access to the database server + - config.ini file with database configuration +""" + +import configparser +import mysql.connector +import logging +import sys +import os + +def setup_logging(): + """Setup basic logging for the setup script.""" + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + +def check_config(): + """Check if config.ini exists and has required database settings.""" + if not os.path.exists('config.ini'): + logging.error("config.ini not found! 
Please create it first.") + return False + + config = configparser.ConfigParser() + config.read('config.ini') + + required_sections = ['database'] + required_keys = ['host', 'username', 'password', 'database'] + + for section in required_sections: + if section not in config: + logging.error(f"Missing [{section}] section in config.ini") + return False + + for key in required_keys: + if key not in config[section]: + logging.error(f"Missing {key} in [{section}] section of config.ini") + return False + + return True + +def test_root_connection(config): + """Test connection to database as root.""" + try: + root_password = config.get("database", "root_password", fallback="passw0rd") + db = mysql.connector.connect( + host=config["database"]["host"], + user="root", + password=root_password, + ) + db.close() + logging.info("Successfully connected to database as root") + return True + except mysql.connector.Error as e: + logging.error(f"Failed to connect as root: {e}") + logging.error("Please ensure:") + logging.error("1. MariaDB/MySQL server is running") + logging.error("2. Root password is correct in config.ini [database] root_password") + logging.error("3. 
Root user can connect from this host") + return False + +def create_database_and_user(config): + """Create database and user with proper privileges.""" + root_password = config.get("database", "root_password", fallback="passw0rd") + + try: + # Connect as root + db = mysql.connector.connect( + host=config["database"]["host"], + user="root", + password=root_password, + ) + + logging.info("Creating database and user...") + + # Create database + cur = db.cursor() + cur.execute(f"""CREATE DATABASE IF NOT EXISTS {config["database"]["database"]} +CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci""") + cur.close() + logging.info(f"✓ Database '{config['database']['database']}' created/verified") + + # Create user if it doesn't exist + cur = db.cursor() + cur.execute(f"""CREATE USER IF NOT EXISTS '{config["database"]["username"]}'@'%' +IDENTIFIED BY '{config["database"]["password"]}'""") + cur.close() + logging.info(f"✓ User '{config['database']['username']}' created/verified") + + # Grant all privileges on the specific database + cur = db.cursor() + cur.execute(f"""GRANT ALL PRIVILEGES ON {config["database"]["database"]}.* +TO '{config["database"]["username"]}'@'%'""") + cur.close() + logging.info(f"✓ Granted ALL PRIVILEGES on {config['database']['database']}.*") + + # Grant RELOAD privilege for query cache operations + cur = db.cursor() + cur.execute(f"""GRANT RELOAD ON *.* TO '{config["database"]["username"]}'@'%'""") + cur.close() + logging.info("✓ Granted RELOAD privilege for query cache operations") + + # Grant PROCESS privilege for monitoring + cur = db.cursor() + cur.execute(f"""GRANT PROCESS ON *.* TO '{config["database"]["username"]}'@'%'""") + cur.close() + logging.info("✓ Granted PROCESS privilege for monitoring") + + # Flush privileges to apply changes + cur = db.cursor() + cur.execute("FLUSH PRIVILEGES") + cur.close() + logging.info("✓ Privileges flushed") + + db.commit() + db.close() + + logging.info("✓ Database setup completed successfully!") + return True + + 
except mysql.connector.Error as e: + logging.error(f"Error creating database: {e}") + return False + +def test_user_connection(config): + """Test connection with the newly created user.""" + try: + db = mysql.connector.connect( + host=config["database"]["host"], + user=config["database"]["username"], + password=config["database"]["password"], + database=config["database"]["database"], + ) + db.close() + logging.info("✓ Successfully connected with application user") + return True + except mysql.connector.Error as e: + logging.error(f"Failed to connect with application user: {e}") + return False + +def test_privileges(config): + """Test if the user has the required privileges.""" + try: + db = mysql.connector.connect( + host=config["database"]["host"], + user=config["database"]["username"], + password=config["database"]["password"], + database=config["database"]["database"], + ) + cur = db.cursor() + + # Test RELOAD privilege + try: + cur.execute("FLUSH QUERY CACHE") + logging.info("✓ RELOAD privilege verified") + except mysql.connector.Error as e: + logging.warning(f"RELOAD privilege test failed: {e}") + + # Test PROCESS privilege + try: + cur.execute("SHOW PROCESSLIST") + logging.info("✓ PROCESS privilege verified") + except mysql.connector.Error as e: + logging.warning(f"PROCESS privilege test failed: {e}") + + cur.close() + db.close() + return True + + except mysql.connector.Error as e: + logging.error(f"Error testing privileges: {e}") + return False + +def main(): + """Main setup function.""" + setup_logging() + + logging.info("=== MeshInfo-Lite Database Setup ===") + + # Check configuration + if not check_config(): + sys.exit(1) + + config = configparser.ConfigParser() + config.read('config.ini') + + # Test root connection + if not test_root_connection(config): + sys.exit(1) + + # Create database and user + if not create_database_and_user(config): + sys.exit(1) + + # Test user connection + if not test_user_connection(config): + logging.error("Setup completed 
but user connection test failed") + sys.exit(1) + + # Test privileges + test_privileges(config) + + logging.info("=== Setup Complete ===") + logging.info("You can now start the MeshInfo-Lite application") + logging.info("Run: python main.py") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/setup_docker.py b/setup_docker.py new file mode 100644 index 00000000..ca38f70e --- /dev/null +++ b/setup_docker.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python3 +""" +Docker Setup Script for MeshInfo-Lite + +This script sets up the database with proper privileges for Docker Compose installations. +It should be run after the Docker containers are started. + +Usage: + python setup_docker.py + +Requirements: + - Docker Compose services running + - config.ini file with database configuration +""" + +import configparser +import mysql.connector +import logging +import sys +import os +import time + +def setup_logging(): + """Setup basic logging for the setup script.""" + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + +def check_config(): + """Check if config.ini exists and has required database settings.""" + if not os.path.exists('config.ini'): + logging.error("config.ini not found! 
Please create it first.") + return False + + config = configparser.ConfigParser() + config.read('config.ini') + + required_sections = ['database'] + required_keys = ['host', 'username', 'password', 'database'] + + for section in required_sections: + if section not in config: + logging.error(f"Missing [{section}] section in config.ini") + return False + + for key in required_keys: + if key not in config[section]: + logging.error(f"Missing {key} in [{section}] section of config.ini") + return False + + return True + +def wait_for_database(config, max_attempts=30): + """Wait for the database to become available.""" + logging.info("Waiting for database to become available...") + + for attempt in range(max_attempts): + try: + # Try to connect as root first + root_password = config.get("database", "root_password", fallback="passw0rd") + db = mysql.connector.connect( + host=config["database"]["host"], + user="root", + password=root_password, + connection_timeout=5 + ) + db.close() + logging.info("✓ Database is available") + return True + except mysql.connector.Error as e: + if attempt < max_attempts - 1: + logging.info(f"Database not ready yet (attempt {attempt + 1}/{max_attempts})...") + time.sleep(2) + else: + logging.error(f"Database failed to become available: {e}") + return False + + return False + +def setup_database_privileges(config): + """Set up database privileges for the application user.""" + root_password = config.get("database", "root_password", fallback="passw0rd") + + try: + # Connect as root + db = mysql.connector.connect( + host=config["database"]["host"], + user="root", + password=root_password, + ) + + logging.info("Setting up database privileges...") + + # Grant RELOAD privilege for query cache operations + cur = db.cursor() + cur.execute(f"""GRANT RELOAD ON *.* TO '{config["database"]["username"]}'@'%'""") + cur.close() + logging.info("✓ Granted RELOAD privilege for query cache operations") + + # Grant PROCESS privilege for monitoring + cur = 
db.cursor() + cur.execute(f"""GRANT PROCESS ON *.* TO '{config["database"]["username"]}'@'%'""") + cur.close() + logging.info("✓ Granted PROCESS privilege for monitoring") + + # Flush privileges to apply changes + cur = db.cursor() + cur.execute("FLUSH PRIVILEGES") + cur.close() + logging.info("✓ Privileges flushed") + + db.commit() + db.close() + + logging.info("✓ Database privileges setup completed successfully!") + return True + + except mysql.connector.Error as e: + logging.error(f"Error setting up database privileges: {e}") + return False + +def test_user_connection(config): + """Test connection with the application user.""" + try: + db = mysql.connector.connect( + host=config["database"]["host"], + user=config["database"]["username"], + password=config["database"]["password"], + database=config["database"]["database"], + ) + db.close() + logging.info("✓ Successfully connected with application user") + return True + except mysql.connector.Error as e: + logging.error(f"Failed to connect with application user: {e}") + return False + +def test_privileges(config): + """Test if the user has the required privileges.""" + try: + db = mysql.connector.connect( + host=config["database"]["host"], + user=config["database"]["username"], + password=config["database"]["password"], + database=config["database"]["database"], + ) + cur = db.cursor() + + # Test RELOAD privilege + try: + cur.execute("FLUSH QUERY CACHE") + logging.info("✓ RELOAD privilege verified") + except mysql.connector.Error as e: + logging.warning(f"RELOAD privilege test failed: {e}") + + # Test PROCESS privilege + try: + cur.execute("SHOW PROCESSLIST") + logging.info("✓ PROCESS privilege verified") + except mysql.connector.Error as e: + logging.warning(f"PROCESS privilege test failed: {e}") + + cur.close() + db.close() + return True + + except mysql.connector.Error as e: + logging.error(f"Error testing privileges: {e}") + return False + +def main(): + """Main setup function.""" + setup_logging() + + 
logging.info("=== MeshInfo-Lite Docker Setup ===") + + # Check configuration + if not check_config(): + sys.exit(1) + + config = configparser.ConfigParser() + config.read('config.ini') + + # Wait for database to become available + if not wait_for_database(config): + logging.error("Database is not available. Please ensure Docker Compose services are running.") + logging.error("Run: docker-compose up -d") + sys.exit(1) + + # Set up database privileges + if not setup_database_privileges(config): + sys.exit(1) + + # Test user connection + if not test_user_connection(config): + logging.error("Setup completed but user connection test failed") + sys.exit(1) + + # Test privileges + test_privileges(config) + + logging.info("=== Docker Setup Complete ===") + logging.info("The MeshInfo-Lite application should now have full functionality") + logging.info("Check the application logs for any remaining issues") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/templates/account.html.j2 b/templates/account.html.j2 new file mode 100644 index 00000000..62cc7c1e --- /dev/null +++ b/templates/account.html.j2 @@ -0,0 +1,275 @@ +{% set this_page = "account" %} +{% extends "layout.html.j2" %} + +{% block title %}Account Settings | MeshInfo{% endblock %} + +{% block og_title %}Account Settings | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Manage your account settings, change password, and unlink nodes in the {{ config['mesh']['short_name'] }} mesh network.{% endblock %} + +{% block head %} + {{ super() }} + +{% endblock %} + +{% block content %} +
+
Account Settings
+ +
+ +
+
+
+
Change Password
+
+
+
+
+ + +
+
+ + + Password must be at least 6 characters long. +
+
+ + +
+ +
+ +
+
+
+ + +
+
+
+
Linked Nodes
+
+
+ {% if nodes %} +

You have {{ nodes|length }} linked node(s).

+ {% for id, node in nodes.items()|sort(attribute='1.short_name') %} +
+
+
+ + + {{ node.short_name }} + + +
+ {{ node.long_name }} +
+ +
+
+ {% endfor %} + {% else %} +

You don't have any linked nodes.

+ Link a Node + {% endif %} + +
+
+
+
+
+ + + + + +{% endblock %} + diff --git a/templates/allnodes.html.j2 b/templates/allnodes.html.j2 new file mode 100644 index 00000000..26b372e0 --- /dev/null +++ b/templates/allnodes.html.j2 @@ -0,0 +1,44 @@ +{% set this_page = "allnodes" %} +{% extends "layout.html.j2" %} + +{% block title %}All Nodes | MeshInfo{% endblock %} + +{% block og_title %}All Nodes | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Complete list of all nodes in the {{ config['mesh']['short_name'] }} mesh network, including offline nodes. Search, filter, and view detailed node information.{% endblock %} +{% block og_image_width %}1200{% endblock %} +{% block og_image_height %}630{% endblock %} +{% block twitter_title %}All Nodes | {{ config['mesh']['name'] }}{% endblock %} +{% block twitter_description %}Complete list of all nodes in the {{ config['mesh']['short_name'] }} mesh network, including offline nodes. Search, filter, and view detailed node information.{% endblock %} +{% block twitter_image_width %}1200{% endblock %} +{% block twitter_image_height %}630{% endblock %} + +{% block head %} + {{ super() }} +{% endblock %} + +{% block content %} +{% if latest and latest.id is not none %} +{% set lnodeid = utils.convert_node_id_from_int_to_hex(latest.id) %} +{% if lnodeid and lnodeid in nodes %} +{% set lnode = nodes[lnodeid] %} +
+ 📌 Welcome to our newest node, {{ lnode.long_name }} ({{ lnode.short_name }}).👋 +
+{% endif %} +{% endif %} + +{% if hw_model_filter and hw_name_filter %} +
+ 🔧 Showing nodes with hardware model: {{ hw_name_filter }} + Clear Filter +
+{% endif %} + +
+
+
📡 All Nodes (Including Offline)
+
+ {% include 'node_search.html.j2' %} + {% include 'node_table.html.j2' %} +
+{% endblock %} \ No newline at end of file diff --git a/templates/chat.html.j2 b/templates/chat.html.j2 index b248786a..9471c659 100644 --- a/templates/chat.html.j2 +++ b/templates/chat.html.j2 @@ -95,7 +95,7 @@ padding: 3px 8px; display: inline-block;" title="SNR: {{ reception.rx_snr }}dB, RSSI: {{ reception.rx_rssi }}dBm{% if reception.hop_start is not none and reception.hop_limit is not none %}, Hops taken: {{ reception.hop_start - reception.hop_limit }} of {{ reception.hop_start }}){% endif %}"> - {{ nodes[node_id].short_name }} + {{ nodes[node_id].short_name|replace('"', '"') }} {% endif %} diff --git a/templates/chat2.html.j2 b/templates/chat2.html.j2 index 31a8d4e7..48393983 100644 --- a/templates/chat2.html.j2 +++ b/templates/chat2.html.j2 @@ -3,19 +3,52 @@ {% block title %}Chat | MeshInfo{% endblock %} +{% block og_title %}Chat | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Real-time chat messages from the {{ config['mesh']['short_name'] }} mesh network. View conversations, message propagation, and network activity.{% endblock %} +{% block og_image_width %}1200{% endblock %} +{% block og_image_height %}630{% endblock %} +{% block twitter_title %}Chat | {{ config['mesh']['name'] }}{% endblock %} +{% block twitter_description %}Real-time chat messages from the {{ config['mesh']['short_name'] }} mesh network. View conversations, message propagation, and network activity.{% endblock %} +{% block twitter_image_width %}1200{% endblock %} +{% block twitter_image_height %}630{% endblock %} + {% block head %} + {{ super() }} + {% endblock %} @@ -52,11 +475,39 @@ window.addEventListener('load', function() {
🗨️ Chat
- Showing {{ pagination.start_item }} to {{ pagination.end_item }} of {{ pagination.total }} messages + Showing {{ (pagination.page - 1) * pagination.per_page + 1 }} to {{ pagination.page * pagination.per_page if pagination.page * pagination.per_page < pagination.total else pagination.total }} of {{ pagination.total }} messages +
+
+
{% if pagination.has_prev %} - + {% else %} @@ -66,7 +517,7 @@ window.addEventListener('load', function() { {% endif %} {% if pagination.has_next %} - + {% else %} @@ -80,56 +531,189 @@ window.addEventListener('load', function() {
{% for message in chat %} -
+
-
- - {% if message.from in nodes %} - - {{ nodes[message.from].long_name }} ({{ nodes[message.from].short_name }}) - +
{# Make header a flex container #} + {# Group 1: Sender and optional Recipient #} +
{# Container for left-aligned items #} + {# Sender Span (Always Shown) #} + + {% if message.from in nodes %} + {% set sender = nodes[message.from] %} +
+ + {# Hardware Info #} + {% if sender.hw_model %} + HW: {{ sender.hw_model_name if sender.hw_model_name else 'Unknown' }}
+ {% endif %} + {% if sender.firmware_version %} + FW: {{ sender.firmware_version }}
+ {% endif %} + + {# Role Display #} + {% if sender.role is not none %} + Role: {{ sender.role_name }}
+ {% endif %} + + {# Owner #} + {% if sender.owner_username %} + Owner: {{ sender.owner_username }}
+ {% endif %} + + {# Location #} + {% if sender.position %} + {% if sender.position.geocoded %} + Loc: {{ sender.position.geocoded }}
+ {% elif sender.position.latitude is not none and sender.position.longitude is not none %} + Lat: {{ '%.5f'|format(sender.position.latitude) }}, Lon: {{ '%.5f'|format(sender.position.longitude) }} + {% if sender.position.altitude is not none %} + Alt: {{ sender.position.altitude }}m + {% endif %}
+ {% endif %} + {% endif %} + + {# Telemetry Status #} + {% if sender.telemetry %} + {% if sender.telemetry.battery_level is not none %} + Batt: {{ sender.telemetry.battery_level }}% + {% if sender.telemetry.voltage is not none %} + ({{ '%.2f'|format(sender.telemetry.voltage) }}V) + {% endif %}
+ {% elif sender.telemetry.voltage is not none %} + Voltage: {{ '%.2f'|format(sender.telemetry.voltage) }}V
+ {% endif %} + {# Optional: Environmentals #} + {% if sender.telemetry.temperature is not none %} Temp: {{ '%.1f'|format(sender.telemetry.temperature) }}°C {% endif %} + {% if sender.telemetry.relative_humidity is not none %} RH: {{ '%.1f'|format(sender.telemetry.relative_humidity) }}% {% endif %} + {% if sender.telemetry.barometric_pressure is not none %} Pres: {{ '%.1f'|format(sender.telemetry.barometric_pressure / 100) }}hPa {% endif %} + {% if sender.telemetry.temperature is not none or sender.telemetry.relative_humidity is not none or sender.telemetry.barometric_pressure is not none %}
{% endif %} + {% endif %} + + {# Last Seen #} + Last Seen: {{ time_ago(sender.ts_seen) }} +
"> + {{ sender.long_name|replace('"', '"') }} ({{ sender.short_name|replace('"', '"') }}) + + {% else %} + {{ message.from }} + {% endif %} + + + {# Direct Message Indicator and Recipient (Conditional) #} + {% if message.to != 'ffffffff' and message.to in nodes %} + + + + +
+ + {# Hardware Info #} + {% if nodes[message.to].hw_model %} + HW: {{ nodes[message.to].hw_model_name if nodes[message.to].hw_model_name else 'Unknown' }}
+ {% endif %} + {% if nodes[message.to].firmware_version %} + FW: {{ nodes[message.to].firmware_version }}
+ {% endif %} + + {# Role Display #} + {% if nodes[message.to].role is not none %} + Role: + {% set role_val = nodes[message.to].role %} + {% if role_val == 0 %}Client + {% elif role_val == 1 %}Client Mute + {% elif role_val == 2 %}Router + {% elif role_val == 3 %}Router Client + {% elif role_val == 4 %}Repeater + {% elif role_val == 5 %}Tracker + {% elif role_val == 6 %}Sensor + {% elif role_val == 7 %}TAK + {% elif role_val == 8 %}Client Hidden + {% elif role_val == 9 %}Lost and Found + {% elif role_val == 10 %}TAK Tracker + {% elif role_val == 11 %}Router Late + {% elif role_val == 12 %}Client Base + {% else %}Unknown ({{ role_val }}) + {% endif %}
+ {% endif %} + + {# Owner #} + {% if nodes[message.to].owner_username %} + Owner: {{ nodes[message.to].owner_username }}
+ {% elif nodes[message.to].owner %} + Owner: {{ nodes[message.to].owner }}
{# Fallback to email #} + {% endif %} + + {# Location #} + {% if nodes[message.to].position %} + {% if nodes[message.to].position.geocoded %} + Loc: {{ nodes[message.to].position.geocoded }}
+ {% elif nodes[message.to].position.latitude is not none and nodes[message.to].position.longitude is not none %} + Lat: {{ '%.5f'|format(nodes[message.to].position.latitude) }}, Lon: {{ '%.5f'|format(nodes[message.to].position.longitude) }} + {% if nodes[message.to].position.altitude is not none %} + Alt: {{ nodes[message.to].position.altitude }}m + {% endif %}
+ {% endif %} + {% endif %} + + {# Telemetry Status #} + {% if nodes[message.to].telemetry %} + {% if nodes[message.to].telemetry.battery_level is not none %} + Batt: {{ nodes[message.to].telemetry.battery_level }}% + {% if nodes[message.to].telemetry.voltage is not none %} + ({{ '%.2f'|format(nodes[message.to].telemetry.voltage) }}V) + {% endif %}
+ {% elif nodes[message.to].telemetry.voltage is not none %} + Voltage: {{ '%.2f'|format(nodes[message.to].telemetry.voltage) }}V
+ {% endif %} + {# Optional: Environmentals #} + {% if nodes[message.to].telemetry.temperature is not none %} Temp: {{ '%.1f'|format(nodes[message.to].telemetry.temperature) }}°C {% endif %} + {% if nodes[message.to].telemetry.relative_humidity is not none %} RH: {{ '%.1f'|format(nodes[message.to].telemetry.relative_humidity) }}% {% endif %} + {% if nodes[message.to].telemetry.barometric_pressure is not none %} Pres: {{ '%.1f'|format(nodes[message.to].telemetry.barometric_pressure / 100) }}hPa {% endif %} + {% if nodes[message.to].telemetry.temperature is not none or nodes[message.to].telemetry.relative_humidity is not none or nodes[message.to].telemetry.barometric_pressure is not none %}
{% endif %} + {% endif %} + + {# Last Seen #} + Last Seen: {{ time_ago(nodes[message.to].ts_seen) }} +
"> + {{ nodes[message.to].long_name|replace('"', '"') }} ({{ nodes[message.to].short_name|replace('"', '"') }}) + +
+ {% endif %} + {# End DM Indicator #} +
+ + {# Group 2: Channel, Timestamp, Map Icon #} +
{# Container for right-aligned items #} + + {{ time_ago(message.ts_created) }} + + {{ utils.get_channel_name(message.channel, use_short_names=True) }} + {% if message.from in nodes and nodes[message.from].position %} + + + {% else %} - {{ message.from }} + {% endif %} - - Ch {{ message.channel }} - - {{ time_ago(message.ts_created) }} - - {% if message.from in nodes and nodes[message.from].position and message.receptions %} - - - - {% else %} - - {% endif %} +
{{ message.text }}
{% if message.receptions %} -
+
-
+ "> - {{ node.short_name }} + {{ node.short_name|replace('"', '"') }} {% endif %} {% endfor %} @@ -160,7 +748,7 @@ window.addEventListener('load', function() { {% set node = nodes[node_id] %}
@@ -189,7 +777,7 @@ window.addEventListener('load', function() { {% if pagination.has_next %}
-
@@ -200,5 +788,21 @@ window.addEventListener('load', function() {
{% endif %} + +
-{% endblock %} \ No newline at end of file +{% endblock %} + + + + +{% macro format_duration(seconds) %} + {%- set s = seconds|int %} + {%- set m = s // 60 %} + {%- set s = s % 60 %} + {%- if m > 0 %}{{ m }}m {% endif %}{{ s }}s +{% endmacro %} \ No newline at end of file diff --git a/templates/graph.html.j2 b/templates/graph.html.j2 index 77514494..4077c976 100644 --- a/templates/graph.html.j2 +++ b/templates/graph.html.j2 @@ -2,6 +2,10 @@ {% extends "layout.html.j2" %} {% block title %}Graph | MeshInfo{% endblock %} +{% block og_title %}Network Graph | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Visualize the full {{ config['mesh']['short_name'] }} mesh network: see node connectivity, links, and network structure in real time.{% endblock %} +{% block twitter_title %}Network Graph | {{ config['mesh']['name'] }}{% endblock %} +{% block twitter_description %}Visualize the full {{ config['mesh']['short_name'] }} mesh network: see node connectivity, links, and network structure in real time.{% endblock %} {% block content %}
{{ this_page.title() }}
@@ -11,21 +15,180 @@ -{% endblock %} +{% endblock %} \ No newline at end of file diff --git a/templates/graph2.html.j2 b/templates/graph2.html.j2 new file mode 100644 index 00000000..f84c762a --- /dev/null +++ b/templates/graph2.html.j2 @@ -0,0 +1,292 @@ +{% set this_page = "graph" %} +{% extends "layout.html.j2" %} + +{% block title %}Graph | MeshInfo{% endblock %} +{% block content %} +
+
{{ this_page.title() }}
+ + +

+ Connections shown: Blue solid lines = Neighbor Info module messages, + Green dashed lines = LoRa message reception (Zero Hop). + Merged view shows both types. +

+
+
+ + + + + + + + + + +{% endblock %} \ No newline at end of file diff --git a/templates/graph3.html.j2 b/templates/graph3.html.j2 new file mode 100644 index 00000000..ff289b8b --- /dev/null +++ b/templates/graph3.html.j2 @@ -0,0 +1,335 @@ +{% set this_page = "graph" %} +{% extends "layout.html.j2" %} + +{% block title %}Graph | MeshInfo{% endblock %} +{% block content %} +
+
{{ this_page.title() }}
+ + +

+ Connections shown: Blue solid lines = Neighbor Info module messages, + Green dashed lines = LoRa message reception (Zero Hop). + Merged view shows both types. +

+
+
+ + + + + + + + + + + + + + +{% endblock %} \ No newline at end of file diff --git a/templates/graph4.html.j2 b/templates/graph4.html.j2 new file mode 100644 index 00000000..16badfbd --- /dev/null +++ b/templates/graph4.html.j2 @@ -0,0 +1,343 @@ +{% set this_page = "graph" %} +{% extends "layout.html.j2" %} + +{% block title %}Graph | MeshInfo{% endblock %} + +{% block og_title %}Network Graph | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Interactive network graph showing node connections and relationships in the {{ config['mesh']['short_name'] }} mesh network. View neighbor info and zero-hop connections.{% endblock %} +{% block og_image_width %}1200{% endblock %} +{% block og_image_height %}630{% endblock %} +{% block twitter_title %}Network Graph | {{ config['mesh']['name'] }}{% endblock %} +{% block twitter_description %}Interactive network graph showing node connections and relationships in the {{ config['mesh']['short_name'] }} mesh network. View neighbor info and zero-hop connections.{% endblock %} +{% block twitter_image_width %}1200{% endblock %} +{% block twitter_image_height %}630{% endblock %} + +{% block head %} + {{ super() }} +{% endblock %} + +{% block content %} +
+
{{ this_page.title() }}
+ + +

+ Connections shown: Blue solid lines = Neighbor Info module messages, + Green dashed lines = LoRa message reception (Zero Hop). + Merged view shows both types. +

+
+
+ + + + + + + + + + + + + +{% endblock %} \ No newline at end of file diff --git a/templates/index.html.j2 b/templates/index.html.j2 index 50c34581..75145d3c 100644 --- a/templates/index.html.j2 +++ b/templates/index.html.j2 @@ -2,14 +2,23 @@ {% block title %}{{ config["mesh"]["name"] }}{% endblock %} +{% block og_title %}{{ mesh_name }} - Mesh Network Overview{% endblock %} +{% block og_description %}Welcome to MeshInfo for the {{ mesh_region }} region! View active nodes, network coverage, and community info for the {{ mesh_short_name }} Meshtastic mesh network.{% endblock %} +{% block twitter_title %}{{ mesh_name }} - Mesh Network Overview{% endblock %} +{% block twitter_description %}Welcome to MeshInfo for the {{ mesh_region }} region! View active nodes, network coverage, and community info for the {{ mesh_short_name }} Meshtastic mesh network.{% endblock %} + {% block content %} +{% set mesh_name = config['mesh'].get('name', 'Mesh Network') or 'Mesh Network' %} +{% set mesh_region = config['mesh'].get('region', 'your region') or 'your region' %} +{% set mesh_short_name = config['mesh'].get('short_name', 'Mesh') or 'Mesh' %} +
- logo -

{{ config["mesh"]["name"] }}

+ logo +

{{ mesh_name }}

- This site provides information on {{ active_nodes|length }} active nodes in the Puget Sound Meshtastic network. + This site provides information on {{ active_nodes|length }} active nodes in the {{ mesh_region }} region ({{ mesh_short_name }}) network.

Last updated: {{ format_timestamp(timestamp.timestamp()) }}

@@ -23,7 +32,7 @@

- MeshInfo is a web-based tool which collects and visualizes data from Meshtastic nodes across the Puget Sound region. It provides visibility into our PugetMesh community mesh network, helping both new and experienced members understand network coverage, identify growth opportunities, and troubleshoot connectivity issues. + MeshInfo is a web-based tool which collects and visualizes data from Meshtastic nodes across the {{ mesh_region }} region. It provides visibility into your community mesh network, helping both new and experienced members understand network coverage, identify growth opportunities, and troubleshoot connectivity issues.

Through MeshInfo, you can view node locations on maps, check detailed node information including telemetry data, visualize node connections, and search for specific nodes by ID or name. @@ -53,7 +62,7 @@ Enable Position Sharing (optional but recommended): Go to Settings > Channels > LongFast and set "Position Enabled" to TRUE. Set "Uplink Enabled" to TRUE. This allows your node's position and recieved messages to be shared with other nodes and the MQTT server.

  • - Set Position Precision: Set position precision slider to 1194ft (364m). This is the most accurate setting that will still show up on maps. You can use higher precision (lower distance values), but they will be rounded to this level for privacy reasons. Settings with lower precision (larger distances) will appear as configured. + Set Position Precision: Set the position precision slider to a value you are comfortable with sharing. The map and node information pages will represent your position with the accuracy that you select for public channels and MapInfo packets.
  • Configure Your Position: Either use GPS (if your device has one) or set a fixed position via Settings > Position. @@ -61,19 +70,19 @@

    Note: Your node will still appear in MeshInfo without location data, but distance and direction information to other nodes won't be available.

    -

    PugetMesh MQTT Connection (recommended):

    -

    Important: Users who don't connect directly to the PugetMesh MQTT server won't be sharing information with other users in the region. Connecting your node to our MQTT server allows your data to contribute to the regional mesh network and helps everyone better understand network coverage.

    -

    To connect your node directly to the PugetMesh MQTT server, configure these settings in Settings > Modules > MQTT:

    +

    MQTT Connection (recommended):

    +

    Important: Users who connect to the MQTT can share what their node is hearing on the LoRa network. Connecting your node to the MQTT server allows your data to contribute to analysis of the regional mesh network, and helps everyone better understand network coverage.

    +

    To connect your node to the {{ mesh_short_name }} MQTT server, configure these settings in Settings > Modules > MQTT:

    • - Address: mqtt.davekeogh.com + Address: {{ config['mqtt']['broker'] }}
    • - Username: meshdev + Username: {{ config['mqtt']['username'] }}
    • - Password: large4cats + Password: {{ config['mqtt']['password'] }}
    • Encryption Enabled: Yes @@ -85,7 +94,7 @@ TLS Enabled: No
    • - Root topic: msh/US + Root topic: {{ config['mqtt']['topic'].rstrip('/#') }}
    @@ -106,11 +115,15 @@ Benefits: When enabled across multiple nodes, this helps build a comprehensive graph of the mesh network, showing how nodes are connected and the quality of links between them (displayed as SNR values).
  • - + {% if config['mesh'].get('config_url') %}

    - For complete setup instructions, visit the PugetMesh configuration page: + For complete setup instructions, visit the {{ mesh_short_name }} configuration page:

    - View Full Configuration Guide + + + {% endif %}
    @@ -120,15 +133,22 @@
    -

    Join the PugetMesh Community

    +

    Join the {{ mesh_short_name }} Community

    - PugetMesh is a volunteer-driven group supporting mesh networks in the Puget Sound region. We focus on building a resilient off-grid communication network using Meshtastic technology. + {{ mesh_short_name }} is a volunteer-driven group supporting mesh networks in your region. We focus on building a resilient off-grid communication network using Meshtastic technology.

    - Join Our Discord - Visit PugetMesh.org + {% if config['mesh'].get('discord_url') %} + + + + + Join Our Discord + + {% endif %} + Visit {{ mesh_short_name }} Website
    diff --git a/templates/layout.html.j2 b/templates/layout.html.j2 index 159e68ff..3df7541d 100644 --- a/templates/layout.html.j2 +++ b/templates/layout.html.j2 @@ -4,17 +4,34 @@ {% macro snr_badge(snr) %} {% if snr is not none %} {# Check if snr exists and is not None #} {% if snr > 0 %} - {{ "%.1f"|format(snr) }} dB + {{ "%.1f"|format(snr) }} dB {% elif snr > -5 %} - {{ "%.1f"|format(snr) }} dB + {{ "%.1f"|format(snr) }} dB {% elif snr > -10 %} - {{ "%.1f"|format(snr) }} dB + {{ "%.1f"|format(snr) }} dB {% else %} - {{ "%.1f"|format(snr) }} dB + {{ "%.1f"|format(snr) }} dB {% endif %} {% endif %} {% endmacro %} +{% macro page_icon(page_name) %} + {% set icon_map = { + 'chat': 'bi-chat-dots', + 'graph': 'bi-diagram-3', + 'map': 'bi-geo-alt', + 'nodes': 'bi-hdd-network', + 'neighbors': 'bi-people', + 'telemetry': 'bi-graph-up', + 'traceroutes': 'bi-arrow-repeat', + 'logs': 'bi-journal-text', + 'metrics': 'bi-speedometer2', + 'my nodes': 'bi-person-check' + } %} + {% set icon = icon_map.get(page_name.lower(), 'bi-circle') %} + +{% endmacro %} + {% block title %}{% endblock %} @@ -22,9 +39,45 @@ - + + + + {% block head %} + + + + + + + + + + - {% block head %}{% endblock %} + + + + + + + {% endblock %} @@ -36,35 +89,58 @@ diff --git a/templates/login.html.j2 b/templates/login.html.j2 index d7241a06..8e883006 100644 --- a/templates/login.html.j2 +++ b/templates/login.html.j2 @@ -3,14 +3,34 @@ {% block title %}Login | MeshInfo{% endblock %} +{% block head %} + +{% endblock %} + {% block content %}

    Login

    - - + + + You can login with either your email address or username.
    diff --git a/templates/logs.html.j2 b/templates/logs.html.j2 index 0cf68fff..cc215baf 100644 --- a/templates/logs.html.j2 +++ b/templates/logs.html.j2 @@ -3,13 +3,81 @@ {% block title %}MQTT | MeshInfo{% endblock %} +{% block og_title %}Logs | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Browse message and event logs for the {{ config['mesh']['short_name'] }} mesh network. Track activity, errors, and network events.{% endblock %} +{% block og_image_width %}1200{% endblock %} +{% block og_image_height %}630{% endblock %} +{% block twitter_title %}Logs | {{ config['mesh']['name'] }}{% endblock %} +{% block twitter_description %}Browse message and event logs for the {{ config['mesh']['short_name'] }} mesh network. Track activity, errors, and network events.{% endblock %} +{% block twitter_image_width %}1200{% endblock %} +{% block twitter_image_height %}630{% endblock %} + +{% block head %} + {{ super() }} + +{% endblock %} + {% block content %}
    -
    MQTT Messages
    +
    +
    📋 MQTT Messages
    +

    All messages received by MQTT. Only the messages received since this server was last restarted are shown.

    + + +
    +
    +
    + +
    + + +
    +
    + + +
    +
    + + +
    +
    + Showing all messages + +
    + +
    +
    +
    +
    @@ -19,9 +87,9 @@ - + {% for message in logs %} - +
    Message
    {% if message.ts_created %} {{ format_timestamp(message.ts_created) }} @@ -37,4 +105,276 @@
    + + {% endblock %} \ No newline at end of file diff --git a/templates/map.html.j2 b/templates/map.html.j2 index f77ce806..af39c867 100644 --- a/templates/map.html.j2 +++ b/templates/map.html.j2 @@ -3,17 +3,117 @@ {% block title %}Map | MeshInfo{% endblock %} {% block head %} - - + {% include "map_dependencies.html.j2" %} {% endblock %} +{% block og_title %}Live Node Map | {{ config['mesh']['name'] }}{% endblock %} +{% block og_description %}Explore the live map of all nodes in the {{ config['mesh']['short_name'] }} Meshtastic mesh network. See node locations, coverage, and activity in real time.{% endblock %} +{% block twitter_title %}Live Node Map | {{ config['mesh']['name'] }}{% endblock %} +{% block twitter_description %}Explore the live map of all nodes in the {{ config['mesh']['short_name'] }} Meshtastic mesh network. See node locations, coverage, and activity in real time.{% endblock %} + {% block content %}
    -
    + + {# Settings Toggle Button #} + + + {# Settings Panel - Hidden by default #} + + + {# Node Detail Popup #} + -
    + + {# Node Info Panel - Hidden by default #} +