diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..b09eeac --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,159 @@ +# Changelog + +All notable changes to the GPU Tracker project. + +## [1.1.0] - 2026-01-31 + +### Added + +#### Command-Line Interface +- **Sampling interval configuration** (`-interval` flag): Configure custom sampling rates from 1 second to any duration +- **Database path customization** (`-db` flag): Specify custom location for SQLite database +- **Version information** (`-version` flag): Display application version + +#### Operating Modes +- **One-shot mode** (`-once` flag): Sample GPU state once and exit without starting TUI +- **Continuous monitoring mode** (`-continuous` flag): Background sampling with automatic database saves +- **User listing mode** (`-list-users` flag): Quick summary of GPU users and their memory consumption + +#### Export Functionality +- **JSON export** (`-export json`): Export complete snapshot data in JSON format +- **CSV export** (`-export csv`): Export data in CSV format for spreadsheet analysis +- **Output file specification** (`-output` flag): Direct export output to file or stdout +- Export includes full GPU metrics, process information, and timestamps + +#### Alert System +- **Temperature threshold alerts** (`-max-temp` flag): Configurable temperature warnings (default: 90°C) +- **Memory threshold alerts** (`-max-mem` flag): Configurable memory utilization warnings (default: 95%) +- Visual alert indicators in TUI with red highlights +- Alert messages in stderr for continuous and one-shot modes +- Emoji indicators (⚠️) for better visibility + +#### TUI Enhancements +- **User filtering** (key: `f`): Cycle through users to focus on specific user activity +- **GPU filtering** (key: `g`): Filter view by specific GPU in multi-GPU systems +- **Memory sorting** (key: `m`): Toggle sorting of processes by memory usage +- **Clear filters** (key: `c`): Reset all active filters to full view +- **Active 
filter indicators**: Visual display of currently applied filters in header +- **Highlighted filtered items**: Active filters shown with arrow indicators (►) + +#### Configuration System +- New `Config` struct for passing configuration to TUI +- `NewWithConfig()` function for advanced TUI initialization +- Backward compatible with existing `New()` function + +#### Documentation +- **FEATURES.md**: Comprehensive feature documentation with examples +- **QUICKSTART.md**: Quick reference guide for common operations +- **CHANGELOG.md**: Version history and changes +- Enhanced README.md with usage examples and integration guides +- Advanced use case examples (monitoring, alerting, cron jobs) +- Troubleshooting section expanded + +### Changed +- Main function refactored to support multiple operating modes +- TUI model enhanced with filtering and configuration support +- View rendering updated to use filtered snapshots +- Help text expanded with new keyboard shortcuts +- Sample interval now configurable (was hardcoded to 5 seconds) +- Export functions integrated directly in main package + +### Improved +- Better separation of concerns (sampling, storage, display, export) +- More flexible architecture for future extensions +- Enhanced error handling in export functions +- Clearer status messages in TUI +- More informative help overlay +- Better user experience with filtering indicators + +### Technical Details +- Added `encoding/json` and `encoding/csv` imports +- Added `flag` package for CLI argument parsing +- Enhanced model struct with filter fields +- New helper functions: `getUniqueUsers()`, `getFilteredSnapshot()` +- Alert checking logic separated into `checkAlerts()` function +- Export logic in `exportToJSON()` and `exportToCSV()` functions + +### Backward Compatibility +- All existing features work unchanged +- Database format unchanged - existing databases fully compatible +- Default behavior preserved when no flags specified +- Existing keyboard shortcuts unchanged 
+- API compatible with version 1.0.x + +## [1.0.0] - Initial Release + +### Features +- Live GPU monitoring via nvidia-smi +- Per-user GPU memory tracking +- Historical snapshot storage in SQLite +- Interactive Terminal UI (TUI) using Bubble Tea +- Auto-recording with 5-second intervals +- Manual snapshot saving +- History browsing by date and snapshot +- GPU utilization and temperature display +- Process tracking with PID to user mapping +- Beautiful lipgloss-styled interface +- Help overlay with keyboard shortcuts + +### Core Components +- `/cmd/gpu-tracker/main.go`: Application entry point +- `/internal/sampler/nvidia.go`: NVIDIA GPU sampling logic +- `/internal/store/store.go`: SQLite database operations +- `/internal/tui/`: Terminal UI components +- `/internal/types/types.go`: Data structures +- `/internal/util/proc_unix.go`: Process to user mapping + +### Initial Keyboard Shortcuts +- `a`: Toggle auto-recording +- `r`: Refresh snapshot +- `s`: Save snapshot +- `h`: Toggle history mode +- `←/→`: Navigate snapshots +- `↑/↓`: Navigate days +- `t`: Jump to today +- `q`: Quit +- `?`: Toggle help + +### System Requirements +- Go 1.21+ +- Linux OS +- NVIDIA GPU with drivers +- nvidia-smi utility +- gcc for CGO (SQLite) + +--- + +## Version Numbering + +This project follows [Semantic Versioning](https://semver.org/): +- MAJOR version for incompatible API changes +- MINOR version for added functionality (backward compatible) +- PATCH version for backward compatible bug fixes + +## Upgrade Path + +### From 1.0.x to 1.1.0 +No special steps required: +1. Replace binary with new version +2. All existing data preserved +3. New features available via flags and key shortcuts +4. 
No configuration changes needed + +## Future Plans + +### Planned for 1.2.0 +- AMD GPU support (ROCm) +- Web dashboard option +- Prometheus exporter +- Email notification system +- Historical data analysis tools +- Multi-host aggregation + +### Under Consideration +- Intel GPU support +- Remote monitoring +- REST API server mode +- Grafana dashboard templates +- Docker container deployment +- Configuration file support diff --git a/FEATURES.md b/FEATURES.md new file mode 100644 index 0000000..48603a5 --- /dev/null +++ b/FEATURES.md @@ -0,0 +1,269 @@ +# GPU Tracker - New Features Summary + +This document describes all the new features and options added to the GPU tracker application. + +## Version 1.1.0 - New Features + +### 1. Command-Line Flags + +The application now supports comprehensive command-line configuration: + +#### Basic Options +- **`-interval <seconds>`**: Set custom sampling interval (default: 5 seconds) + - Example: `./gpuwatch -interval 10` + +- **`-db <path>`**: Specify custom database location + - Example: `./gpuwatch -db /custom/path/gpuwatch.db` + +- **`-version`**: Display version information + - Example: `./gpuwatch -version` + +#### Operation Modes +- **`-once`**: Sample once and exit without starting the TUI + - Use case: Quick status check or scripting + - Example: `./gpuwatch -once` + +- **`-continuous`**: Continuous background monitoring mode + - Samples at regular intervals and saves to database + - No TUI, runs until stopped with Ctrl+C + - Example: `./gpuwatch -continuous -interval 30` + +- **`-list-users`**: List all users currently using GPUs + - Quick summary of user memory consumption + - Example: `./gpuwatch -list-users` + +#### Export Options +- **`-export <format>`**: Export snapshot data (formats: `json`, `csv`) + - JSON: Full structured data export + - CSV: Tabular format for spreadsheets + - Example: `./gpuwatch -export json` + +- **`-output <file>`**: Specify output file for exports + - If omitted, prints to stdout + - Example: `./gpuwatch -export csv 
-output report.csv` + +#### Alert Thresholds +- **`-max-temp <celsius>`**: GPU temperature alert threshold (default: 90°C) + - Visual alerts in TUI when exceeded + - Example: `./gpuwatch -max-temp 85` + +- **`-max-mem <percent>`**: Memory utilization alert threshold (default: 95%) + - Visual alerts in TUI when exceeded + - Example: `./gpuwatch -max-mem 90` + +### 2. Export Functionality + +#### JSON Export +Exports complete snapshot data in JSON format including: +- Full GPU information (utilization, memory, temperature, power) +- Process details (PID, name, user, memory usage) +- Timestamp information + +**Example:** +```bash +./gpuwatch -export json -output snapshot.json +``` + +**Use cases:** +- Integration with monitoring systems +- Data analysis with jq or Python +- API consumption + +#### CSV Export +Exports data in CSV format suitable for spreadsheets: +- Columns: Timestamp, GPU Index, GPU Name, Utilization %, Memory %, Temperature, Power, PID, Process, User, Memory MB +- Easy to import into Excel, Google Sheets, or process with awk/sed + +**Example:** +```bash +./gpuwatch -export csv -output report.csv +``` + +### 3. TUI Filtering and Display Options + +#### Filter by User (Key: `f`) +- Cycle through users to filter view by specific user +- Shows only processes belonging to the selected user +- Indicator shows active user filter +- Press `f` multiple times to cycle through all users +- Clear with `c` + +#### Filter by GPU (Key: `g`) +- Cycle through GPUs to focus on specific GPU +- Shows only the selected GPU and its processes +- Useful for multi-GPU systems +- Press `g` multiple times to cycle through all GPUs +- Clear with `c` + +#### Sort by Memory (Key: `m`) +- Toggle sorting of processes by memory usage +- Helps identify memory-intensive processes +- Works in combination with other filters + +#### Clear Filters (Key: `c`) +- Resets all active filters +- Returns to full system view + +### 4. 
Alert System + +#### Visual Indicators +- **Temperature Alerts**: Red warning when GPU temperature exceeds threshold +- **Memory Alerts**: Red warning when memory utilization exceeds threshold +- Displayed directly in TUI next to GPU stats +- Example: `⚠️ HIGH TEMP 92°C` + +#### Command-Line Alerts +In one-shot and continuous modes, alerts are printed to stderr: +``` +⚠️ ALERT: GPU 0 (NVIDIA GeForce RTX 3090) temperature 91.0°C exceeds threshold 90.0°C +``` + +This allows for easy integration with monitoring scripts and email alerts. + +### 5. Multiple Operating Modes + +#### Interactive TUI Mode (Default) +- Full terminal user interface +- Real-time updates +- History browsing +- Filtering and sorting + +**Usage:** +```bash +./gpuwatch +``` + +#### One-Shot Mode +- Sample once and display +- Perfect for scripting +- Can be combined with export + +**Usage:** +```bash +./gpuwatch -once +./gpuwatch -once -export json +``` + +#### Continuous Mode +- Background monitoring +- Automatic database saves +- No TUI overhead +- Alert notifications to stderr + +**Usage:** +```bash +./gpuwatch -continuous -interval 60 >> /var/log/gpuwatch.log 2>&1 +``` + +#### List Users Mode +- Quick summary of GPU users +- Shows memory usage per user +- Fast and lightweight + +**Usage:** +```bash +./gpuwatch -list-users +``` + +## Integration Examples + +### Cron Job for Regular Monitoring +```bash +# Sample every 10 minutes +*/10 * * * * /usr/local/bin/gpuwatch -continuous -interval 600 >> /var/log/gpuwatch.log 2>&1 +``` + +### Email Alerts on High Temperature +```bash +#!/bin/bash +./gpuwatch -once -max-temp 85 2>&1 | grep "ALERT" && \ + echo "GPU temperature alert!" 
| mail -s "GPU Alert" admin@example.com +``` + +### Daily CSV Report +```bash +#!/bin/bash +DATE=$(date +%Y-%m-%d) +./gpuwatch -export csv -output "/reports/gpu-report-${DATE}.csv" +``` + +### JSON API Integration +```bash +# Get current GPU data as JSON +curl -X POST https://api.example.com/gpu-metrics \ + -H "Content-Type: application/json" \ + -d "$(./gpuwatch -export json)" +``` + +### Process with jq +```bash +# Get GPU 0 temperature +./gpuwatch -export json | jq '.GPUs[0].TempC' + +# Get all users and their memory usage +./gpuwatch -export json | jq '.Procs[] | {user: .User, mem: .UsedMemMB}' + +# Check if any GPU is over 80% memory +./gpuwatch -export json | jq '.GPUs[] | select(.UtilMem > 80)' +``` + +## Updated Key Bindings + +### Navigation & Actions +- `a` - Toggle auto-recording +- `r` - Refresh snapshot once +- `s` - Save snapshot manually +- `h` - Toggle History mode +- `←/→` - Previous/Next snapshot (in History) +- `↑/↓` - Previous/Next day (in History) +- `t` - Jump to today/live mode +- `q` - Quit +- `?` - Toggle help overlay + +### NEW: Filters & Display +- `f` - Cycle through users to filter +- `g` - Cycle through GPUs to filter +- `m` - Toggle sort by memory usage +- `c` - Clear all active filters + +## Configuration Best Practices + +### Development/Testing +```bash +# Fast sampling, low thresholds +./gpuwatch -interval 2 -max-temp 70 -max-mem 80 +``` + +### Production Monitoring +```bash +# Moderate sampling, reasonable thresholds +./gpuwatch -interval 30 -max-temp 85 -max-mem 90 +``` + +### Long-term Data Collection +```bash +# Slow sampling, save resources +./gpuwatch -continuous -interval 300 -db /data/gpuwatch/history.db +``` + +### Emergency Debugging +```bash +# Very fast sampling, strict alerts +./gpuwatch -interval 1 -max-temp 75 -max-mem 85 +``` + +## Upgrade Notes + +### Breaking Changes +None - All new features are opt-in via flags or keyboard shortcuts. 
+ +### Default Behavior +- Default sampling interval remains 5 seconds +- Alert thresholds default to 90°C and 95% +- TUI mode is still the default when no flags are specified +- Database location unchanged: `~/.local/share/gpuwatch/gpuwatch.db` + +### Compatibility +- Existing database files work without modification +- Old keyboard shortcuts remain unchanged +- New shortcuts don't conflict with existing ones diff --git a/QUICKSTART.md b/QUICKSTART.md new file mode 100644 index 0000000..9a8fb92 --- /dev/null +++ b/QUICKSTART.md @@ -0,0 +1,226 @@ +# GPU Tracker - Quick Reference + +## Installation +```bash +git clone https://github.com/iranailab/gpu-tracker +cd gpu-tracker +go mod tidy +go build -o gpuwatch ./cmd/gpu-tracker +``` + +## Quick Start +```bash +# Basic TUI mode +./gpuwatch + +# Sample once and see results +./gpuwatch -once + +# List users using GPUs +./gpuwatch -list-users + +# Export to JSON +./gpuwatch -export json + +# Export to CSV file +./gpuwatch -export csv -output report.csv +``` + +## Command-Line Flags + +| Flag | Type | Default | Description | +|------|------|---------|-------------| +| `-interval` | int | 5 | Sampling interval (seconds) | +| `-db` | string | `~/.local/share/gpuwatch/gpuwatch.db` | Database path | +| `-once` | bool | false | Sample once and exit | +| `-continuous` | bool | false | Continuous monitoring mode | +| `-export` | string | - | Export format: json, csv | +| `-output` | string | stdout | Export output file | +| `-list-users` | bool | false | List GPU users and exit | +| `-max-temp` | float | 90.0 | Temperature alert (°C) | +| `-max-mem` | float | 95.0 | Memory alert (%) | +| `-version` | bool | false | Show version | + +## TUI Keyboard Shortcuts + +### Basic Controls +| Key | Action | +|-----|--------| +| `q` | Quit | +| `?` | Toggle help | +| `a` | Toggle auto-record | +| `r` | Refresh now | +| `s` | Save snapshot | + +### Navigation +| Key | Action | +|-----|--------| +| `h` | Toggle history mode | +| `t` | Jump to 
today/live | +| `←` | Previous snapshot | +| `→` | Next snapshot | +| `↑` | Previous day | +| `↓` | Next day | + +### Filters (NEW) +| Key | Action | +|-----|--------| +| `f` | Cycle user filter | +| `g` | Cycle GPU filter | +| `m` | Sort by memory | +| `c` | Clear filters | + +## Common Use Cases + +### Monitor in Real-Time +```bash +./gpuwatch +# Press 'a' to enable auto-recording +# Press 'f' to filter by user +# Press 'g' to filter by GPU +``` + +### Background Monitoring +```bash +# Sample every 30 seconds +./gpuwatch -continuous -interval 30 & + +# Or with log file +./gpuwatch -continuous -interval 60 >> gpu.log 2>&1 & +``` + +### Generate Reports +```bash +# Daily CSV report +./gpuwatch -export csv -output "gpu-$(date +%F).csv" + +# JSON for API +./gpuwatch -export json | curl -X POST https://api.example.com/metrics -d @- +``` + +### Check Specific User +```bash +# List all users first +./gpuwatch -list-users + +# Then monitor in TUI and press 'f' to cycle to that user +./gpuwatch +``` + +### Alert on High Temperature +```bash +#!/bin/bash +./gpuwatch -once -max-temp 80 2>&1 | grep -q "ALERT" && \ + echo "High GPU temp detected!" 
| mail -s "GPU Alert" you@example.com +``` + +### Data Analysis +```bash +# Extract GPU names +./gpuwatch -export json | jq -r '.GPUs[].Name' + +# Total memory usage +./gpuwatch -export csv | awk -F',' 'NR>1 {sum+=$13} END {print sum " MB"}' + +# Users sorted by memory +./gpuwatch -export json | jq -r '.Procs[] | "\(.User) \(.UsedMemMB)"' | sort -k2 -rn +``` + +### Cron Jobs +```bash +# Every 5 minutes, check and alert +*/5 * * * * /usr/local/bin/gpuwatch -once -max-temp 85 -max-mem 90 2>&1 | logger -t gpuwatch + +# Hourly CSV snapshot +0 * * * * /usr/local/bin/gpuwatch -export csv -output "/data/gpu-$(date +\%H).csv" + +# Daily database backup +0 0 * * * cp ~/.local/share/gpuwatch/gpuwatch.db ~/backups/gpuwatch-$(date +\%F).db +``` + +## Troubleshooting + +### nvidia-smi not found +```bash +# Test nvidia-smi +nvidia-smi -L + +# Add to PATH if needed +export PATH=$PATH:/usr/local/cuda/bin +``` + +### Permission denied +```bash +# Run as regular user, not root +./gpuwatch + +# If needed, check file permissions +ls -l gpuwatch +chmod +x gpuwatch +``` + +### Database locked +```bash +# Only one instance can write +# Use different database for multiple instances +./gpuwatch -db /tmp/gpuwatch1.db & +./gpuwatch -db /tmp/gpuwatch2.db & +``` + +### High CPU usage +```bash +# Increase sampling interval +./gpuwatch -interval 30 + +# Or in continuous mode +./gpuwatch -continuous -interval 60 +``` + +## Tips & Tricks + +1. **Combine filters**: Use `f` and `g` together to see specific user on specific GPU +2. **Export for analysis**: Regular CSV exports make trend analysis easy +3. **Background monitoring**: Run with `-continuous` on server startup +4. **Custom intervals**: Match sampling to your workload (fast for dev, slow for production) +5. **Alert tuning**: Adjust `-max-temp` and `-max-mem` based on your hardware +6. **History browsing**: Use `h` to enter history, then arrow keys to explore +7. **Quick checks**: Use `-list-users` for fast overview without TUI +8. 
**Scripting**: Use `-once` with `-export json` for automation + +## Environment + +### Recommended +- Go 1.22+ +- Linux with NVIDIA GPU +- nvidia-smi in PATH +- Terminal with 24-bit color support + +### Minimum +- Go 1.21+ +- Linux with NVIDIA drivers +- nvidia-smi accessible +- Any terminal + +## Files & Locations + +``` +~/.local/share/gpuwatch/ + └── gpuwatch.db # Default database + +./gpuwatch # Binary +./FEATURES.md # Detailed feature documentation +./README.md # Full documentation +``` + +## Getting Help + +1. Press `?` in TUI for keyboard shortcuts +2. Run `./gpuwatch -h` for flag help +3. Check FEATURES.md for detailed examples +4. See README.md for full documentation + +## Version + +Current: **1.1.0** + +Run `./gpuwatch -version` to check your version. diff --git a/README.md b/README.md index 98c2bc6..34defe0 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,21 @@ Powered by [Bubble Tea](https://github.com/charmbracelet/bubbletea), [Lip Gloss] - **Historical Browsing:** All usage snapshots are stored; navigate through any day and view every snapshot. +- **Flexible Export:** + Export snapshots to JSON or CSV format for analysis and reporting. + +- **Advanced Filtering:** + Filter by specific user or GPU, clear view for focused monitoring. + +- **Alert Thresholds:** + Configurable temperature and memory usage alerts with visual indicators. + +- **Multiple Modes:** + TUI mode, one-shot sampling, continuous background monitoring, or export mode. + +- **Customizable Sampling:** + Configure sampling intervals and database location via command-line flags. + - **Elegant Terminal UI:** Beautiful, colorful, and informative display with keybindings for productivity. 
@@ -62,11 +77,92 @@ go build -o gpuwatch ./cmd/gpu-tracker ## Usage -**Key bindings inside the TUI:** +### Running the TUI (Default Mode) + +```bash +./gpuwatch +``` + +### Command-Line Options + +```bash +./gpuwatch [OPTIONS] +``` + +**Available options:** + +| Flag | Description | Default | +|------|-------------|----------| +| `-interval` | Sampling interval in seconds | 5 | +| `-db` | Custom database path | `~/.local/share/gpuwatch/gpuwatch.db` | +| `-once` | Sample once and exit (no TUI) | false | +| `-continuous` | Continuously sample and save without TUI | false | +| `-export` | Export format: `json` or `csv` | - | +| `-output` | Output file for export (default: stdout) | - | +| `-list-users` | List all users using GPUs and exit | false | +| `-max-temp` | Alert threshold for GPU temperature (°C) | 90.0 | +| `-max-mem` | Alert threshold for memory usage (%) | 95.0 | +| `-version` | Show version information | false | + +### Usage Examples + +**1. Basic TUI mode with default settings:** +```bash +./gpuwatch +``` + +**2. Custom sampling interval (10 seconds):** +```bash +./gpuwatch -interval 10 +``` + +**3. One-shot sampling (sample once and display):** +```bash +./gpuwatch -once +``` + +**4. Continuous background monitoring:** +```bash +./gpuwatch -continuous -interval 30 +``` + +**5. Export current snapshot to JSON:** +```bash +./gpuwatch -export json -output snapshot.json +``` + +**6. Export current snapshot to CSV:** +```bash +./gpuwatch -export csv -output snapshot.csv +``` + +**7. Export to stdout (pipe to other tools):** +```bash +./gpuwatch -export json | jq '.GPUs[0].Name' +``` + +**8. List users currently using GPUs:** +```bash +./gpuwatch -list-users +``` + +**9. Custom alert thresholds:** +```bash +./gpuwatch -max-temp 80 -max-mem 90 +``` + +**10. 
Custom database location:** +```bash +./gpuwatch -db /path/to/custom/gpuwatch.db +``` + +### TUI Key Bindings + +**Navigation & Actions:** | Key | Action | | ------- | -------------------------------------- | -| `a` | Toggle auto-recording (live, every 5s) | +| `a` | Toggle auto-recording (live, configurable interval) | | `r` | Refresh snapshot once | | `s` | Save a snapshot manually | | `h` | Toggle History mode | @@ -76,16 +172,43 @@ go build -o gpuwatch ./cmd/gpu-tracker | `q` | Quit | | `?` | Toggle help overlay | +**Filters & Display:** + +| Key | Action | +| ------- | -------------------------------------- | +| `f` | Cycle through users to filter | +| `g` | Cycle through GPUs to filter | +| `m` | Toggle sort by memory usage | +| `c` | Clear all active filters | + --- ## How It Works * **Sampling:** The app runs `nvidia-smi` to capture GPU/process stats. For each process, it maps PID → UID (via `/proc//status`) → username (`/etc/passwd`). + * **History:** Snapshots are saved to SQLite on disk. Auto-recording can be toggled or snapshots saved manually. + * **Browsing:** Switch to history mode and browse by day/snapshot, all within the TUI. + +* **Filtering:** + Filter the view by specific users or GPUs to focus on relevant data. Use keyboard shortcuts to cycle through available filters. + +* **Alerts:** + Visual indicators appear when GPU temperature or memory usage exceeds configured thresholds. Alerts are also shown in continuous mode. + +* **Export:** + Export snapshots to JSON or CSV format for integration with other tools, reporting, or analysis. + +* **Modes:** + - **TUI Mode (default):** Interactive terminal UI with real-time updates + - **One-shot Mode:** Sample once and display/export + - **Continuous Mode:** Background monitoring that saves snapshots automatically + - **List Mode:** Quick overview of current GPU users + * **Extensible:** Sampler and database logic are separated—add support for AMD (ROCm), NVML, or other GPUs easily. 
@@ -110,6 +233,50 @@ go build -o gpuwatch ./cmd/gpu-tracker --- +## Advanced Use Cases + +### Integration with Monitoring Systems + +**Prometheus/Grafana Integration:** +```bash +# Export to JSON and parse with jq +./gpuwatch -export json | jq -r '.GPUs[] | "\(.Name) \(.UtilGPU)"' +``` + +**Alerting Script:** +```bash +#!/bin/bash +# Check GPU usage and send alerts +./gpuwatch -once -max-temp 85 -max-mem 90 2>&1 | grep "ALERT" && \ + echo "GPU alert detected!" | mail -s "GPU Alert" admin@example.com +``` + +**Cron Job for Regular Sampling:** +```bash +# Add to crontab: sample every 5 minutes +*/5 * * * * /path/to/gpuwatch -continuous -interval 300 >> /var/log/gpuwatch.log 2>&1 +``` + +### Data Analysis + +**Export historical data for analysis:** +```bash +# Export current state to CSV +./gpuwatch -export csv -output daily_report.csv + +# Process with standard tools +cat daily_report.csv | awk -F',' '{sum+=$7} END {print "Total GPU Memory: " sum " MB"}' +``` + +**Monitor specific user:** +```bash +# Run TUI and filter by user immediately +# Press 'f' to cycle through users, or use export mode: +./gpuwatch -list-users +``` + +--- + ## Troubleshooting * **Go version too old:** @@ -120,6 +287,18 @@ go build -o gpuwatch ./cmd/gpu-tracker * **GLIBC errors on run:** Build and run the binary on the same Linux distribution. + +* **nvidia-smi not found:** + Ensure NVIDIA drivers are installed and `nvidia-smi` is in your PATH. Test with: `nvidia-smi -L` + +* **Database locked errors:** + If running multiple instances, ensure only one instance writes to the database, or use different database paths with `-db` flag. + +* **High CPU usage in continuous mode:** + Increase the sampling interval: `./gpuwatch -continuous -interval 60` (samples every 60 seconds) + +* **Export returns empty data:** + Ensure GPUs are available and nvidia-smi is working. Try `./gpuwatch -once` first to verify sampling works. --- @@ -140,9 +319,7 @@ See [LICENSE](./LICENSE) for details. 
## Author -Developed by [Alireza Parvaresh](https://github.com/parvvaresg) +Developed by [Alireza Parvaresh](https://github.com/parvvaresh) Contributions welcome! - -Made with love in Iran ❤️🇮🇷 --- diff --git a/cmd/gpu-tracker/main.go b/cmd/gpu-tracker/main.go index 065a151..89203cb 100644 --- a/cmd/gpu-tracker/main.go +++ b/cmd/gpu-tracker/main.go @@ -1,18 +1,38 @@ package main import ( + "encoding/csv" + "encoding/json" + "flag" "fmt" "log" "os" "path/filepath" "time" + "gpuwatch/internal/sampler" "gpuwatch/internal/store" "gpuwatch/internal/tui" + "gpuwatch/internal/types" tea "github.com/charmbracelet/bubbletea" ) +var ( + sampleIntervalFlag = flag.Int("interval", 5, "Sampling interval in seconds (default: 5)") + exportFormat = flag.String("export", "", "Export current snapshot to file (formats: json, csv)") + exportFile = flag.String("output", "", "Output file for export (default: stdout)") + dbPathFlag = flag.String("db", "", "Custom database path (default: ~/.local/share/gpuwatch/gpuwatch.db)") + oneShotMode = flag.Bool("once", false, "Sample once and exit (no TUI)") + continuousMode = flag.Bool("continuous", false, "Continuously sample and save without TUI") + showVersion = flag.Bool("version", false, "Show version information") + maxTemp = flag.Float64("max-temp", 90.0, "Alert threshold for GPU temperature (°C)") + maxMem = flag.Float64("max-mem", 95.0, "Alert threshold for memory usage (%)") + listUsers = flag.Bool("list-users", false, "List all users using GPUs and exit") +) + +const version = "1.1.0" + func ensureDataDir() (string, error) { home, err := os.UserHomeDir() if err != nil { @@ -25,24 +45,213 @@ func ensureDataDir() (string, error) { return p, nil } -func main() { - dataDir, err := ensureDataDir() +func exportToJSON(snap types.Snapshot, path string) error { + data, err := json.MarshalIndent(snap, "", " ") if err != nil { - log.Fatal(err) + return err + } + if path == "" { + fmt.Println(string(data)) + return nil + } + return 
os.WriteFile(path, data, 0644) +} + +func exportToCSV(snap types.Snapshot, path string) error { + var w *csv.Writer + if path == "" { + w = csv.NewWriter(os.Stdout) + } else { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + w = csv.NewWriter(f) + } + defer w.Flush() + + // Write header + if err := w.Write([]string{"Timestamp", "GPU Index", "GPU Name", "GPU Util %", "Mem Util %", "Mem Used MB", "Mem Total MB", "Temp C", "Power W", "PID", "Process", "User", "Proc Mem MB"}); err != nil { + return err + } + + ts := snap.TS.Format(time.RFC3339) + for _, gpu := range snap.GPUs { + // Find processes for this GPU + hasProc := false + for _, proc := range snap.Procs { + if proc.GPUUUID == gpu.UUID { + hasProc = true + if err := w.Write([]string{ + ts, + fmt.Sprintf("%d", gpu.Index), + gpu.Name, + fmt.Sprintf("%.1f", gpu.UtilGPU), + fmt.Sprintf("%.1f", gpu.UtilMem), + fmt.Sprintf("%.1f", gpu.MemUsedMB), + fmt.Sprintf("%.1f", gpu.MemTotalMB), + fmt.Sprintf("%.1f", gpu.TempC), + fmt.Sprintf("%.1f", gpu.PowerDrawW), + fmt.Sprintf("%d", proc.PID), + proc.ProcessName, + proc.User, + fmt.Sprintf("%.1f", proc.UsedMemMB), + }); err != nil { + return err + } + } + } + if !hasProc { + if err := w.Write([]string{ + ts, + fmt.Sprintf("%d", gpu.Index), + gpu.Name, + fmt.Sprintf("%.1f", gpu.UtilGPU), + fmt.Sprintf("%.1f", gpu.UtilMem), + fmt.Sprintf("%.1f", gpu.MemUsedMB), + fmt.Sprintf("%.1f", gpu.MemTotalMB), + fmt.Sprintf("%.1f", gpu.TempC), + fmt.Sprintf("%.1f", gpu.PowerDrawW), + "", "", "", "", + }); err != nil { + return err + } + } + } + return nil +} + +func listUsersMode(snap types.Snapshot) { + userMemMap := make(map[string]float64) + for _, proc := range snap.Procs { + userMemMap[proc.User] += proc.UsedMemMB + } + fmt.Println("Users currently using GPUs:") + fmt.Println("User\t\tMemory (MB)") + fmt.Println("----\t\t-----------") + for user, mem := range userMemMap { + fmt.Printf("%s\t\t%.1f\n", user, mem) + } +} + +func checkAlerts(snap 
types.Snapshot, maxTemp, maxMem float64) { + for _, gpu := range snap.GPUs { + if gpu.TempC > maxTemp { + fmt.Fprintf(os.Stderr, "⚠️ ALERT: GPU %d (%s) temperature %.1f°C exceeds threshold %.1f°C\n", + gpu.Index, gpu.Name, gpu.TempC, maxTemp) + } + if gpu.UtilMem > maxMem { + fmt.Fprintf(os.Stderr, "⚠️ ALERT: GPU %d (%s) memory utilization %.1f%% exceeds threshold %.1f%%\n", + gpu.Index, gpu.Name, gpu.UtilMem, maxMem) + } } - dbPath := filepath.Join(dataDir, "gpuwatch.db") +} + +func main() { + flag.Parse() + + if *showVersion { + fmt.Printf("gpuwatch version %s\n", version) + return + } + + // Get database path + var dbPath string + if *dbPathFlag != "" { + dbPath = *dbPathFlag + } else { + dataDir, err := ensureDataDir() + if err != nil { + log.Fatal(err) + } + dbPath = filepath.Join(dataDir, "gpuwatch.db") + } + + // One-shot mode: sample once and optionally export + if *oneShotMode || *listUsers || *exportFormat != "" { + snap, err := sampler.Sample() + if err != nil { + log.Fatalf("Failed to sample: %v", err) + } + + checkAlerts(snap, *maxTemp, *maxMem) + + if *listUsers { + listUsersMode(snap) + return + } + + if *exportFormat != "" { + switch *exportFormat { + case "json": + if err := exportToJSON(snap, *exportFile); err != nil { + log.Fatalf("Export failed: %v", err) + } + case "csv": + if err := exportToCSV(snap, *exportFile); err != nil { + log.Fatalf("Export failed: %v", err) + } + default: + log.Fatalf("Unknown export format: %s (supported: json, csv)", *exportFormat) + } + return + } + + // Just print snapshot + fmt.Printf("Snapshot at %s\n", snap.TS.Format(time.RFC3339)) + for _, gpu := range snap.GPUs { + fmt.Printf("GPU %d: %s - Util: %.1f%%, Mem: %.1f%%, Temp: %.1f°C\n", + gpu.Index, gpu.Name, gpu.UtilGPU, gpu.UtilMem, gpu.TempC) + } + return + } + + // Continuous mode: sample and save without TUI + if *continuousMode { + db, err := store.Open(dbPath) + if err != nil { + log.Fatalf("open db: %v", err) + } + defer db.Close() + + 
fmt.Printf("Continuous mode: sampling every %d seconds (Ctrl+C to stop)\n", *sampleIntervalFlag) + ticker := time.NewTicker(time.Duration(*sampleIntervalFlag) * time.Second) + defer ticker.Stop() + + for { + snap, err := sampler.Sample() + if err != nil { + log.Printf("Sample error: %v", err) + continue + } + checkAlerts(snap, *maxTemp, *maxMem) + id, err := db.SaveSnapshot(snap) + if err != nil { + log.Printf("Save error: %v", err) + } else { + fmt.Printf("[%s] Saved snapshot #%d\n", snap.TS.Format("15:04:05"), id) + } + <-ticker.C + } + } + + // Normal TUI mode db, err := store.Open(dbPath) if err != nil { log.Fatalf("open db: %v", err) } defer db.Close() - m := tui.New(db) + sampleInterval := time.Duration(*sampleIntervalFlag) * time.Second + m := tui.NewWithConfig(db, tui.Config{ + SampleInterval: sampleInterval, + MaxTemp: *maxTemp, + MaxMem: *maxMem, + }) p := tea.NewProgram(m, tea.WithAltScreen()) if _, err := p.Run(); err != nil { fmt.Println("error:", err) os.Exit(1) } - - _ = time.Second // keep import of time for future flags } diff --git a/internal/tui/model.go b/internal/tui/model.go index 070fe80..cd2100d 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -2,6 +2,7 @@ package tui import ( "fmt" + "strings" "time" "gpuwatch/internal/sampler" @@ -11,26 +12,35 @@ import ( tea "github.com/charmbracelet/bubbletea" ) -// tick interval during live auto-recording -const sampleInterval = 5 * time.Second +type Config struct { + SampleInterval time.Duration + MaxTemp float64 + MaxMem float64 +} type model struct { - db *store.DB - live bool - autoRecord bool - width int - height int + db *store.DB + config Config + live bool + autoRecord bool + width int + height int - curr types.Snapshot // live or currently viewed snapshot - status string - err error + curr types.Snapshot // live or currently viewed snapshot + status string + err error // history - historyDate time.Time - metas []store.SnapshotMeta - index int // index into metas for current 
snapshot + historyDate time.Time + metas []store.SnapshotMeta + index int // index into metas for current snapshot + + showHelp bool - showHelp bool + // filters + filterUser string + filterGPU int // -1 means all GPUs + sortByMem bool } type ( @@ -41,12 +51,22 @@ type ( ) func New(db *store.DB) model { + return NewWithConfig(db, Config{ + SampleInterval: 5 * time.Second, + MaxTemp: 90.0, + MaxMem: 95.0, + }) +} + +func NewWithConfig(db *store.DB, config Config) model { loc := time.Now().Location() return model{ - db: db, - live: true, - autoRecord: true, + db: db, + config: config, + live: true, + autoRecord: true, historyDate: time.Now().In(loc), + filterGPU: -1, // show all GPUs by default } } @@ -56,18 +76,22 @@ func (m model) Init() tea.Cmd { func (m model) tickIfNeeded() tea.Cmd { if m.live && m.autoRecord { - return tea.Tick(sampleInterval, func(time.Time) tea.Msg { return m.doSample() }) + return tea.Tick(m.config.SampleInterval, func(time.Time) tea.Msg { return m.doSample() }) } return nil } func (m model) doSample() tea.Msg { s, err := sampler.Sample() - if err != nil { return errorMsg{err} } + if err != nil { + return errorMsg{err} + } // Save when auto record if m.autoRecord { id, err := m.db.SaveSnapshot(s) - if err != nil { return errorMsg{err} } + if err != nil { + return errorMsg{err} + } s.ID = id return refreshMsg{snap: s} } @@ -77,7 +101,9 @@ func (m model) doSample() tea.Msg { func (m model) refreshOnce() tea.Cmd { return func() tea.Msg { s, err := sampler.Sample() - if err != nil { return errorMsg{err} } + if err != nil { + return errorMsg{err} + } return refreshMsg{snap: s} } } @@ -85,17 +111,23 @@ func (m model) refreshOnce() tea.Cmd { func (m model) loadMetasCmd(day time.Time) tea.Cmd { return func() tea.Msg { metas, err := m.db.ListSnapshotsByDate(day) - if err != nil { return errorMsg{err} } + if err != nil { + return errorMsg{err} + } return metasMsg{metas: metas} } } func (m model) loadByMetaCmd(idx int) tea.Cmd { - if idx < 0 || idx >= 
len(m.metas) { return nil } + if idx < 0 || idx >= len(m.metas) { + return nil + } id := m.metas[idx].ID return func() tea.Msg { s, err := m.db.LoadSnapshot(id) - if err != nil { return errorMsg{err} } + if err != nil { + return errorMsg{err} + } return refreshMsg{snap: s} } } @@ -148,9 +180,13 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case "s": // save snapshot immediately if m.live { return m, func() tea.Msg { - if m.curr.TS.IsZero() { return errorMsg{fmt.Errorf("no current snapshot")}} + if m.curr.TS.IsZero() { + return errorMsg{fmt.Errorf("no current snapshot")} + } id, err := m.db.SaveSnapshot(m.curr) - if err != nil { return errorMsg{err} } + if err != nil { + return errorMsg{err} + } return savedMsg{id: id} } } @@ -168,19 +204,130 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return m, m.refreshOnce() case "left": if !m.live && len(m.metas) > 0 { - if m.index > 0 { m.index-- } + if m.index > 0 { + m.index-- + } return m, m.loadByMetaCmd(m.index) } case "right": if !m.live && len(m.metas) > 0 { - if m.index < len(m.metas)-1 { m.index++ } + if m.index < len(m.metas)-1 { + m.index++ + } return m, m.loadByMetaCmd(m.index) } case "up": - if !m.live { m.historyDate = m.historyDate.AddDate(0,0,-1); return m, m.loadMetasCmd(m.historyDate) } + if !m.live { + m.historyDate = m.historyDate.AddDate(0, 0, -1) + return m, m.loadMetasCmd(m.historyDate) + } case "down": - if !m.live { m.historyDate = m.historyDate.AddDate(0,0,1); return m, m.loadMetasCmd(m.historyDate) } + if !m.live { + m.historyDate = m.historyDate.AddDate(0, 0, 1) + return m, m.loadMetasCmd(m.historyDate) + } + case "f": // filter by user + // Cycle through users in current snapshot + users := m.getUniqueUsers() + if len(users) == 0 { + m.filterUser = "" + } else if m.filterUser == "" { + m.filterUser = users[0] + } else { + found := false + for i, u := range users { + if u == m.filterUser && i < len(users)-1 { + m.filterUser = users[i+1] + found = true + break + } + } + 
if !found { + m.filterUser = "" // clear filter + } + } + return m, nil + case "g": // filter by GPU + // Cycle through GPUs + if len(m.curr.GPUs) == 0 { + m.filterGPU = -1 + } else if m.filterGPU == -1 { + m.filterGPU = m.curr.GPUs[0].Index + } else { + found := false + for i, gpu := range m.curr.GPUs { + if gpu.Index == m.filterGPU && i < len(m.curr.GPUs)-1 { + m.filterGPU = m.curr.GPUs[i+1].Index + found = true + break + } + } + if !found { + m.filterGPU = -1 // show all + } + } + return m, nil + case "m": // toggle sort by memory + m.sortByMem = !m.sortByMem + return m, nil + case "c": // clear all filters + m.filterUser = "" + m.filterGPU = -1 + m.sortByMem = false + return m, nil } } return m, nil } + +func (m model) getUniqueUsers() []string { + seen := make(map[string]bool) + var users []string + for _, proc := range m.curr.Procs { + if !seen[proc.User] { + seen[proc.User] = true + users = append(users, proc.User) + } + } + return users +} + +func (m model) getFilteredSnapshot() types.Snapshot { + snap := m.curr + + // Apply GPU filter + if m.filterGPU != -1 { + var filteredGPUs []types.GPU + for _, gpu := range snap.GPUs { + if gpu.Index == m.filterGPU { + filteredGPUs = append(filteredGPUs, gpu) + } + } + snap.GPUs = filteredGPUs + + // Filter processes for this GPU + var filteredProcs []types.GPUProcess + for _, proc := range snap.Procs { + for _, gpu := range filteredGPUs { + if proc.GPUUUID == gpu.UUID { + filteredProcs = append(filteredProcs, proc) + break + } + } + } + snap.Procs = filteredProcs + } + + // Apply user filter + if m.filterUser != "" { + var filteredProcs []types.GPUProcess + for _, proc := range snap.Procs { + if strings.EqualFold(proc.User, m.filterUser) { + filteredProcs = append(filteredProcs, proc) + } + } + snap.Procs = filteredProcs + } + + return snap +} diff --git a/internal/tui/view.go b/internal/tui/view.go index ba18429..fd41031 100644 --- a/internal/tui/view.go +++ b/internal/tui/view.go @@ -20,6 +20,21 @@ func (m model) 
View() string { header += " " + errStyle.Render(m.err.Error()) } + // Show active filters + if m.filterUser != "" || m.filterGPU != -1 || m.sortByMem { + var filters []string + if m.filterUser != "" { + filters = append(filters, fmt.Sprintf("user:%s", m.filterUser)) + } + if m.filterGPU != -1 { + filters = append(filters, fmt.Sprintf("GPU:%d", m.filterGPU)) + } + if m.sortByMem { + filters = append(filters, "sorted:mem") + } + header += " " + lg.NewStyle().Foreground(lg.Color("#FFA500")).Render(fmt.Sprintf("[filters: %s]", strings.Join(filters, ", "))) + } + body := m.renderBody() help := m.renderHelp() @@ -37,18 +52,32 @@ func (m model) renderBody() string { } func (m model) renderGPUs() string { - if len(m.curr.GPUs) == 0 { + snap := m.getFilteredSnapshot() + if len(snap.GPUs) == 0 { return box.Width(m.width/2 - 4).Render(subtle.Render("no GPU data")) } var lines []string - for _, g := range m.curr.GPUs { + for _, g := range snap.GPUs { title := label.Render(fmt.Sprintf("GPU %d — %s", g.Index, g.Name)) util := fmt.Sprintf("util %2.0f%% | mem %2.0f%% (%0.0f/%0.0f MB)", g.UtilGPU, g.UtilMem, g.MemUsedMB, g.MemTotalMB) therm := fmt.Sprintf("temp %2.0f°C | power %0.0f/%0.0f W", g.TempC, g.PowerDrawW, g.PowerLimitW) + + // Add alert indicators + var alerts []string + if g.TempC > m.config.MaxTemp { + alerts = append(alerts, fmt.Sprintf("⚠️ HIGH TEMP %.0f°C", g.TempC)) + } + if g.UtilMem > m.config.MaxMem { + alerts = append(alerts, fmt.Sprintf("⚠️ HIGH MEM %.0f%%", g.UtilMem)) + } + lines = append(lines, title) lines = append(lines, drawBar(g.UtilGPU, 100, 24)) lines = append(lines, subtle.Render(util)) lines = append(lines, subtle.Render(therm)) + if len(alerts) > 0 { + lines = append(lines, lg.NewStyle().Foreground(lg.Color("#FF0000")).Render(strings.Join(alerts, " "))) + } lines = append(lines, "") } content := strings.Join(lines, "\n") @@ -56,11 +85,12 @@ func (m model) renderGPUs() string { } func (m model) renderUsers() string { - if len(m.curr.Procs) == 0 { + 
snap := m.getFilteredSnapshot() + if len(snap.Procs) == 0 { return box.Width(m.width/2 - 4).Render(subtle.Render("no running GPU processes")) } agg := make(map[string]float64) - for _, p := range m.curr.Procs { + for _, p := range snap.Procs { agg[p.User] += p.UsedMemMB } var users []types.UserAgg @@ -70,28 +100,46 @@ func (m model) renderUsers() string { sort.Slice(users, func(i, j int) bool { return users[i].MemUsedMB > users[j].MemUsedMB }) var lines []string - lines = append(lines, label.Render("Per‑user GPU memory (MB)") ) + lines = append(lines, label.Render("Per‑user GPU memory (MB)")) max := 1.0 - for _, u := range users { if u.MemUsedMB > max { max = u.MemUsedMB } } + for _, u := range users { + if u.MemUsedMB > max { + max = u.MemUsedMB + } + } for _, u := range users { bar := drawBar(u.MemUsedMB, max, 30) - lines = append(lines, fmt.Sprintf("%-12s %s %5.0f", u.User, bar, u.MemUsedMB)) + userLabel := u.User + if m.filterUser == u.User { + userLabel = "►" + userLabel + } + lines = append(lines, fmt.Sprintf("%-12s %s %5.0f", userLabel, bar, u.MemUsedMB)) } content := strings.Join(lines, "\n") return box.Width(m.width/2 - 4).Render(content) } func (m model) renderProcs() string { + snap := m.getFilteredSnapshot() var b strings.Builder b.WriteString(label.Render("Top GPU processes (by used MB)") + "\n") - if len(m.curr.Procs) == 0 { + if len(snap.Procs) == 0 { b.WriteString(subtle.Render("none")) return box.Width(m.width - 4).Render(b.String()) } - procs := append([]types.GPUProcess(nil), m.curr.Procs...) - sort.Slice(procs, func(i, j int) bool { return procs[i].UsedMemMB > procs[j].UsedMemMB }) + procs := append([]types.GPUProcess(nil), snap.Procs...) 
+
+
+	// Sort by memory when toggled; otherwise fall back to PID order
+	if m.sortByMem {
+		sort.Slice(procs, func(i, j int) bool { return procs[i].UsedMemMB > procs[j].UsedMemMB })
+	} else {
+		sort.Slice(procs, func(i, j int) bool { return procs[i].PID < procs[j].PID })
+	}
+
 	maxN := 10
-	if len(procs) < maxN { maxN = len(procs) }
+	if len(procs) < maxN {
+		maxN = len(procs)
+	}
 	for i := 0; i < maxN; i++ {
 		p := procs[i]
 		b.WriteString(fmt.Sprintf("%5d %-12s %-22s %6.0f MB %s\n", p.PID, p.User, trim(p.ProcessName, 22), p.UsedMemMB, shortUUID(p.GPUUUID)))
@@ -101,38 +149,60 @@ func (m model) renderProcs() string {
 
 func (m model) renderHelp() string {
 	if !m.showHelp {
-		return subtle.Render("a: auto | r: refresh | s: save | h: history | ←/→: prev/next snap | ↑/↓: day | t: today | ?: help | q: quit")
+		return subtle.Render("a: auto | r: refresh | s: save | h: history | f: filter user | g: filter GPU | m: sort mem | c: clear | ?: help | q: quit")
 	}
 	return box.Width(m.width - 4).Render(strings.Join([]string{
-		"a — Toggle auto-recording of live samples (every 5s)",
-		"r — Refresh once (live mode)",
-		"s — Save the current snapshot immediately",
-		"h — Toggle History mode",
-		"←/→ — Previous/Next snapshot of the selected date",
-		"↑/↓ — Move one day back/forward",
-		"t — Jump back to today and live mode",
-		"q — Quit",
+		"Navigation & Actions:",
+		"  a — Toggle auto-recording of live samples",
+		"  r — Refresh once (live mode)",
+		"  s — Save the current snapshot immediately",
+		"  h — Toggle History mode",
+		"  ←/→ — Previous/Next snapshot of the selected date",
+		"  ↑/↓ — Move one day back/forward",
+		"  t — Jump back to today and live mode",
+		"",
+		"Filters & Display:",
+		"  f — Cycle through users to filter by specific user",
+		"  g — Cycle through GPUs to filter by specific GPU",
+		"  m — Toggle sort processes by memory usage",
+		"  c — Clear all active filters",
+		"",
+		"  q — Quit",
	}, "\n"))
 }
 
 func drawBar(value, max float64, width int) string {
-	if max <= 0 { max = 1 }
+	if max <= 0 {
+		max = 1
+	}
 	ratio := 
value / max - if ratio < 0 { ratio = 0 } - if ratio > 1 { ratio = 1 } + if ratio < 0 { + ratio = 0 + } + if ratio > 1 { + ratio = 1 + } filled := int(float64(width) * ratio) - if filled < 0 { filled = 0 } - if filled > width { filled = width } + if filled < 0 { + filled = 0 + } + if filled > width { + filled = width + } return bar.Width(filled).Render(strings.Repeat(" ", filled)) + subtle.Width(width-filled).Render(strings.Repeat("·", width-filled)) } func trim(s string, n int) string { r := []rune(s) - if len(r) <= n { return s } + if len(r) <= n { + return s + } return string(r[:n-1]) + "…" } func shortUUID(u string) string { - if len(u) <= 8 { return u } + if len(u) <= 8 { + return u + } return u[len(u)-8:] }