diff --git a/.env.example b/.env.example index 82c5e9652..e5e230a14 100644 --- a/.env.example +++ b/.env.example @@ -9,4 +9,15 @@ PGWEB_DATABASE_URL=postgres://pgweb_dev:pgweb_dev_password@postgres:5432/pgweb_t PGWEB_CUSTOM_PARAMS="Client,Instance,ClientName,InstanceName,AccountId,AccountPerspective,AccountDbUser,AccountName,AccountEmail,FolderName" # Test role for RLS (Issue #15) - used for testing multi-tenancy -PGWEB_TEST_ROLE=test_tenant_role \ No newline at end of file +PGWEB_TEST_ROLE=test_tenant_role + +# Font Configuration for Testing +PGWEB_FONT_FAMILY=Space Grotesk, sans-serif +PGWEB_FONT_SIZE=15px +PGWEB_GOOGLE_FONTS=Space Grotesk:300,400,500,600,700 + +# Query and Metadata Caching Configuration +PGWEB_DISABLE_QUERY_CACHE=false # Disable query result caching +PGWEB_DISABLE_METADATA_CACHE=false # Disable metadata caching for schemas, tables, etc. +PGWEB_QUERY_CACHE_TTL=300 # Query cache TTL in seconds - how long SELECT query results are cached +PGWEB_METADATA_CACHE_TTL=600 # Metadata cache TTL in seconds - how long schema/table info is cached \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index fc5c7e34f..af71851a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,58 @@ ## Changelog -Current [release](https://github.com/flowbi/pgweb/releases) is `0.16.5`. +Current [release](https://github.com/flowbi/pgweb/releases) is `0.16.14`. 
+ +## 0.16.14 - 2025-09-01 + +- `NEW` Enable font customization +- `NEW` Implement in-memory query caching to improve performance + +## 0.16.13 - 2025-08-31 + +- `NEW` Introduce configuration of custom parameter during server configuration +- `NEW` Optimized local dev setup for testing through docker-compose.dev +- `NEW` Implement Runtime Query File Replacement for custom constraints +- `NEW` Implement Flow.bi custom constraints query with safe fallback to standard PostgreSQL constraints +- `FIX` Update ace-pgsql.js SQL keyword highlighting + +## 0.16.12 - 2025-08-30 + +- `FIX` Update Font Awesome link to use CDN for resolving font issue + +## 0.16.11 - 2025-08-29 + +- `FIX` Update Font Awesome font relative paths in CSS to absolute +- `FIX` Enhance parameter overlay management +- `FIX` 'hideParamIndicator' in URL and hide overlay in tabs other than Query + +## 0.16.10 - 2025-08-28 + +- `NEW` Support @parameters in queries +- `NEW` Streamline development setup with Docker, helper script and .env\* file +- `FIX` Update CI workflow and remove obsolete test file + +## 0.16.9 - 2025-08-27 + +- `NEW` Add an option to hide active parameter overlay + +## 0.16.8 - 2025-08-26 + +- `NEW` Implement URL parameter substitution SQL queries for iframe embedding +- `FIX` Fix formatting issues + +## 0.16.7 - 2025-08-25 + +- `NEW` Add schema and object filtering options +- `NEW` Add hyper link support +- `FIX` Time out 504 error in /rows when selecting foreign table and hide table information from UI +- `FIX` Update PostgreSQL version matrix in GitHub workflow +- `FIX` Formatting issues + +## 0.16.6 - 2025-08-24 + +- `FIX` Set CGO_ENABLED only for docker-build jobs and remove globally +- `FIX` Update project references to flowbi +- `FIX` Optimize GitHub Actions workflows to save on CI/CD time ## 0.16.5 - 2025-08-22 diff --git a/dev.sh b/dev.sh index b4a6f7a30..3305fcc0c 100755 --- a/dev.sh +++ b/dev.sh @@ -93,7 +93,7 @@ case "$ACTION" in # Test parameter substitution echo_info "Test 
URL with parameters:" - echo_info "http://localhost:8081/?Client=client&Instance=instance&ClientName=clientname&InstanceName=instance-name&AccountId=account-id&AccountPerspective=account-perspective&AccountDbUser=account-db-user&AccountName=account-name&AccountEmail=account-email&FolderName=folder-name&InvalidParameter=shouldnotshow" + echo_info "http://localhost:8081/?Client=client&Instance=instance&ClientName=client-name&InstanceName=instance-name&AccountId=account-id&AccountPerspective=account-perspective&AccountDbUser=account-db-user&AccountName=account-name&AccountEmail=account-email&FolderName=folder-name&InvalidParameter=shouldnotshow" ;; "stop"|"down") @@ -131,7 +131,7 @@ case "$ACTION" in echo_info "Testing parameter substitution..." sleep 2 echo_info "Opening pgweb with test parameters..." - open "http://localhost:8081/?Client=client&Instance=instance&ClientName=clientname&InstanceName=instance-name&AccountId=account-id&AccountPerspective=account-perspective&AccountDbUser=account-db-user&AccountName=account-name&AccountEmail=account-email&FolderName=folder-name&InvalidParameter=shouldnotshow" 2>/dev/null || echo_warning "Could not open browser automatically" + open "http://localhost:8081/?Client=client&Instance=instance&ClientName=client-name&InstanceName=instance-name&AccountId=account-id&AccountPerspective=account-perspective&AccountDbUser=account-db-user&AccountName=account-name&AccountEmail=account-email&FolderName=folder-name&InvalidParameter=shouldnotshow" 2>/dev/null || echo_warning "Could not open browser automatically" ;; "help"|*) diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index 52d29c2d8..4b347ef1d 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -27,11 +27,19 @@ services: postgres: condition: service_healthy environment: - # Default to local postgres for development # Override PGWEB_DATABASE_URL for production with URL-encoded password PGWEB_DATABASE_URL: 
${PGWEB_DATABASE_URL:-postgres://pgweb_dev:pgweb_dev_password@postgres:5432/pgweb_test?sslmode=disable} PGWEB_TEST_ROLE: ${PGWEB_TEST_ROLE} PGWEB_CUSTOM_PARAMS: ${PGWEB_CUSTOM_PARAMS} + # Font configuration + PGWEB_FONT_FAMILY: ${PGWEB_FONT_FAMILY} + PGWEB_FONT_SIZE: ${PGWEB_FONT_SIZE} + PGWEB_GOOGLE_FONTS: ${PGWEB_GOOGLE_FONTS} + # Query and metadata caching configuration + PGWEB_DISABLE_QUERY_CACHE: ${PGWEB_DISABLE_QUERY_CACHE} + PGWEB_DISABLE_METADATA_CACHE: ${PGWEB_DISABLE_METADATA_CACHE} + PGWEB_QUERY_CACHE_TTL: ${PGWEB_QUERY_CACHE_TTL} + PGWEB_METADATA_CACHE_TTL: ${PGWEB_METADATA_CACHE_TTL} command: [ "./pgweb", diff --git a/docs/development-setup.md b/docs/development-setup.md new file mode 100644 index 000000000..b035d7f06 --- /dev/null +++ b/docs/development-setup.md @@ -0,0 +1,86 @@ +# pgweb Development Setup + +Simple one-command development environment for pgweb with PostgreSQL and parameter substitution. + +## Quick Start + +```bash +# Start development environment (builds automatically) +./dev.sh start + +# Access pgweb: http://localhost:8081 +# Access postgres: localhost:5433 +``` + +## Commands + +```bash +./dev.sh start # Start development environment +./dev.sh stop # Stop development environment +./dev.sh restart # Restart and rebuild +./dev.sh logs # Show logs (add service name: logs pgweb) +./dev.sh clean # Clean up containers and volumes +./dev.sh test # Test parameter substitution +./dev.sh help # Show help +``` + +## Parameter Substitution + +The system automatically replaces `@parameter` placeholders in SQL queries with URL parameters. 
+ +**Example:** + +- URL: `http://localhost:8081/?Client=client&Instance=instance&ClientName=client-name&InstanceName=instance-name&AccountId=account-id&AccountPerspective=account-perspective&AccountDbUser=account-db-user&AccountName=account-name&AccountEmail=account-email&FolderName=folder-name&InvalidParameter=shouldnotshow` +- Query: `SELECT * FROM table WHERE client = @Client AND instance = @Instance` +- Executed: `SELECT * FROM table WHERE client = 'client' AND instance = 'instance'` + +**Custom Parameters:** + +Parameters are configurable via the `PGWEB_CUSTOM_PARAMS` environment variable. Default parameters include: + +- `Client`, `Instance`, `ClientName`, `InstanceName` +- `AccountId`, `AccountPerspective`, `AccountDbUser` +- `AccountName`, `AccountEmail`, `FolderName` + +You can customize these by setting `PGWEB_CUSTOM_PARAMS` in your `.env` file with a comma-separated list of parameter names. + +## Production Database + +For production database with special characters in password: + +```bash +# Set environment variables +export DB_USER="your_username" +export SQL_API_PASSWORD='your*pa$$@word' # Single quotes: prevents shell expansion of $$; special chars auto-encoded +export SQL_API_HOST="your.database.host" +export DB_NAME="your_database" + +# Start with production database +./dev.sh start +``` + +## Docker Compose Only + +If you prefer direct docker-compose: + +```bash +# Local development +docker-compose up -d + +# Production database +DATABASE_URL="postgres://user:encoded_password@host:5432/db" docker-compose up -d +``` + +## How It Works + +1. **No Separate Build Step**: Docker builds the binary automatically during `docker-compose up` +2. **Automatic Password Encoding**: Special characters in passwords are URL-encoded automatically +3. **Frontend Parameter Substitution**: JavaScript replaces `@parameters` before sending queries to backend +4. 
**One File**: Everything configured in single `docker-compose.yml` + +## Files + +- `docker-compose.yml` - Main configuration (safe to commit) +- `dev.sh` - Helper script with password encoding +- `.env` - Secrets (never committed) +- `.env.example` - Template for environment variables diff --git a/docs/font-customization.md b/docs/font-customization.md new file mode 100644 index 000000000..bbdb7ba65 --- /dev/null +++ b/docs/font-customization.md @@ -0,0 +1,225 @@ +# Font Customization + +This document describes how to configure custom fonts in pgweb for company branding and improved user experience. + +## Overview + +pgweb supports customizable fonts through environment variables, allowing each deployment to use company-specific typography. The system supports: + +- Any Google Fonts font family +- Custom font sizes +- Automatic font loading from Google Fonts API +- Font persistence across user sessions +- Application to both interface text and code editor + +## Environment Variables + +### `PGWEB_FONT_FAMILY` + +**Description:** CSS font family to use throughout the interface +**Format:** CSS font-family string +**Default:** System fonts (`-apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif`) + +**Examples:** + +```bash +export PGWEB_FONT_FAMILY="Space Grotesk, sans-serif" +export PGWEB_FONT_FAMILY="Inter, sans-serif" +export PGWEB_FONT_FAMILY="Roboto, sans-serif" +export PGWEB_FONT_FAMILY="'Custom Font Name', Arial, sans-serif" +``` + +### `PGWEB_FONT_SIZE` + +**Description:** CSS font size for the interface +**Format:** CSS size value with units +**Default:** `14px` + +**Examples:** + +```bash +export PGWEB_FONT_SIZE="14px" +export PGWEB_FONT_SIZE="15px" +export PGWEB_FONT_SIZE="16px" +export PGWEB_FONT_SIZE="1rem" +``` + +### `PGWEB_GOOGLE_FONTS` + +**Description:** Google Fonts to preload for the interface +**Format:** Comma-separated list of `FontName:weights` +**Default:** None (only loads if specified) + 
+**Examples:** + +```bash +# Single font with multiple weights +export PGWEB_GOOGLE_FONTS="Space Grotesk:300,400,500,600,700" + +# Multiple fonts +export PGWEB_GOOGLE_FONTS="Inter:300,400,500,600,700,Roboto:300,400,500,700" + +# Font with specific weights +export PGWEB_GOOGLE_FONTS="Poppins:400,600,700" +``` + +## Configuration Methods + +### 1. Docker Entrypoint Script + +Add font configuration to your Docker entrypoint script: + +```bash +#!/bin/sh +set -e + +# Font Configuration - Company branding +export PGWEB_FONT_FAMILY="Space Grotesk, sans-serif" +export PGWEB_FONT_SIZE="15px" +export PGWEB_GOOGLE_FONTS="Space Grotesk:300,400,500,600,700" + +# Your existing pgweb configuration... +exec pgweb --bind=0.0.0.0 --listen=8081 [other options] +``` + +### 2. Docker Compose + +Configure fonts in your `docker-compose.yml`: + +```yaml +services: + pgweb: + image: your-pgweb-image + environment: + PGWEB_FONT_FAMILY: "Space Grotesk, sans-serif" + PGWEB_FONT_SIZE: "15px" + PGWEB_GOOGLE_FONTS: "Space Grotesk:300,400,500,600,700" + # Your other environment variables... +``` + +### 3. Environment File + +Create a `.env` file: + +```bash +# Font Configuration +PGWEB_FONT_FAMILY=Space Grotesk, sans-serif +PGWEB_FONT_SIZE=15px +PGWEB_GOOGLE_FONTS=Space Grotesk:300,400,500,600,700 +``` + +### 4. 
Command Line Arguments + +You can also use command line flags: + +```bash +pgweb --font-family="Space Grotesk, sans-serif" --font-size="15px" --google-fonts="Space Grotesk:300,400,500,600,700" +``` + +## Popular Font Examples + +### Tech Companies + +```bash +# Modern, clean fonts +export PGWEB_FONT_FAMILY="Inter, sans-serif" +export PGWEB_GOOGLE_FONTS="Inter:300,400,500,600,700" + +export PGWEB_FONT_FAMILY="Poppins, sans-serif" +export PGWEB_GOOGLE_FONTS="Poppins:300,400,500,600,700" +``` + +### Financial/Corporate + +```bash +# Professional, readable fonts +export PGWEB_FONT_FAMILY="Roboto, sans-serif" +export PGWEB_GOOGLE_FONTS="Roboto:300,400,500,700" + +export PGWEB_FONT_FAMILY="Open Sans, sans-serif" +export PGWEB_GOOGLE_FONTS="Open Sans:300,400,600,700" +``` + +### Creative/Design + +```bash +# Distinctive, branded fonts +export PGWEB_FONT_FAMILY="Space Grotesk, sans-serif" +export PGWEB_GOOGLE_FONTS="Space Grotesk:300,400,500,600,700" + +export PGWEB_FONT_FAMILY="Nunito, sans-serif" +export PGWEB_GOOGLE_FONTS="Nunito:300,400,600,700,800" +``` + +## Technical Details + +### Font Loading Process + +1. **Environment Variables** are read by the Go backend during startup +2. **Configuration API** (`/api/config`) exposes font settings to the frontend +3. **JavaScript** fetches configuration and dynamically loads Google Fonts +4. **CSS Custom Properties** apply fonts throughout the interface +5. 
**Ace Editor** receives font updates for consistent code styling + +### Browser Support + +The font system uses modern web standards: + +- CSS Custom Properties (CSS Variables) +- Google Fonts API v2 +- Dynamic font loading via JavaScript +- localStorage for persistence + +### Performance Considerations + +- Fonts are loaded asynchronously to prevent blocking +- `font-display: swap` ensures immediate text rendering +- Google Fonts are cached by the browser +- Only specified font weights are loaded to minimize bandwidth + +### Fallback Strategy + +The system includes robust fallbacks: + +1. **Configured font** (from environment) +2. **System fonts** (if Google Font fails to load) +3. **Generic sans-serif** (ultimate fallback) + +## Troubleshooting + +### Font Not Loading + +- Check that the font name exactly matches Google Fonts spelling +- Verify environment variables are properly set in your deployment +- Check browser developer tools for font loading errors +- Ensure internet connectivity for Google Fonts API + +### Font Not Applying to Code Editor + +- The Ace editor font is automatically updated when the main font changes +- If issues persist, check browser console for JavaScript errors + +### Configuration Not Taking Effect + +- Restart the pgweb service after changing environment variables +- Verify the `/api/config` endpoint returns your font configuration +- Clear browser cache if fonts appear to be cached incorrectly + +## API Reference + +### GET /api/config + +Returns current configuration including font settings: + +```json +{ + "fonts": { + "family": "Space Grotesk, sans-serif", + "size": "15px", + "google_fonts": "Space Grotesk:300,400,500,600,700" + }, + "parameter_patterns": { + "custom": ["Client", "Instance", ...] 
+ } +} +``` diff --git a/docs/query-caching.md b/docs/query-caching.md new file mode 100644 index 000000000..d4577eddf --- /dev/null +++ b/docs/query-caching.md @@ -0,0 +1,306 @@ +# Query and Metadata Caching + +Flowbi/pgweb implements intelligent in-memory caching to improve performance by reducing database load and response times for frequently accessed data. + +## Overview + +The caching system consists of two independent caches: + +- **Query Cache**: Caches results of SELECT queries that don't contain time-sensitive functions +- **Metadata Cache**: Caches database schema information like tables, columns, constraints, and indexes + +## Cache Implementation Details + +### Thread Safety + +- Uses `sync.RWMutex` for concurrent read/write access +- Multiple queries can read from cache simultaneously +- Cache writes are serialized for data consistency + +### Memory Management + +- **Smart Memory Limits**: Query cache limited to 50MB, metadata cache to 100MB by default +- **Size-Based Eviction**: Automatically removes oldest items when memory limit is approached +- **Memory Estimation**: Uses reflection-based size calculation for cached values +- **Automatic Cleanup**: Expired entries cleaned every 5 minutes +- **TTL-based Expiration**: All cached items expire based on configured TTL + +### Cache Key Strategy + +- **Security**: MD5 hashing of query + connection string + user role for uniqueness +- **Role Isolation**: Different users with different roles get separate cache entries +- **Namespaced Keys**: Prefixed for organization: + - `query:` for SELECT query results + - `metadata:` for database schema information + +## Configuration + +All caching settings can be configured via environment variables or command-line flags: + +### Environment Variables + +```bash +# Disable query result caching (default: false) +PGWEB_DISABLE_QUERY_CACHE=false + +# Disable metadata caching (default: false) +PGWEB_DISABLE_METADATA_CACHE=false + +# Query cache TTL in seconds (default: 
120) +PGWEB_QUERY_CACHE_TTL=300 + +# Metadata cache TTL in seconds (default: 600) +PGWEB_METADATA_CACHE_TTL=600 +``` + +### Command-Line Flags + +```bash +# Disable caches +./pgweb --no-query-cache --no-metadata-cache + +# Configure TTL values +./pgweb --query-cache-ttl=300 --metadata-cache-ttl=1200 +``` + +## Query Cache Behavior + +### Cacheable Queries + +Only SELECT queries are cached, and only when they don't contain time-sensitive functions: + +**✅ Cached:** + +```sql +SELECT * FROM users WHERE status = 'active' +SELECT COUNT(*) FROM orders +SELECT name, email FROM customers +``` + +**❌ Not Cached:** + +```sql +INSERT INTO logs VALUES (...) -- Not a SELECT +UPDATE users SET last_seen = NOW() -- Contains time function +SELECT * FROM events WHERE created_at > NOW() - INTERVAL '1 hour' +SELECT random() as lucky_number -- Contains random function +``` + +### Time-Sensitive Function Detection + +The following functions prevent query caching: + +- `now()` +- `current_timestamp` +- `random()` + +### Cache Flow + +1. Check if query is cacheable (SELECT + no time-sensitive functions) +2. Generate cache key from query + connection string +3. Look for existing cached result +4. If found and not expired, return cached result +5. If not found, execute query and cache result + +## Metadata Cache Behavior + +### Cached Metadata + +- Database schemas list +- Table and view information +- Column definitions and types +- Table constraints (primary keys, foreign keys, checks) +- Index information +- Table statistics + +### Cache Invalidation + +Metadata cache entries expire based on TTL. 
Consider clearing cache after: + +- Schema changes (CREATE/DROP TABLE) +- Column modifications (ALTER TABLE) +- Index creation/removal +- Constraint changes + +## Cache Management + +### Statistics Endpoint + +Get cache performance metrics: + +```bash +GET /api/cache/stats +``` + +Response: + +```json +{ + "caching_enabled": { + "query_cache": true, + "metadata_cache": true + }, + "cache_ttl": { + "query_cache_ttl": 120, + "metadata_cache_ttl": 600 + }, + "query_cache": { + "total_items": 45, + "expired_items": 3, + "active_items": 42, + "memory_used_mb": 12, + "memory_limit_mb": 50, + "memory_used_bytes": 12582912 + }, + "metadata_cache": { + "total_items": 12, + "expired_items": 0, + "active_items": 12, + "memory_used_mb": 3, + "memory_limit_mb": 100, + "memory_used_bytes": 3145728 + } +} +``` + +### Clear Cache + +Clear all cached data: + +```bash +POST /api/cache/clear +``` + +Response: + +```json +{ + "message": "Caches cleared successfully", + "cleared": ["query_cache", "metadata_cache"] +} +``` + +## Performance Benefits + +### Query Cache Benefits + +- **Faster Response Times**: Cached SELECT queries return instantly +- **Reduced Database Load**: Identical queries don't hit the database +- **Better Concurrency**: Multiple users can access cached results simultaneously + +### Metadata Cache Benefits + +- **Snappy UI**: Table lists and schema browsing respond immediately +- **Reduced Metadata Queries**: Schema information loaded once per TTL period +- **Improved User Experience**: Faster navigation between tables and schemas + +## Best Practices + +### When to Disable Caching + +**Disable Query Cache** when: + +- Working with rapidly changing data +- Real-time analytics requirements +- Memory constraints on the server + +**Disable Metadata Cache** when: + +- Frequently changing schema (development environments) +- Multiple users modifying database structure + +### Optimal TTL Settings + +**Short TTL (30-120 seconds)** for: + +- Frequently changing application 
data +- Development environments +- Real-time dashboards + +**Long TTL (300-3600 seconds)** for: + +- Stable reference data +- Reports and analytics +- Production environments with stable schemas + +### Memory Considerations + +- **Built-in Limits**: Query cache limited to 50MB, metadata cache to 100MB +- **Automatic Management**: Cache automatically evicts oldest items when memory limit reached +- **Memory Monitoring**: Check memory usage via `/api/cache/stats` endpoint +- **ECS/Container Deployments**: Ensure container memory allocation accounts for cache limits plus application overhead +- **Row Count Limits**: Large query results (>10,000 rows) are not cached to prevent memory issues +- **Estimation**: Memory usage is estimated using reflection; actual usage may vary + +## Deployment Considerations + +### Container Memory Requirements + +When deploying to ECS, Kubernetes, or Docker, ensure adequate memory allocation: + +**Minimum Recommended Memory:** +- **Development**: 256MB (cache disabled or very low TTL) +- **Production**: 512MB-1GB depending on usage patterns +- **High Traffic**: 1GB+ for optimal performance + +**Memory Breakdown:** +- Application base: ~200-300MB +- Query cache limit: 50MB (configurable) +- Metadata cache limit: 100MB (configurable) +- Go runtime/GC: ~100-200MB +- Database connections: ~50-100MB +- **Total**: ~500-650MB minimum + +**Example ECS Task Definition:** +```json +{ + "memory": 1024, + "memoryReservation": 512, + "environment": [ + {"name": "PGWEB_QUERY_CACHE_TTL", "value": "300"}, + {"name": "PGWEB_DISABLE_QUERY_CACHE", "value": "false"} + ] +} +``` + +## Troubleshooting + +### Cache Not Working + +1. Verify caching is enabled via `/api/cache/stats` +2. Check query doesn't contain time-sensitive functions +3. Ensure query is a valid SELECT statement +4. **Check Environment Variables**: Ensure `PGWEB_QUERY_CACHE_TTL` is being read correctly + +### High Memory Usage + +1. 
**Check Cache Stats**: Review memory usage via `/api/cache/stats` +2. **Container Memory**: Ensure your container has enough memory (see deployment section above) +3. **Large Results**: Cache won't store results >10,000 rows, but check for many medium-sized results +4. **TTL Optimization**: Reduce TTL values to expire items sooner +5. **Manual Clear**: Use `/api/cache/clear` if needed +6. **Temporary Disable**: Set `PGWEB_DISABLE_QUERY_CACHE=true` as last resort + +### Container Memory Issues + +If you see OOMKilled errors or high memory usage: + +1. **Check Actual Usage**: Memory stats may show higher usage due to Go's garbage collection +2. **Increase Container Memory**: Add 200-300MB buffer above cache limits +3. **Reduce Cache TTL**: Lower values mean faster cleanup +4. **Monitor Metrics**: Use `/api/cache/stats` to track memory trends + +### Environment Variables Not Working + +If your `PGWEB_QUERY_CACHE_TTL` setting isn't being applied: + +1. **Verify Environment**: Check environment variables are properly set in container +2. **Restart Required**: Environment variables only read at startup +3. **Check Logs**: Look for debug messages about cache configuration +4. **Test Override**: Try command-line flags: `--query-cache-ttl=300` + +### Stale Data Issues + +1. Reduce cache TTL values +2. Clear cache after schema changes +3. Monitor data freshness requirements vs. 
performance gains diff --git a/pkg/api/api.go b/pkg/api/api.go index ee8d2c25e..1ac4c5c9b 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -2,11 +2,13 @@ package api import ( "context" + "crypto/md5" "encoding/base64" "fmt" "net/http" neturl "net/url" "os" + "regexp" "strings" "time" @@ -14,6 +16,7 @@ import ( "github.com/tuvistavie/securerandom" "github.com/flowbi/pgweb/pkg/bookmarks" + "github.com/flowbi/pgweb/pkg/cache" "github.com/flowbi/pgweb/pkg/client" "github.com/flowbi/pgweb/pkg/command" "github.com/flowbi/pgweb/pkg/connection" @@ -32,8 +35,30 @@ var ( // QueryStore reads the SQL queries stores in the home directory QueryStore *queries.Store + + // QueryCache caches query results + QueryCache *cache.Cache + + // MetadataCache caches database metadata + MetadataCache *cache.Cache +) + +var ( + // Regex to identify SELECT queries that are safe to cache + selectQueryRegex = regexp.MustCompile(`(?i)^\s*SELECT\s+`) ) +func InitializeCaches() { + if !command.Opts.DisableQueryCache { + // Use memory-based cache limiting (50MB default) for better resource control + // This handles both large result sets and many small queries + QueryCache = cache.NewWithMemoryLimit(time.Duration(command.Opts.QueryCacheTTL)*time.Second, 50) + } + if !command.Opts.DisableMetadataCache { + MetadataCache = cache.New(time.Duration(command.Opts.MetadataCacheTTL) * time.Second) + } +} + // DB returns a database connection from the client context func DB(c *gin.Context) *client.Client { if command.Opts.Sessions { @@ -565,24 +590,15 @@ func GetTablesStats(c *gin.Context) { } } -// HandleQuery runs the database query -func HandleQuery(query string, c *gin.Context) { - metrics.IncrementQueriesCount() - - rawQuery, err := base64.StdEncoding.DecodeString(desanitize64(query)) - if err == nil { - query = string(rawQuery) - } - - result, err := DB(c).Query(query) - if err != nil { - badRequest(c, err) - return - } +// CachedResponse represents a cached final response +type CachedResponse 
struct { + Result *client.Result `json:"result"` + Format string `json:"format"` +} - format := getQueryParam(c, "format") +// handleFormatResponse serves the result in the requested format +func handleFormatResponse(c *gin.Context, result *client.Result, format string) { filename := getQueryParam(c, "filename") - if filename == "" { filename = fmt.Sprintf("pgweb-%v.%v", time.Now().Unix(), format) } @@ -603,6 +619,103 @@ func HandleQuery(query string, c *gin.Context) { } } +// generateQueryCacheKey creates a cache key for query results +func generateQueryCacheKey(query, connectionString, role string) string { + data := fmt.Sprintf("%s|%s|role:%s", query, connectionString, role) + hash := md5.Sum([]byte(data)) + return fmt.Sprintf("query:%x", hash) +} + +// isCacheableQuery checks if a query is safe to cache +func isCacheableQuery(query string) bool { + trimmed := strings.TrimSpace(query) + return selectQueryRegex.MatchString(trimmed) && + !strings.Contains(strings.ToLower(trimmed), "now()") && + !strings.Contains(strings.ToLower(trimmed), "current_timestamp") && + !strings.Contains(strings.ToLower(trimmed), "random()") +} + +// HandleQuery runs the database query +func HandleQuery(query string, c *gin.Context) { + metrics.IncrementQueriesCount() + + // Only attempt base64 decoding for GET requests (URL parameters) + // POST requests have plain text queries in form data + if c.Request.Method == "GET" { + rawQuery, err := base64.StdEncoding.DecodeString(desanitize64(query)) + if err == nil { + query = string(rawQuery) + } else { + if command.Opts.Debug { + fmt.Printf("[DEBUG] Base64 decode failed for GET query parameter: %q, error: %v\n", query, err) + } + } + } + + // Check cache for SELECT queries + conn := DB(c) + if conn == nil { + badRequest(c, errNotConnected) + return + } + + format := getQueryParam(c, "format") + + // Check cache first + if !command.Opts.DisableQueryCache && QueryCache != nil && isCacheableQuery(query) { + cacheKey := 
generateQueryCacheKey(query, conn.ConnectionString, conn.GetRole()) + if cached, found := QueryCache.Get(cacheKey); found { + // Return cached final response (already processed) + if cachedResp, ok := cached.(*CachedResponse); ok { + if command.Opts.Debug { + fmt.Printf("[CACHE] Query cache HIT for key: %s (rows: %d, role: %s) - returning final response\n", + cacheKey[6:16], len(cachedResp.Result.Rows), conn.GetRole()) + } + // Update timing to reflect cache retrieval (1-2ms) instead of original query time + cacheTime := time.Now() + cachedResp.Result.Stats.QueryStartTime = cacheTime.Add(-time.Millisecond).UTC() + cachedResp.Result.Stats.QueryFinishTime = cacheTime.UTC() + cachedResp.Result.Stats.QueryDuration = 1 // 1ms for cache hit + + // Serve cached result with proper format handling + handleFormatResponse(c, cachedResp.Result, cachedResp.Format) + return + } else { + if command.Opts.Debug { + fmt.Printf("[CACHE] Invalid cache entry, executing query\n") + } + } + } + } + + // Execute query + result, err := conn.Query(query) + if err != nil { + badRequest(c, err) + return + } + + // Post-process the result + result.PostProcess() + + // Cache the final processed result + if !command.Opts.DisableQueryCache && QueryCache != nil && isCacheableQuery(query) && len(result.Rows) <= 10000 { + cacheKey := generateQueryCacheKey(query, conn.ConnectionString, conn.GetRole()) + cachedResp := &CachedResponse{ + Result: result, + Format: format, + } + QueryCache.Set(cacheKey, cachedResp, time.Duration(command.Opts.QueryCacheTTL)*time.Second) + if command.Opts.Debug { + fmt.Printf("[CACHE] Query cache MISS, cached final response for key: %s (rows: %d, TTL: %ds, role: %s)\n", + cacheKey[6:16], len(result.Rows), command.Opts.QueryCacheTTL, conn.GetRole()) + } + } + + // Serve the result with proper format handling + handleFormatResponse(c, result, format) +} + // GetBookmarks renders the list of available bookmarks func GetBookmarks(c *gin.Context) { manager := 
bookmarks.NewManager(command.Opts.BookmarksDir) @@ -623,7 +736,7 @@ func GetInfo(c *gin.Context) { }) } -// GetConfig returns client configuration including custom parameter patterns +// GetConfig returns client configuration including custom parameter patterns and font settings func GetConfig(c *gin.Context) { // Get custom parameter patterns from environment variable customParams := os.Getenv("PGWEB_CUSTOM_PARAMS") @@ -632,6 +745,11 @@ func GetConfig(c *gin.Context) { "parameter_patterns": gin.H{ "custom": []string{}, }, + "fonts": gin.H{ + "family": command.Opts.FontFamily, + "size": command.Opts.FontSize, + "google_fonts": []string{}, + }, } // Add custom patterns if configured (no defaults) @@ -643,6 +761,11 @@ func GetConfig(c *gin.Context) { config["parameter_patterns"].(gin.H)["custom"] = customPatternsList } + // Add Google Fonts configuration + if command.Opts.GoogleFonts != "" { + config["fonts"].(gin.H)["google_fonts"] = command.Opts.GoogleFonts + } + successResponse(c, config) } @@ -768,3 +891,55 @@ func RunLocalQuery(c *gin.Context) { HandleQuery(statement, c) } + +// GetCacheStats renders cache statistics +func GetCacheStats(c *gin.Context) { + stats := map[string]interface{}{ + "caching_enabled": map[string]bool{ + "query_cache": !command.Opts.DisableQueryCache, + "metadata_cache": !command.Opts.DisableMetadataCache, + }, + "cache_ttl": map[string]uint{ + "query_cache_ttl": command.Opts.QueryCacheTTL, + "metadata_cache_ttl": command.Opts.MetadataCacheTTL, + }, + } + + if QueryCache != nil { + stats["query_cache"] = QueryCache.Stats() + } + + if MetadataCache != nil { + stats["metadata_cache"] = MetadataCache.Stats() + } + + successResponse(c, stats) +} + +// ClearCache clears all cache entries +func ClearCache(c *gin.Context) { + cleared := []string{} + + if QueryCache != nil { + QueryCache.Clear() + cleared = append(cleared, "query_cache") + } + + if MetadataCache != nil { + MetadataCache.Clear() + cleared = append(cleared, "metadata_cache") + } + 
+ if len(cleared) == 0 { + successResponse(c, gin.H{ + "message": "No caches to clear (caching disabled)", + "cleared": cleared, + }) + return + } + + successResponse(c, gin.H{ + "message": "Caches cleared successfully", + "cleared": cleared, + }) +} diff --git a/pkg/api/routes.go b/pkg/api/routes.go index 879524657..df58e6c69 100644 --- a/pkg/api/routes.go +++ b/pkg/api/routes.go @@ -58,6 +58,8 @@ func SetupRoutes(router *gin.Engine) { api.GET("/history", GetHistory) api.GET("/bookmarks", GetBookmarks) api.GET("/export", DataExport) + api.GET("/cache/stats", GetCacheStats) + api.POST("/cache/clear", ClearCache) api.GET("/local_queries", requireLocalQueries(), GetLocalQueries) api.GET("/local_queries/:id", requireLocalQueries(), RunLocalQuery) api.POST("/local_queries/:id", requireLocalQueries(), RunLocalQuery) diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go new file mode 100644 index 000000000..ca7b86272 --- /dev/null +++ b/pkg/cache/cache.go @@ -0,0 +1,328 @@ +package cache + +import ( + "crypto/md5" + "fmt" + "reflect" + "sync" + "time" + "unsafe" +) + +type item struct { + value interface{} + expiresAt time.Time + size int64 // Estimated memory size in bytes +} + +type Cache struct { + items map[string]*item + mu sync.RWMutex + defaultTTL time.Duration + maxItems int // Maximum number of items (0 = unlimited) + maxMemory int64 // Maximum memory usage in bytes (0 = unlimited) + currentSize int64 // Current memory usage tracking +} + +func New(defaultTTL time.Duration) *Cache { + c := &Cache{ + items: make(map[string]*item), + defaultTTL: defaultTTL, + maxItems: 500, // Reasonable default max items + maxMemory: 100 * 1024 * 1024, // Default 100MB memory limit + } + + // Start cleanup goroutine + go c.cleanup() + + return c +} + +// NewWithMaxItems creates a cache with a specific maximum number of items +func NewWithMaxItems(defaultTTL time.Duration, maxItems int) *Cache { + c := &Cache{ + items: make(map[string]*item), + defaultTTL: defaultTTL, + maxItems: 
maxItems, + maxMemory: 100 * 1024 * 1024, // Default 100MB memory limit + } + + // Start cleanup goroutine + go c.cleanup() + + return c +} + +// NewWithMemoryLimit creates a cache with a specific memory limit +func NewWithMemoryLimit(defaultTTL time.Duration, maxMemoryMB int) *Cache { + c := &Cache{ + items: make(map[string]*item), + defaultTTL: defaultTTL, + maxItems: 10000, // High item limit when using memory-based limiting + maxMemory: int64(maxMemoryMB) * 1024 * 1024, // Convert MB to bytes + } + + // Start cleanup goroutine + go c.cleanup() + + return c +} + +// NewWithoutCleanup creates a cache without starting the cleanup goroutine +// Useful for testing or when cleanup is managed externally +func NewWithoutCleanup(defaultTTL time.Duration) *Cache { + return &Cache{ + items: make(map[string]*item), + defaultTTL: defaultTTL, + maxItems: 500, // Default max items + maxMemory: 100 * 1024 * 1024, // Default 100MB memory limit + } +} + +func (c *Cache) Set(key string, value interface{}, ttl time.Duration) { + if ttl == 0 { + ttl = c.defaultTTL + } + + c.mu.Lock() + defer c.mu.Unlock() + + // If maxItems is set and we would exceed it, remove oldest expired items first + if c.maxItems > 0 && len(c.items) >= c.maxItems { + c.evictExpired() + + // If we still exceed the limit after cleaning expired items, remove oldest items + if len(c.items) >= c.maxItems { + c.evictOldest(len(c.items) - c.maxItems + 1) + } + } + + // Estimate the memory size of the value + itemSize := c.estimateSize(value) + + // If replacing existing item, subtract its size first + if existingItem, exists := c.items[key]; exists { + c.currentSize -= existingItem.size + } + + // Check memory limit first (more important than item count) + if c.maxMemory > 0 && c.currentSize+itemSize > c.maxMemory { + c.evictToFitMemory(itemSize) + } + + newItem := &item{ + value: value, + expiresAt: time.Now().Add(ttl), + size: itemSize, + } + + c.items[key] = newItem + c.currentSize += itemSize +} + +func (c 
*Cache) Get(key string) (interface{}, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + item, exists := c.items[key] + if !exists { + return nil, false + } + + if time.Now().After(item.expiresAt) { + // Item expired, will be cleaned up later + return nil, false + } + + return item.value, true +} + +func (c *Cache) Delete(key string) { + c.mu.Lock() + defer c.mu.Unlock() + if item, exists := c.items[key]; exists { + c.currentSize -= item.size + delete(c.items, key) + } +} + +func (c *Cache) Clear() { + c.mu.Lock() + defer c.mu.Unlock() + c.items = make(map[string]*item) + c.currentSize = 0 +} + +func (c *Cache) cleanup() { + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + + for range ticker.C { + c.mu.Lock() + now := time.Now() + for key, item := range c.items { + if now.After(item.expiresAt) { + delete(c.items, key) + } + } + c.mu.Unlock() + } +} + +func (c *Cache) Stats() map[string]interface{} { + c.mu.RLock() + defer c.mu.RUnlock() + + expired := 0 + now := time.Now() + for _, item := range c.items { + if now.After(item.expiresAt) { + expired++ + } + } + + return map[string]interface{}{ + "total_items": len(c.items), + "expired_items": expired, + "active_items": len(c.items) - expired, + "memory_used_mb": c.currentSize / (1024 * 1024), + "memory_limit_mb": c.maxMemory / (1024 * 1024), + "memory_used_bytes": c.currentSize, + } +} + +// estimateSize estimates the memory size of a value in bytes +func (c *Cache) estimateSize(value interface{}) int64 { + if value == nil { + return 8 // pointer size + } + + // Use reflection to get the approximate size + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String: + return int64(len(v.String()) + 16) // string header + data + case reflect.Slice, reflect.Array: + size := int64(v.Len()) * 8 // estimate 8 bytes per element as baseline + // For slice of interfaces or complex types, add more + if v.Len() > 0 && v.Index(0).Kind() == reflect.Interface { + size *= 4 // interfaces are more 
expensive + } + return size + 24 // slice header + case reflect.Map: + return int64(v.Len()) * 32 // estimate 32 bytes per map entry + case reflect.Ptr: + if v.IsNil() { + return 8 + } + return 8 + c.estimateSize(v.Elem().Interface()) + case reflect.Struct: + // For structs, estimate based on number of fields * average field size + return int64(v.NumField()) * 16 + default: + return int64(unsafe.Sizeof(value)) + } +} + +// evictToFitMemory removes items until there's enough space for newItemSize +func (c *Cache) evictToFitMemory(newItemSize int64) { + targetSize := c.maxMemory - newItemSize + + // First try to remove expired items + c.evictExpired() + + // If still not enough space, remove oldest items by size + if c.currentSize > targetSize { + c.evictOldestBySize(c.currentSize - targetSize) + } +} + +// evictExpired removes all expired items (called with lock held) +func (c *Cache) evictExpired() { + now := time.Now() + for key, item := range c.items { + if now.After(item.expiresAt) { + c.currentSize -= item.size + delete(c.items, key) + } + } +} + +// evictOldest removes the N oldest items by expiration time (called with lock held) +func (c *Cache) evictOldest(count int) { + if count <= 0 { + return + } + + // Collect items with their keys and sort by expiration time + type keyItem struct { + key string + item *item + } + + items := make([]keyItem, 0, len(c.items)) + for key, item := range c.items { + items = append(items, keyItem{key, item}) + } + + // Sort by expiration time (oldest first) + for i := 0; i < len(items); i++ { + for j := i + 1; j < len(items); j++ { + if items[i].item.expiresAt.After(items[j].item.expiresAt) { + items[i], items[j] = items[j], items[i] + } + } + } + + // Remove the oldest count items + for i := 0; i < count && i < len(items); i++ { + c.currentSize -= items[i].item.size + delete(c.items, items[i].key) + } +} + +// evictOldestBySize removes items until the specified amount of memory is freed +func (c *Cache) 
evictOldestBySize(targetBytesToFree int64) { + if targetBytesToFree <= 0 { + return + } + + // Collect items with their keys and sort by expiration time (oldest first) + type keyItem struct { + key string + item *item + } + + items := make([]keyItem, 0, len(c.items)) + for key, item := range c.items { + items = append(items, keyItem{key, item}) + } + + // Sort by expiration time (oldest first) + for i := 0; i < len(items); i++ { + for j := i + 1; j < len(items); j++ { + if items[i].item.expiresAt.After(items[j].item.expiresAt) { + items[i], items[j] = items[j], items[i] + } + } + } + + // Remove items until we've freed enough memory + freedBytes := int64(0) + for i := 0; i < len(items) && freedBytes < targetBytesToFree; i++ { + freedBytes += items[i].item.size + c.currentSize -= items[i].item.size + delete(c.items, items[i].key) + } +} + +// GenerateKey creates a cache key from multiple string components +func GenerateKey(components ...string) string { + combined := "" + for _, comp := range components { + combined += comp + "|" + } + + hash := md5.Sum([]byte(combined)) + return fmt.Sprintf("%x", hash) +} diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go new file mode 100644 index 000000000..23f7861bd --- /dev/null +++ b/pkg/cache/cache_test.go @@ -0,0 +1,57 @@ +package cache + +import ( + "testing" + "time" +) + +func TestCache_BasicOperations(t *testing.T) { + cache := New(5 * time.Second) + defer cache.Clear() + + // Test Set and Get + cache.Set("test_key", "test_value", 0) + + value, found := cache.Get("test_key") + if !found { + t.Error("Expected to find cached value") + } + if value.(string) != "test_value" { + t.Errorf("Expected 'test_value', got %v", value) + } +} + +func TestCache_Expiration(t *testing.T) { + cache := New(100 * time.Millisecond) + defer cache.Clear() + + cache.Set("expire_key", "expire_value", 100*time.Millisecond) + + // Should be found immediately + _, found := cache.Get("expire_key") + if !found { + t.Error("Expected to 
find value immediately after setting") + } + + // Wait for expiration + time.Sleep(150 * time.Millisecond) + + _, found = cache.Get("expire_key") + if found { + t.Error("Expected value to be expired") + } +} + +func TestCache_GenerateKey(t *testing.T) { + key1 := GenerateKey("component1", "component2") + key2 := GenerateKey("component1", "component2") + key3 := GenerateKey("component1", "different") + + if key1 != key2 { + t.Error("Same components should generate same key") + } + + if key1 == key3 { + t.Error("Different components should generate different keys") + } +} diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go index 277525360..7e329d90e 100644 --- a/pkg/cli/cli.go +++ b/pkg/cli/cli.go @@ -296,6 +296,13 @@ func testClient(cl *client.Client, retryCount int, retryDelay time.Duration) (ab func Run() { initOptions() + + // Initialize caches after options are loaded + api.InitializeCaches() + + // Set shared metadata cache reference in client package + client.MetadataCache = api.MetadataCache + initClient() if api.DbClient != nil { diff --git a/pkg/client/client.go b/pkg/client/client.go index af46c7386..8759da19b 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -2,6 +2,7 @@ package client import ( "context" + "crypto/md5" "errors" "fmt" "log" @@ -15,6 +16,7 @@ import ( _ "github.com/lib/pq" "github.com/flowbi/pgweb/pkg/bookmarks" + "github.com/flowbi/pgweb/pkg/cache" "github.com/flowbi/pgweb/pkg/command" "github.com/flowbi/pgweb/pkg/connection" "github.com/flowbi/pgweb/pkg/history" @@ -22,6 +24,9 @@ import ( "github.com/flowbi/pgweb/pkg/statements" ) +// Shared metadata cache - will be set by API package +var MetadataCache *cache.Cache + var ( regexErrAuthFailed = regexp.MustCompile(`(authentication failed|role "(.*)" does not exist)`) regexErrConnectionRefused = regexp.MustCompile(`(connection|actively) refused`) @@ -144,6 +149,14 @@ type Client struct { External bool `json:"external"` History []history.Record `json:"history"` ConnectionString string 
`json:"connection_string"` + // Remove per-client cache - we'll use shared cache instead +} + +// generateMetadataCacheKey creates a cache key for metadata queries +func (client *Client) generateMetadataCacheKey(queryType string, params ...string) string { + data := fmt.Sprintf("%s|%s|%s", client.ConnectionString, queryType, strings.Join(params, "|")) + hash := md5.Sum([]byte(data)) + return fmt.Sprintf("metadata:%x", hash) } func getSchemaAndTable(str string) (string, string) { @@ -351,6 +364,13 @@ func (client *Client) TestWithTimeout(timeout time.Duration) (result error) { } func (client *Client) Info() (*Result, error) { + cacheKey := client.generateMetadataCacheKey("info") + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found { + return cached.(*Result), nil + } + } + result, err := client.query(statements.Info) if err != nil { msg := err.Error() @@ -359,6 +379,11 @@ func (client *Client) Info() (*Result, error) { result, err = client.query(statements.InfoSimple) } } + + if err == nil && MetadataCache != nil { + MetadataCache.Set(cacheKey, result, 10*time.Minute) + } + return result, err } @@ -367,6 +392,13 @@ func (client *Client) Databases() ([]string, error) { } func (client *Client) Schemas() ([]string, error) { + cacheKey := client.generateMetadataCacheKey("schemas", command.Opts.HideSchemas) + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found { + return cached.([]string), nil + } + } + schemas, err := client.fetchRows(statements.Schemas) if err != nil { return nil, err @@ -378,10 +410,22 @@ func (client *Client) Schemas() ([]string, error) { return nil, fmt.Errorf("failed to compile schema hide patterns: %v", err) } - return FilterStringSlice(schemas, patterns), nil + filteredSchemas := FilterStringSlice(schemas, patterns) + if MetadataCache != nil { + MetadataCache.Set(cacheKey, filteredSchemas, 10*time.Minute) + } + + return filteredSchemas, nil } func (client *Client) Objects() (*Result, 
error) { + cacheKey := client.generateMetadataCacheKey("objects", command.Opts.HideSchemas, command.Opts.HideObjects) + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found { + return cached.(*Result), nil + } + } + result, err := client.query(statements.Objects) if err != nil { return nil, err @@ -399,12 +443,30 @@ func (client *Client) Objects() (*Result, error) { return nil, fmt.Errorf("failed to compile object hide patterns: %v", err) } - return filterObjectsResult(result, schemaPatterns, objectPatterns), nil + filteredResult := filterObjectsResult(result, schemaPatterns, objectPatterns) + if MetadataCache != nil { + MetadataCache.Set(cacheKey, filteredResult, 10*time.Minute) + } + + return filteredResult, nil } func (client *Client) Table(table string) (*Result, error) { - schema, table := getSchemaAndTable(table) - return client.query(statements.TableSchema, schema, table) + schema, tableName := getSchemaAndTable(table) + cacheKey := client.generateMetadataCacheKey("table", schema, tableName) + + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found { + return cached.(*Result), nil + } + } + + result, err := client.query(statements.TableSchema, schema, tableName) + if err == nil && MetadataCache != nil { + MetadataCache.Set(cacheKey, result, 10*time.Minute) + } + + return result, err } func (client *Client) MaterializedView(name string) (*Result, error) { @@ -526,11 +588,22 @@ func (client *Client) TableRowsCount(table string, opts RowsOptions) (*Result, e } func (client *Client) TableInfo(table string) (*Result, error) { - if client.serverType == cockroachType { - return client.query(statements.TableInfoCockroach) + schema, tableName := getSchemaAndTable(table) + cacheKey := client.generateMetadataCacheKey("table_info", schema, tableName, client.serverType) + + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found { + return cached.(*Result), nil + } } - schema, tableName 
:= getSchemaAndTable(table) + if client.serverType == cockroachType { + result, err := client.query(statements.TableInfoCockroach) + if err == nil && MetadataCache != nil { + MetadataCache.Set(cacheKey, result, 10*time.Minute) + } + return result, err + } // Check if this is a foreign table isForeign, err := client.isForeignTable(schema, tableName) @@ -549,29 +622,51 @@ func (client *Client) TableInfo(table string) (*Result, error) { {"N/A", "N/A", "N/A", "Unknown", true}, }, } + if MetadataCache != nil { + MetadataCache.Set(cacheKey, result, 10*time.Minute) + } return result, nil } - return client.query(statements.TableInfo, fmt.Sprintf(`"%s"."%s"`, schema, tableName)) + result, err := client.query(statements.TableInfo, fmt.Sprintf(`"%s"."%s"`, schema, tableName)) + if err == nil && MetadataCache != nil { + MetadataCache.Set(cacheKey, result, 10*time.Minute) + } + + return result, err } func (client *Client) TableIndexes(table string) (*Result, error) { - schema, table := getSchemaAndTable(table) - res, err := client.query(statements.TableIndexes, schema, table) + schema, tableName := getSchemaAndTable(table) + cacheKey := client.generateMetadataCacheKey("table_indexes", schema, tableName) - if err != nil { - return nil, err + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found { + return cached.(*Result), nil + } + } + + res, err := client.query(statements.TableIndexes, schema, tableName) + if err == nil && MetadataCache != nil { + MetadataCache.Set(cacheKey, res, 10*time.Minute) } return res, err } func (client *Client) TableConstraints(table string) (*Result, error) { - schema, table := getSchemaAndTable(table) - res, err := client.query(statements.TableConstraints, schema, table) + schema, tableName := getSchemaAndTable(table) + cacheKey := client.generateMetadataCacheKey("table_constraints", schema, tableName) - if err != nil { - return nil, err + if MetadataCache != nil { + if cached, found := MetadataCache.Get(cacheKey); found 
{ + return cached.(*Result), nil + } + } + + res, err := client.query(statements.TableConstraints, schema, tableName) + if err == nil && MetadataCache != nil { + MetadataCache.Set(cacheKey, res, 10*time.Minute) } return res, err @@ -881,6 +976,11 @@ func (client *Client) SetRole(role string) { } } +// GetRole returns the current role for the client +func (client *Client) GetRole() string { + return client.defaultRole +} + // isValidRoleName validates that the role name matches expected pattern func isValidRoleName(role string) bool { // Allow alphanumeric characters, underscores, and typical user patterns diff --git a/pkg/command/options.go b/pkg/command/options.go index fe51bdf6d..3dbe6a851 100644 --- a/pkg/command/options.go +++ b/pkg/command/options.go @@ -6,6 +6,7 @@ import ( "os" "os/user" "path/filepath" + "strconv" "strings" "github.com/jackc/pgpassfile" @@ -68,6 +69,13 @@ type Options struct { MetricsAddr string `long:"metrics-addr" description:"Listen host and port for Prometheus metrics server"` HideSchemas string `long:"hide-schemas" description:"Comma-separated list of regex patterns to hide schemas (e.g., 'public,meta')"` HideObjects string `long:"hide-objects" description:"Comma-separated list of regex patterns to hide objects/tables (e.g., '^temp_,_backup$')"` + FontFamily string `long:"font-family" description:"CSS font family to use (e.g., 'Inter', 'Roboto', 'Space Grotesk')"` + FontSize string `long:"font-size" description:"CSS font size to use (e.g., '14px', '16px')" default:"14px"` + GoogleFonts string `long:"google-fonts" description:"Comma-separated list of Google Fonts to preload (e.g., 'Inter:300,400,500,700')"` + DisableQueryCache bool `long:"no-query-cache" description:"Disable query result caching"` + DisableMetadataCache bool `long:"no-metadata-cache" description:"Disable metadata caching"` + QueryCacheTTL uint `long:"query-cache-ttl" description:"Query cache TTL in seconds" default:"300"` + MetadataCacheTTL uint 
`long:"metadata-cache-ttl" description:"Metadata cache TTL in seconds" default:"600"` } var Opts Options @@ -164,6 +172,45 @@ func ParseOptions(args []string) (Options, error) { opts.HideObjects = getPrefixedEnvVar("HIDE_OBJECTS") } + if opts.FontFamily == "" { + opts.FontFamily = getPrefixedEnvVar("FONT_FAMILY") + } + + if opts.FontSize == "" || opts.FontSize == "14px" { + if envFontSize := getPrefixedEnvVar("FONT_SIZE"); envFontSize != "" { + opts.FontSize = envFontSize + } + } + + if opts.GoogleFonts == "" { + opts.GoogleFonts = getPrefixedEnvVar("GOOGLE_FONTS") + } + + // Cache configuration from environment variables + if envDisableQueryCache := getPrefixedEnvVar("DISABLE_QUERY_CACHE"); envDisableQueryCache != "" { + if envDisableQueryCache == "true" || envDisableQueryCache == "1" { + opts.DisableQueryCache = true + } + } + + if envDisableMetadataCache := getPrefixedEnvVar("DISABLE_METADATA_CACHE"); envDisableMetadataCache != "" { + if envDisableMetadataCache == "true" || envDisableMetadataCache == "1" { + opts.DisableMetadataCache = true + } + } + + if envQueryCacheTTL := getPrefixedEnvVar("QUERY_CACHE_TTL"); envQueryCacheTTL != "" { + if ttl, err := strconv.ParseUint(envQueryCacheTTL, 10, 32); err == nil { + opts.QueryCacheTTL = uint(ttl) + } + } + + if envMetadataCacheTTL := getPrefixedEnvVar("METADATA_CACHE_TTL"); envMetadataCacheTTL != "" { + if ttl, err := strconv.ParseUint(envMetadataCacheTTL, 10, 32); err == nil { + opts.MetadataCacheTTL = uint(ttl) + } + } + if opts.ConnectBackend != "" { if !opts.Sessions { return opts, errors.New("--sessions flag must be set") @@ -253,5 +300,8 @@ func AvailableEnvVars() string { " " + envVarPrefix + "AUTH_PASS HTTP basic auth password", " " + envVarPrefix + "HIDE_SCHEMAS Comma-separated regex patterns to hide schemas", " " + envVarPrefix + "HIDE_OBJECTS Comma-separated regex patterns to hide objects/tables", + " " + envVarPrefix + "FONT_FAMILY CSS font family to use", + " " + envVarPrefix + "FONT_SIZE CSS font size 
to use (default: 14px)", + " " + envVarPrefix + "GOOGLE_FONTS Comma-separated list of Google Fonts to preload", }, "\n") } diff --git a/pkg/statements/sql.go b/pkg/statements/sql.go index 46f8cc851..aaccbd8a4 100644 --- a/pkg/statements/sql.go +++ b/pkg/statements/sql.go @@ -5,6 +5,9 @@ import ( "log" "os" "path/filepath" + "time" + + "github.com/flowbi/pgweb/pkg/cache" ) var ( @@ -65,18 +68,49 @@ var ( "9.5": "SELECT datname, query, state, waiting, query_start, state_change, pid, datid, application_name, client_addr FROM pg_stat_activity WHERE datname = current_database() and usename = current_user", "9.6": "SELECT datname, query, state, wait_event, wait_event_type, query_start, state_change, pid, datid, application_name, client_addr FROM pg_stat_activity WHERE datname = current_database() and usename = current_user", } + + // Cache for external SQL files + sqlFileCache *cache.Cache ) func init() { + sqlFileCache = cache.New(30 * time.Minute) TableConstraints = loadTableConstraintsSQL() } func loadTableConstraintsSQL() string { externalPath := filepath.Join("/tmp/queries", "table_constraints.sql") - if data, err := os.ReadFile(externalPath); err == nil { - log.Printf("Using external table_constraints.sql from: %s", externalPath) - return string(data) + + // Check cache first + cacheKey := cache.GenerateKey("sql_file", externalPath) + if cached, found := sqlFileCache.Get(cacheKey); found { + return cached.(string) + } + + // Check if external file exists and get its mod time + if stat, err := os.Stat(externalPath); err == nil { + // Check if we have cached this file with its mod time + modTimeCacheKey := cache.GenerateKey("sql_file_modtime", externalPath, stat.ModTime().String()) + if cached, found := sqlFileCache.Get(modTimeCacheKey); found { + // Cache the content with the general key as well + content := cached.(string) + sqlFileCache.Set(cacheKey, content, 30*time.Minute) + return content + } + + // Read and cache the file + if data, err := 
os.ReadFile(externalPath); err == nil { + content := string(data) + log.Printf("Using external table_constraints.sql from: %s", externalPath) + + // Cache with both keys + sqlFileCache.Set(cacheKey, content, 30*time.Minute) + sqlFileCache.Set(modTimeCacheKey, content, 30*time.Minute) + return content + } } + // Cache the embedded fallback + sqlFileCache.Set(cacheKey, tableConstraintsEmbedded, 30*time.Minute) return tableConstraintsEmbedded } diff --git a/static/css/app.css b/static/css/app.css index 0a7a2dd1e..da668b812 100644 --- a/static/css/app.css +++ b/static/css/app.css @@ -2,6 +2,16 @@ --pgweb-primary-color: #79589f; --pgweb-primary-text-muted: #d6cce2; --pgweb-primary-text: #fff; + --pgweb-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif; + --pgweb-font-size: 14px; + --pgweb-mono-font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; +} + +/* Base typography */ +body, html { + font-family: var(--pgweb-font-family); + font-size: var(--pgweb-font-size); + line-height: 1.4; } /* Tab hiding support */ diff --git a/static/index.html b/static/index.html index a20c0c502..234a4ec22 100644 --- a/static/index.html +++ b/static/index.html @@ -7,6 +7,8 @@ + + @@ -246,6 +248,7 @@

pgweb

+

diff --git a/static/js/app.js b/static/js/app.js index 8e951bf39..5d68d05b7 100644 --- a/static/js/app.js +++ b/static/js/app.js @@ -24,6 +24,7 @@ var filterOptions = { "not_null": "IS NOT NULL" }; + function getLuminance(hex) { // Handle 3-digit hex colors if (hex.length === 4) { @@ -129,6 +130,48 @@ function initializeTheme() { } } +function initializeFontsFromConfig() { + apiCall('get', '/config/fonts', {}, function(data) { + if (data.family || data.size || data.google_fonts) { + var fontFamily = data.family || 'inherit'; + var fontSize = data.size || 'inherit'; + var googleFonts = data.google_fonts; + + // Load Google Fonts if specified + if (googleFonts) { + var link = document.createElement('link'); + link.rel = 'stylesheet'; + link.href = 'https://fonts.googleapis.com/css2?family=' + encodeURIComponent(googleFonts) + '&display=swap'; + document.head.appendChild(link); + + // Wait for font to load before applying to editor + link.onload = function() { + applyFontsToEditor(fontFamily, fontSize); + }; + } else { + // Apply fonts immediately if no Google Fonts to load + applyFontsToEditor(fontFamily, fontSize); + } + + // Apply font family and size to CSS variables immediately + document.documentElement.style.setProperty('--pgweb-font-family', fontFamily); + document.documentElement.style.setProperty('--pgweb-font-size', fontSize); + } + }); +} + +function applyFontsToEditor(fontFamily, fontSize) { + // Apply font to Ace editor if it exists and force layout refresh + if (typeof editor !== 'undefined' && editor) { + editor.setOptions({ + fontFamily: fontFamily, + fontSize: fontSize + }); + // Force editor to recalculate character widths + editor.resize(true); + } +} + function getSessionId() { var id = sessionStorage.getItem("session_id"); @@ -1843,6 +1886,9 @@ $(document).ready(function() { // Initialize theme from URL parameters initializeTheme(); + // Initialize fonts from server configuration + initializeFontsFromConfig(); + bindInputResizeEvents(); 
bindContentModalEvents();