diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 27356c9af..e9734eb48 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -43,6 +43,10 @@ jobs: NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')") echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT + - name: Update Changelog + run: | + python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }} + - name: Set repository metadata id: meta run: | @@ -54,7 +58,7 @@ jobs: - name: Commit and Tag run: | - git add version.py + git add version.py CHANGELOG.md git commit -m "Release v${{ steps.update_version.outputs.new_version }}" git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}" git push origin main --tags diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..c188bd4ca --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,837 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +- `CHANGELOG.md` file following Keep a Changelog format to document all notable changes and project history +- System event logging and viewer: Comprehensive logging system that tracks internal application events (M3U refreshes, EPG updates, stream switches, errors) with a dedicated UI viewer for filtering and reviewing historical events. Improves monitoring, troubleshooting, and understanding system behavior +- M3U/EPG endpoint caching: Implements intelligent caching for frequently requested M3U playlists and EPG data to reduce database load and improve response times for clients. 
+- Search icon to name headers for the channels and streams tables (#686) +- Comprehensive logging for user authentication events and network access restrictions +- Validation for EPG objects and payloads in updateEPG functions to prevent errors from invalid data +- Referrerpolicy to YouTube iframes in series and VOD modals for better compatibility + +### Changed + +- XC player API now returns server_info for unknown actions to align with provider behavior +- XC player API refactored to streamline action handling and ensure consistent responses +- Date parsing logic in generate_custom_dummy_programs improved to handle empty or invalid inputs +- UI now reflects date and time formats chosen by user - Thanks [@Biologisten](https://github.com/Biologisten) +- "Uncategorized" categories and relations now automatically created for VOD accounts to improve content management (#627) +- Improved minimum horizontal size in the stats page for better usability on smaller displays +- M3U and EPG generation now handles missing channel profiles with appropriate error logging + +### Fixed + +- Episode URLs in series modal now use UUID instead of ID, fixing broken links (#684, #694) +- Stream preview now respects selected M3U profile instead of always using default profile (#690) +- Channel groups filter in M3UGroupFilter component now filters out non-existent groups (prevents blank webui when editing M3U after a group was removed) +- Stream order now preserved in PATCH/PUT responses from ChannelSerializer, ensuring consistent ordering across all API operations - Thanks [@FiveBoroughs](https://github.com/FiveBoroughs) (#643) +- XC client compatibility: float channel numbers now converted to integers +- M3U account and profile modals now scrollable on mobile devices for improved usability + +## [0.12.0] - 2025-11-19 + +### Added + +- RTSP stream support with automatic protocol detection when a proxy profile requires it. The proxy now forces FFmpeg for RTSP sources and properly handles RTSP URLs - Thanks [@ragchuck](https://github.com/ragchuck) (#184) +- UDP stream support, including correct handling when a proxy profile specifies a UDP source. The proxy now skips HTTP-specific headers (like `user_agent`) for non-HTTP protocols and performs manual redirect handling to improve reliability (#617) +- Separate VOD logos system with a new `VODLogo` model, database migration, dedicated API/viewset, and server-paginated UI. 
This separates movie/series logos from channel logos, making cleanup safer and enabling independent bulk operations + +### Changed + +- Background profile refresh now uses a rate-limiting/backoff strategy to avoid provider bans +- Bulk channel editing now validates all requested changes up front and applies updates in a single database transaction +- ProxyServer shutdown & ghost-client handling improved to avoid initializing channels for transient clients and prevent duplicate reinitialization during rapid reconnects +- URL / Stream validation expanded to support credentials on non-FQDN hosts, skips HTTP-only checks for RTSP/RTP/UDP streams, and improved host/port normalization +- TV guide scrolling & timeline synchronization improved with mouse-wheel scrolling, synchronized timeline position with guide navigation, and improved mobile momentum scrolling (#252) +- EPG Source dropdown now sorts alphabetically - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff) +- M3U POST handling restored and improved for clients (e.g., Smarters) that request playlists using HTTP POST - Thanks [@maluueu](https://github.com/maluueu) +- Login form revamped with branding, cleaner layout, loading state, "Remember Me" option, and focused sign-in flow +- Series & VOD now have copy-link buttons in modals for easier URL sharing +- `get_host_and_port` now prioritizes verified port sources and handles reverse-proxy edge cases more accurately (#618) + +### Fixed + +- EXTINF parsing overhauled to correctly extract attributes such as `tvg-id`, `tvg-name`, and `group-title`, even when values include quotes or commas (#637) +- Websocket payload size reduced during EPG processing to avoid UI freezes, blank screens, or memory spikes in the browser (#327) +- Logo management UI fixes including confirmation dialogs, header checkbox reset, delete button reliability, and full client refetch after cleanup + +## [0.11.2] - 2025-11-04 + +### Added + +- Custom Dummy EPG improvements: + - Support for using an existing Custom Dummy EPG as a template for creating new EPGs + - Custom fallback templates for unmatched patterns + - `{endtime}` as an available output placeholder and renamed `{time}` → `{starttime}` (#590) + - Support for date placeholders that respect both source and output timezones (#597) + - Ability to bulk assign Custom Dummy EPGs to multiple channels + - "Include New Tag" option to mark programs as new in Dummy EPG output + - Support for month strings in date parsing + - Ability to set custom posters and channel logos via regex patterns for Custom Dummy EPGs + - Improved DST handling by calculating offsets based on the actual program date, not today's date + +### Changed + +- Stream model maximum URL length increased from 2000 to 4096 characters (#585) +- Groups now sorted during `xc_get_live_categories` based on the order they first appear (by lowest channel number) +- Client TTL settings updated and periodic refresh implemented during active streaming to maintain accurate connection tracking +- `ProgramData.sub_title` field changed from `CharField` to `TextField` to allow subtitles longer than 255 characters (#579) +- Startup improved by verifying `/data` directory ownership and automatically fixing permissions if needed. 
Pre-creates `/data/models` during initialization (#614) +- Port detection enhanced to check `request.META.get("SERVER_PORT")` before falling back to defaults, ensuring correct port when generating M3U, EPG, and logo URLs - Thanks [@lasharor](https://github.com/lasharor) + +### Fixed + +- Custom Dummy EPG frontend DST calculation now uses program date instead of current date +- Channel titles no longer truncated early after an apostrophe - Thanks [@0x53c65c0a8bd30fff](https://github.com/0x53c65c0a8bd30fff) + +## [0.11.1] - 2025-10-22 + +### Fixed + +- uWSGI not receiving environmental variables +- LXC unable to access daemons launched by uWSGI ([#575](https://github.com/Dispatcharr/Dispatcharr/issues/575), [#576](https://github.com/Dispatcharr/Dispatcharr/issues/576), [#577](https://github.com/Dispatcharr/Dispatcharr/issues/577)) + +## [0.11.0] - 2025-10-22 + +### Added + +- Custom Dummy EPG system: + - Regex pattern matching and name source selection + - Support for custom upcoming and ended programs + - Timezone-aware with source and local timezone selection + - Option to include categories and date/live tags in Dummy EPG output + - (#293) +- Auto-Enable & Category Improvements: + - Auto-enable settings for new groups and categories in M3U and VOD components (#208) +- IPv6 CIDR validation in Settings - Thanks [@jordandalley](https://github.com/jordandalley) (#236) +- Custom logo support for channel groups in Auto Sync Channels (#555) +- Tooltips added to the Stream Table + +### Changed + +- Celery and uWSGI now have configurable `nice` levels (defaults: `uWSGI=0`, `Celery=5`) to prioritize streaming when needed. (#571) +- Directory creation and ownership management refactored in init scripts to avoid unnecessary recursive `chown` operations and improve boot speed +- HTTP streamer switched to threaded model with piped output for improved robustness +- Chunk timeout configuration improved and StreamManager timeout handling enhanced +- Proxy timeout values reduced to avoid unnecessary waiting +- Resource cleanup improved to prevent "Too many open files" errors +- Proxy settings caching implemented and database connections properly closed after use +- EPG program fetching optimized with chunked retrieval and explicit ordering to reduce memory usage during output +- EPG output now sorted by channel number for consistent presentation +- Stream Table buttons reordered for better usability +- Database connection handling improved throughout the codebase to reduce overall connection count + +### Fixed + +- Crash when resizing columns in the Channel Table (#516) +- Errors when saving stream settings (#535) +- Preview and edit bugs for custom streams where profile and group selections did not display correctly +- `channel_id` and `channel.uuid` now converted to strings before processing to fix manual switching when the uWSGI worker was not the stream owner (#269) +- Stream locking and connection search issues when switching channels; increased search timeout to reduce premature failures (#503) +- Stream Table buttons no longer shift into multiple rows when selecting many streams +- Custom stream previews +- Custom Stream settings not loading properly (#186) +- Orphaned categories now automatically removed for VOD and Series during M3U refresh (#540) + +## [0.10.4] - 2025-10-08 + +### Added + +- "Assign TVG-ID from EPG" functionality with frontend actions for single-channel and batch operations +- Confirmation dialogs in `ChannelBatchForm` for setting names, logos, TVG-IDs, and clearing EPG 
assignments +- "Clear EPG" button to `ChannelBatchForm` for easy reset of assignments +- Batch editing of channel logos - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- Ability to set logo name from URL - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- Proper timestamp tracking for channel creation and updates; `XC Get Live Streams` now uses this information +- Time Zone Settings added to the application ([#482](https://github.com/Dispatcharr/Dispatcharr/issues/482), [#347](https://github.com/Dispatcharr/Dispatcharr/issues/347)) +- Comskip settings support including comskip.ini upload and custom directory selection (#418) +- Manual recording scheduling for channels without EPG data (#162) + +### Changed + +- Default M3U account type is now set to XC for new accounts +- Performance optimization: Only fetch playlists and channel profiles after a successful M3U refresh (rather than every status update) +- Playlist retrieval now includes current connection counts and improved session handling during VOD session start +- Improved stream selection logic when all profiles have reached max connections (retries faster) + +### Fixed + +- Large EPGs now fully parse all channels +- Duplicate channel outputs for streamer profiles set to "All" +- Streamer profiles with "All" assigned now receive all eligible channels +- PostgreSQL btree index errors from logo URL validation during channel creation (#519) +- M3U processing lock not releasing when no streams found during XC refresh, which also skipped VOD scanning (#449) +- Float conversion errors by normalizing decimal format during VOD scanning (#526) +- Direct URL ordering in M3U output to use correct stream sequence (#528) +- Adding multiple M3U accounts without refreshing modified only the first entry (#397) +- UI state bug where new playlist creation was not notified to frontend ("Fetching Groups" stuck) +- Minor FFmpeg task and stream termination bugs in DVR module +- Input escaping issue where single quotes were interpreted as code delimiters (#406) + +## [0.10.3] - 2025-10-04 + +### Added + +- Logo management UI improvements where Channel editor now uses the Logo Manager modal, allowing users to add logos by URL directly from the edit form - Thanks [@EmeraldPi](https://github.com/EmeraldPi) + +### Changed + +- FFmpeg base container rebuilt with improved native build support - Thanks [@EmeraldPi](https://github.com/EmeraldPi) +- GitHub Actions workflow updated to use native runners instead of QEMU emulation for more reliable multi-architecture builds + +### Fixed + +- EPG parsing stability when large EPG files would not fully parse all channels. 
Parser now uses `iterparse` with `recover=True` for both channel and program-level parsing, ensuring complete and resilient XML processing even when Cloudflare injects additional root elements + +## [0.10.2] - 2025-10-03 + +### Added + +- `m3u_id` parameter to `generate_hash_key` and updated related calls +- Support for `x-tvg-url` and `url-tvg` generation with preserved query parameters (#345) +- Exact Gracenote ID matching for EPG channel mapping (#291) +- Recovery handling for XMLTV parser errors +- `nice -n 5` added to Celery commands for better process priority management + +### Changed + +- Default M3U hash key changed to URL only for new installs +- M3U profile retrieval now includes current connection counts and improved session handling during VOD session start +- Improved stream selection logic when all profiles have reached max connections (retries faster) +- XMLTV parsing refactored to use `iterparse` for `` element +- Release workflow refactored to run on native architecture +- Docker build system improvements: + - Split install/build steps + - Switch from Yarn → NPM + - Updated to Node.js 24 (frontend build) + - Improved ARM build reliability + - Pushes to DockerHub with combined manifest + - Removed redundant tags and improved build organization + +### Fixed + +- Cloudflare-hosted EPG feeds breaking parsing (#497) +- Bulk channel creation now preserves the order channels were selected in (no longer reversed) +- M3U hash settings not saving properly +- VOD selecting the wrong M3U profile at session start (#461) +- Redundant `h` removed from 12-hour time format in settings page + +## [0.10.1] - 2025-09-24 + +### Added + +- Virtualized rendering for TV Guide for smoother performance when displaying large guides - Thanks [@stlalpha](https://github.com/stlalpha) (#438) +- Enhanced channel/program mapping to reuse EPG data across multiple channels that share the same TVG-ID + +### Changed + +- `URL` field length in EPGSource model increased from 200 → 1000 characters to support long URLs with tokens +- Improved URL transformation logic with more advanced regex during profile refreshes +- During EPG scanning, the first display name for a channel is now used instead of the last +- `whiteSpace` style changed from `nowrap` → `pre` in StreamsTable for better text formatting + +### Fixed + +- EPG channel parsing failure when channel `URL` exceeded 500 characters by adding validation during scanning (#452) +- Frontend incorrectly saving case-sensitive setting as a JSON string for stream filters + +## [0.10.0] - 2025-09-18 + +### Added + +- Channel Creation Improvements: + - Ability to specify channel number during channel creation ([#377](https://github.com/Dispatcharr/Dispatcharr/issues/377), [#169](https://github.com/Dispatcharr/Dispatcharr/issues/169)) + - Asynchronous bulk channel creation from stream IDs with WebSocket progress updates + - WebSocket notifications when channels are created +- EPG Auto-Matching (Rewritten & Enhanced): + - Completely refactored for improved accuracy and efficiency + - Can now be applied to selected channels or triggered directly from the channel edit form + - Uses stricter matching logic with support from sentence transformers + - Added progress notifications during the matching process + - Implemented memory cleanup for ML models after matching operations + - Removed deprecated matching scripts +- Logo & EPG Management: + - Ability in channel edit form and bulk channel editor to set logos and names from assigned EPG (#157) + - Improved logo update 
flow: frontend refreshes on changes, store updates after bulk changes, progress shown via notifications +- Table Enhancements: + - All tables now support adjustable column resizing (#295) + - Channels and Streams tables persist column widths and center divider position to local storage + - Improved sizing and layout for user-agents, stream profiles, logos, M3U, and EPG tables + +### Changed + +- Simplified VOD and series access: removed user-level restrictions on M3U accounts +- Skip disabled M3U accounts when choosing streams during playback (#402) +- Enhanced `UserViewSet` queryset to prefetch related channel profiles for better performance +- Auto-focus added to EPG filter input +- Category API retrieval now sorts by name +- Increased default column size for EPG fields and removed max size on group/EPG columns +- Standardized EPG column header to display `(EPG ID - TVG-ID)` + +### Fixed + +- Bug during VOD cleanup where all VODs not from the current M3U scan could be deleted +- Logos not being set correctly in some cases +- Bug where not setting a channel number caused an error when creating a channel (#422) +- Bug where clicking "Add Channel" with a channel selected opened the edit form instead +- Bug where a newly created channel could reuse streams from another channel due to form not clearing properly +- VOD page not displaying correct order while changing pages +- `ReferenceError: setIsInitialized is not defined` when logging into web UI +- `cannot access local variable 'total_chunks' where it is not associated with a value` during VOD refresh + +## [0.9.1] - 2025-09-13 + +### Fixed + +- Broken migrations affecting the plugins system +- DVR and plugin paths to ensure proper functionality (#381) + +## [0.9.0] - 2025-09-12 + +### Added + +- **Video on Demand (VOD) System:** + - Complete VOD infrastructure with support for movies and TV series + - Advanced VOD metadata including IMDB/TMDB integration, trailers, cast information + - Smart VOD categorization with filtering by type (movies vs series) + - Multi-provider VOD support with priority-based selection + - VOD streaming proxy with connection tracking and statistics + - Season/episode organization for TV series with expandable episode details + - VOD statistics and monitoring integrated with existing stats dashboard + - Optimized VOD parsing and category filtering + - Dedicated VOD page with movies and series tabs + - Rich VOD modals with backdrop images, trailers, and metadata + - Episode management with season-based organization + - Play button integration with external player support + - VOD statistics cards similar to channel cards +- **Plugin System:** + - Extensible Plugin Framework - Developers can build custom functionality without modifying Dispatcharr core + - Plugin Discovery & Management - Automatic detection of installed plugins, with enable/disable controls in the UI + - Backend API Support - New APIs for listing, loading, and managing plugins programmatically + - Plugin Registry - Structured models for plugin metadata (name, version, author, description) + - UI Enhancements - Dedicated Plugins page in the admin panel for centralized plugin management + - Documentation & Scaffolding - Initial documentation and scaffolding to accelerate plugin development +- **DVR System:** + - Refreshed DVR page for managing scheduled and completed recordings + - Global pre/post padding controls surfaced in Settings + - Playback support for completed recordings directly in the UI + - DVR table view includes title, channel, time, and 
padding adjustments for clear scheduling + - Improved population of DVR listings, fixing intermittent blank screen issues + - Comskip integration for automated commercial detection and skipping in recordings + - User-configurable comskip toggle in Settings +- **Enhanced Channel Management:** + - EPG column added to channels table for better organization + - EPG filtering by channel assignment and source name + - Channel batch renaming for efficient bulk channel name updates + - Auto channel sync improvements with custom stream profile override + - Channel logo management overhaul with background loading +- Date and time format customization in settings - Thanks [@Biologisten](https://github.com/Biologisten) +- Auto-refresh intervals for statistics with better UI controls +- M3U profile notes field for better organization +- XC account information retrieval and display with account refresh functionality and notifications + +### Changed + +- JSONB field conversion for custom properties (replacing text fields) for better performance +- Database encoding converted from ASCII to UTF8 for better character support +- Batch processing for M3U updates and channel operations +- Query optimization with prefetch_related to eliminate N+1 queries +- Reduced API calls by fetching all data at once instead of per-category +- Buffering speed setting now affects UI indicators +- Swagger endpoint accessible with or without trailing slash +- EPG source names displayed before channel names in edit forms +- Logo loading improvements with background processing +- Channel card enhancements with better status indicators +- Group column width optimization +- Better content-type detection for streams +- Improved headers with content-range and total length +- Enhanced user-agent handling for M3U accounts +- HEAD request support with connection keep-alive +- Progress tracking improvements for clients with new sessions +- Server URL length increased to 1000 characters for token support +- Prettier formatting applied to all frontend code +- String quote standardization and code formatting improvements + +### Fixed + +- Logo loading issues in channel edit forms resolved +- M3U download error handling and user feedback improved +- Unique constraint violations fixed during stream rehashing +- Channel stats fetching moved from Celery beat task to configurable API calls +- Speed badge colors now use configurable buffering speed setting +- Channel cards properly close when streams stop +- Active streams labeling updated from "Active Channels" +- WebSocket updates for client connect/disconnect events +- Null value handling before database saves +- Empty string scrubbing for cleaner data +- Group relationship cleanup for removed M3U groups +- Logo cleanup for unused files with proper batch processing +- Recordings start 5 mins after show starts (#102) + +### Closed + +- [#350](https://github.com/Dispatcharr/Dispatcharr/issues/350): Allow DVR recordings to be played via the UI +- [#349](https://github.com/Dispatcharr/Dispatcharr/issues/349): DVR screen doesn't populate consistently +- [#340](https://github.com/Dispatcharr/Dispatcharr/issues/340): Global find and replace +- [#311](https://github.com/Dispatcharr/Dispatcharr/issues/311): Stat's "Current Speed" does not reflect "Buffering Speed" setting +- [#304](https://github.com/Dispatcharr/Dispatcharr/issues/304): Name ignored when uploading logo +- [#300](https://github.com/Dispatcharr/Dispatcharr/issues/300): Updating Logo throws error +- 
[#286](https://github.com/Dispatcharr/Dispatcharr/issues/286): 2 Value/Column EPG in Channel Edit +- [#280](https://github.com/Dispatcharr/Dispatcharr/issues/280): Add general text field in M3U/XS profiles +- [#190](https://github.com/Dispatcharr/Dispatcharr/issues/190): Show which stream is being used and allow it to be altered in channel properties +- [#155](https://github.com/Dispatcharr/Dispatcharr/issues/155): Additional column with EPG assignment information / Allow filtering by EPG assignment +- [#138](https://github.com/Dispatcharr/Dispatcharr/issues/138): Bulk Channel Edit Functions + +## [0.8.0] - 2025-08-19 + +### Added + +- Channel & Stream Enhancements: + - Preview streams under a channel, with stream logo and name displayed in the channel card + - Advanced stats for channel streams + - Stream qualities displayed in the channel table + - Stream stats now saved to the database + - URL badges can now be clicked to copy stream links to the clipboard +- M3U Filtering for Streams: + - Streams for an M3U account can now be filtered using flexible parameters + - Apply filters based on stream name, group title, or stream URL (via regex) + - Filters support both inclusion and exclusion logic for precise control + - Multiple filters can be layered with a priority order for complex rules +- Ability to reverse the sort order for auto channel sync +- Custom validator for URL fields now allows non-FQDN hostnames (#63) +- Membership creation added in `UpdateChannelMembershipAPIView` if not found (#275) + +### Changed + +- Bumped Postgres to version 17 +- Updated dependencies in `requirements.txt` for compatibility and improvements +- Improved chunked extraction to prevent memory issues - Thanks [@pantherale0](https://github.com/pantherale0) + +### Fixed + +- XML escaping for channel ID in `generate_dummy_epg` function +- Bug where creating a channel from a stream not displayed in the table used an invalid stream name +- Debian install script - Thanks [@deku-m](https://github.com/deku-m) + +## [0.7.1] - 2025-07-29 + +### Added + +- Natural sorting for channel names during auto channel sync +- Ability to sort auto sync order by provider order (default), channel name, TVG ID, or last updated time +- Auto-created channels can now be assigned to specific channel profiles (#255) +- Channel profiles are now fetched automatically after a successful M3U refresh +- Uses only whole numbers when assigning the next available channel number + +### Changed + +- Logo upload behavior changed to wait for the Create button before saving +- Uses the channel name as the display name in EPG output for improved readability +- Ensures channels are only added to a selected profile if one is explicitly chosen + +### Fixed + +- Logo Manager prevents redundant messages from the file scanner by properly tracking uploaded logos in Redis +- Fixed an issue preventing logo uploads via URL +- Adds internal support for assigning multiple profiles via API + +## [0.7.0] - 2025-07-19 + +### Added + +- **Logo Manager:** + - Complete logo management system with filtering, search, and usage tracking + - Upload logos directly through the UI + - Automatically scan `/data/logos` for existing files (#69) + - View which channels use each logo + - Bulk delete unused logos with cleanup + - Enhanced display with hover effects and improved sizing + - Improved logo fetching with timeouts and user-agent headers to prevent hanging +- **Group Manager:** + - Comprehensive group management interface (#128) + - Search and filter groups with ease + 
- Bulk operations for cleanup + - Filter channels by group membership + - Automatically clean up unused groups +- **Auto Channel Sync:** + - Automatic channel synchronization from M3U sources (#147) + - Configure auto-sync settings per M3U account group + - Set starting channel numbers by group + - Override group names during sync + - Apply regex match and replace for channel names + - Filter channels by regex match on stream name + - Track auto-created vs manually added channels + - Smart updates preserve UUIDs and existing links +- Stream rehashing with WebSocket notifications +- Better error handling for blocked rehash attempts +- Lock acquisition to prevent conflicts +- Real-time progress tracking + +### Changed + +- Persist table page sizes in local storage (streams & channels) +- Smoother pagination and improved UX +- Fixed z-index issues during table refreshes +- Improved XC client with connection pooling +- Better error handling for API and JSON decode failures +- Smarter handling of empty content and blocking responses +- Improved EPG XML generation with richer metadata +- Better support for keywords, languages, ratings, and credits +- Better form layouts and responsive buttons +- Enhanced confirmation dialogs and feedback + +### Fixed + +- Channel table now correctly restores page size from local storage +- Resolved WebSocket message formatting issues +- Fixed logo uploads and edits +- Corrected ESLint issues across the codebase +- Fixed HTML validation errors in menus +- Optimized logo fetching with proper timeouts and headers ([#101](https://github.com/Dispatcharr/Dispatcharr/issues/101), [#217](https://github.com/Dispatcharr/Dispatcharr/issues/217)) + +## [0.6.2] - 2025-07-10 + +### Fixed + +- **Streaming & Connection Stability:** + - Provider timeout issues - Slow but responsive providers no longer cause channel lockups + - Added chunk and process timeouts - Prevents hanging during stream processing and transcoding + - Improved connection handling - Enhanced process management and socket closure detection for safer streaming + - Enhanced health monitoring - Health monitor now properly notifies main thread without attempting reconnections +- **User Interface & Experience:** + - Touch screen compatibility - Web player can now be properly closed on touch devices + - Improved user management - Added support for first/last names, login tracking, and standardized table formatting +- Improved logging - Enhanced log messages with channel IDs for better debugging +- Code cleanup - Removed unused imports, variables, and dead links + +## [0.6.1] - 2025-06-27 + +### Added + +- Dynamic parameter options for M3U and EPG URLs (#207) +- Support for 'num' property in channel number extraction (fixes channel creation from XC streams not having channel numbers) + +### Changed + +- EPG generation now uses streaming responses to prevent client timeouts during large EPG file generation (#179) +- Improved reliability when downloading EPG data from external sources +- Better program positioning - Programs that start before the current view now have proper text positioning (#223) +- Better mobile support - Improved sizing and layout for mobile devices across multiple tables +- Responsive stats cards - Better calculation for card layout and improved filling on different screen sizes (#218) +- Enhanced table rendering - M3U and EPG tables now render better on small screens +- Optimized spacing - Removed unnecessary padding and blank space throughout the interface +- Better settings layout - Improved 
minimum widths and mobile support for settings pages +- Always show 2 decimal places for FFmpeg speed values + +### Fixed + +- TV Guide now properly filters channels based on selected channel group +- Resolved loading issues - Fixed channels and groups not loading correctly in the TV Guide +- Stream profile fixes - Resolved issue with setting stream profile to 'use default' +- Single channel editing - When only one channel is selected, the correct channel editor now opens +- Bulk edit improvements - Added "no change" options for bulk editing operations +- Bulk channel editor now properly saves changes (#222) +- Link form improvements - Better sizing and rendering of link forms with proper layering +- Confirmation dialogs added with warning suppression for user deletion, channel profile deletion, and M3U profile deletion + +## [0.6.0] - 2025-06-19 + +### Added + +- **User Management & Access Control:** + - Complete user management system with user levels and channel access controls + - Network access control with CIDR validation and IP-based restrictions + - Logout functionality and improved loading states for authenticated users +- **Xtream Codes Output:** + - Xtream Codes support enables easy output to IPTV clients (#195) +- **Stream Management & Monitoring:** + - FFmpeg statistics integration - Real-time display of video/audio codec info, resolution, speed, and stream type + - Automatic stream switching when buffering is detected + - Enhanced stream profile management with better connection tracking + - Improved stream state detection, including buffering as an active state +- **Channel Management:** + - Bulk channel editing for channel group, stream profile, and user access level +- **Enhanced M3U & EPG Features:** + - Dynamic `tvg-id` source selection for M3U and EPG (`tvg_id`, `gracenote`, or `channel_number`) + - Direct URL support in M3U output via `direct=true` parameter + - Flexible EPG output with a configurable day limit via `days=#` parameter + - Support for LIVE tags and `dd_progrid` numbering in EPG processing +- Proxy settings configuration with UI integration and improved validation +- Stream retention controls - Set stale stream days to `0` to disable retention completely (#123) +- Tuner flexibility - Minimum of 1 tuner now allowed for HDHomeRun output +- Fallback IP geolocation provider (#127) - Thanks [@maluueu](https://github.com/maluueu) +- POST method now allowed for M3U output, enabling support for Smarters IPTV - Thanks [@maluueu](https://github.com/maluueu) + +### Changed + +- Improved channel cards with better status indicators and tooltips +- Clearer error messaging for unsupported codecs in the web player +- Network access warnings to prevent accidental lockouts +- Case-insensitive M3U parsing for improved compatibility +- Better EPG processing with improved channel matching +- Replaced Mantine React Table with custom implementations +- Improved tooltips and parameter wrapping for cleaner interfaces +- Better badge colors and status indicators +- Stronger form validation and user feedback +- Streamlined settings management using JSON configs +- Default value population for clean installs +- Environment-specific configuration support for multiple deployment scenarios + +### Fixed + +- FFmpeg process cleanup - Ensures FFmpeg fully exits before marking connection closed +- Resolved stream profile update issues in statistics display +- Fixed M3U profile ID behavior when switching streams +- Corrected stream switching logic - Redis is only updated on successful 
switches +- Fixed connection counting - Excludes the current profile from available connection counts +- Fixed custom stream channel creation when no group is assigned (#122) +- Resolved EPG auto-matching deadlock when many channels match simultaneously - Thanks [@xham3](https://github.com/xham3) + +## [0.5.2] - 2025-06-03 + +### Added + +- Direct Logo Support: Added ability to bypass logo caching by adding `?cachedlogos=false` to the end of M3U and EPG URLs (#109) + +### Changed + +- Dynamic Resource Management: Auto-scales Celery workers based on demand, reducing overall memory and CPU usage while still allowing high-demand tasks to complete quickly (#111) +- Enhanced Logging: + - Improved logging for M3U processing + - Better error output from XML parser for easier troubleshooting + +### Fixed + +- XMLTV Parsing: Added `remove_blank_text=True` to lxml parser to prevent crashes with poorly formatted XMLTV files (#115) +- Stats Display: Refactored channel info retrieval for safer decoding and improved error logging, fixing intermittent issues with statistics not displaying properly + +## [0.5.1] - 2025-05-28 + +### Added + +- Support for ZIP-compressed EPG files +- Automatic extraction of compressed files after downloading +- Intelligent file type detection for EPG sources: + - Reads the first bits of files to determine file type + - If a compressed file is detected, it peeks inside to find XML files +- Random descriptions for dummy channels in the TV guide +- Support for decimal channel numbers (converted from integer to float) - Thanks [@MooseyOnTheLoosey](https://github.com/MooseyOnTheLoosey) +- Show channels without EPG data in TV Guide +- Profile name added to HDHR-friendly name and device ID (allows adding multiple HDHR profiles to Plex) + +### Changed + +- About 30% faster EPG processing +- Significantly improved memory usage for large EPG files +- Improved timezone handling +- Cleaned up cached files when deleting EPG sources +- Performance improvements when processing extremely large M3U files +- Improved batch processing with better cleanup +- Enhanced WebSocket update handling for large operations +- Redis configured for better performance (no longer saves to disk) +- Improved memory management for Celery tasks +- Separated beat schedules with a file scanning interval set to 20 seconds +- Improved authentication error handling with user redirection to the login page +- Improved channel card formatting for different screen resolutions (can now actually read the channel stats card on mobile) +- Decreased line height for status messages in the EPG and M3U tables for better appearance on smaller screens +- Updated the EPG form to match the M3U form for consistency + +### Fixed + +- Profile selection issues that previously caused WebUI crashes +- Issue with `tvc-guide-id` (Gracenote ID) in bulk channel creation +- Bug when uploading an M3U with the default user-agent set +- Bug where multiple channel initializations could occur, causing zombie streams and performance issues (choppy streams) +- Better error handling for buffer overflow issues +- Fixed various memory leaks +- Bug in the TV Guide that would crash the web UI when selecting a profile to filter by +- Multiple minor bug fixes and code cleanup + +## [0.5.0] - 2025-05-15 + +### Added + +- **XtreamCodes Support:** + - Initial XtreamCodes client support + - Option to add EPG source with XC account + - Improved XC login and authentication + - Improved error handling for XC connections +- **Hardware Acceleration:** + - Detection 
of hardware acceleration capabilities with recommendations (available in logs after startup) + - Improved support for NVIDIA, Intel (QSV), and VAAPI acceleration methods + - Added necessary drivers and libraries for hardware acceleration + - Automatically assigns required permissions for hardware acceleration + - Thanks to [@BXWeb](https://github.com/BXWeb), @chris.r3x, [@rykr](https://github.com/rykr), @j3111, [@jesmannstl](https://github.com/jesmannstl), @jimmycarbone, [@gordlaben](https://github.com/gordlaben), [@roofussummers](https://github.com/roofussummers), [@slamanna212](https://github.com/slamanna212) +- **M3U and EPG Management:** + - Enhanced M3U profile creation with live regex results + - Added stale stream detection with configurable thresholds + - Improved status messaging for M3U and EPG operations: + - Shows download speed with estimated time remaining + - Shows parsing time remaining + - Added "Pending Setup" status for M3U's requiring group selection + - Improved handling of M3U group filtering +- **UI Improvements:** + - Added configurable table sizes + - Enhanced video player with loading and error states + - Improved WebSocket connection handling with authentication + - Added confirmation dialogs for critical operations + - Auto-assign numbers now configurable by selection + - Added bulk editing of channel profile membership (select multiple channels, then click the profile toggle on any selected channel to apply the change to all) +- **Infrastructure & Performance:** + - Standardized and improved the logging system + - New environment variable to set logging level: `DISPATCHARR_LOG_LEVEL` (default: `INFO`, available: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`) + - Introduced a new base image build process: updates are now significantly smaller (typically under 15MB unless the base image changes) + - Improved environment variable handling in container +- Support for Gracenote ID (`tvc-guide-stationid`) - Thanks [@rykr](https://github.com/rykr) +- Improved file upload handling with size limits removed + +### Fixed + +- Issues with profiles not loading correctly +- Problems with stream previews in tables +- Channel creation and editing workflows +- Logo display issues +- WebSocket connection problems +- Multiple React-related errors and warnings +- Pagination and filtering issues in tables + +## [0.4.1] - 2025-05-01 + +### Changed + +- Optimized uWSGI configuration settings for better server performance +- Improved asynchronous processing by converting additional timers to gevent +- Enhanced EPG (Electronic Program Guide) downloading with proper user agent headers + +### Fixed + +- Issue with "add streams to channel" functionality to correctly follow disabled state logic + +## [0.4.0] - 2025-05-01 + +### Added + +- URL copy buttons for stream and channel URLs +- Manual stream switching ability +- EPG auto-match notifications - Users now receive feedback about how many matches were found +- Informative tooltips throughout the interface, including stream profiles and user-agent details +- Display of connected time for each client +- Current M3U profile information to stats +- Better logging for which channel clients are getting chunks from + +### Changed + +- Table System Rewrite: Completely refactored channel and stream tables for dramatically improved performance with large datasets +- Improved Concurrency: Replaced time.sleep with gevent.sleep for better performance when handling multiple streams +- Improved table interactions: + - Restored alternating 
row colors and hover effects + - Added shift-click support for multiple row selection + - Preserved drag-and-drop functionality +- Adjusted logo display to prevent layout shifts with different sized logos +- Improved sticky headers in tables +- Fixed spacing and padding in EPG and M3U tables for better readability on smaller displays +- Stream URL handling improved for search/replace patterns +- Enhanced stream lock management for better reliability +- Added stream name to channel status for better visibility +- Properly track current stream ID during stream switches +- Improved EPG cache handling and cleanup of old cache files +- Corrected content type for M3U file (using m3u instead of m3u8) +- Fixed logo URL handling in M3U generation +- Enhanced tuner count calculation to include only active M3U accounts +- Increased thread stack size in uwsgi configuration +- Changed proxy to use uwsgi socket +- Added build timestamp to version information +- Reduced excessive logging during M3U/EPG file importing +- Improved store variable handling to increase application efficiency +- Frontend now being built by Yarn instead of NPM + +### Fixed + +- Issues with channel statistics randomly not working +- Stream ordering in channel selection +- M3U profile name added to stream names for better identification +- Channel form not updating some properties after saving +- Issue with setting logos to default +- Channel creation from streams +- Channel group saving +- Improved error handling throughout the application +- Bugs in deleting stream profiles +- Resolved mimetype detection issues +- Fixed form display issues +- Added proper requerying after form submissions and item deletions +- Bug overwriting tvg-id when loading TV Guide +- Bug that prevented large m3u's and epg's from uploading +- Typo in Stream Profile header column for Description - Thanks [@LoudSoftware](https://github.com/LoudSoftware) +- Typo in m3u input processing (tv-chno instead of tvg-chno) - Thanks @www2a + +## [0.3.3] - 2025-04-18 + +### Fixed + +- Issue with dummy EPG calculating hours above 24, ensuring time values remain within valid 24-hour format +- Auto import functionality to properly process old files that hadn't been imported yet, rather than ignoring them + +## [0.3.2] - 2025-04-16 + +### Fixed + +- Issue with stream ordering for channels - resolved problem where stream objects were incorrectly processed when assigning order in channel configurations + +## [0.3.1] - 2025-04-16 + +### Added + +- Key to navigation links in sidebar to resolve DOM errors when loading web UI +- Channels that are set to 'dummy' epg to the TV Guide + +### Fixed + +- Issue preventing dummy EPG from being set +- Channel numbers not saving properly +- EPGs not refreshing when linking EPG to channel +- Improved error messages in notifications + +## [0.3.0] - 2025-04-15 + +### Added + +- URL validation for redirect profile: + - Validates stream URLs before redirecting clients + - Prevents clients from being redirected to unavailable streams + - Now tries alternate streams when primary stream validation fails +- Dynamic tuner configuration for HDHomeRun devices: + - TunerCount is now dynamically created based on profile max connections + - Sets minimum of 2 tuners, up to 10 for unlimited profiles + +### Changed + +- More robust stream switching: + - Clients now wait properly if a stream is in the switching state + - Improved reliability during stream transitions +- Performance enhancements: + - Increased workers and threads for uwsgi for better 
concurrency + +### Fixed + +- Issue with multiple dead streams in a row - System now properly handles cases where several sequential streams are unavailable +- Broken links to compose files in documentation + +## [0.2.1] - 2025-04-13 + +### Fixed + +- Stream preview (not channel) +- Streaming wouldn't work when using default user-agent for an M3U +- WebSockets and M3U profile form issues + +## [0.2.0] - 2025-04-12 + +Initial beta public release. diff --git a/apps/accounts/api_views.py b/apps/accounts/api_views.py index bf87c2ab5..41e2f0773 100644 --- a/apps/accounts/api_views.py +++ b/apps/accounts/api_views.py @@ -20,30 +20,88 @@ class TokenObtainPairView(TokenObtainPairView): def post(self, request, *args, **kwargs): # Custom logic here if not network_access_allowed(request, "UI"): + # Log blocked login attempt due to network restrictions + from core.utils import log_system_event + username = request.data.get("username", 'unknown') + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='login_failed', + user=username, + client_ip=client_ip, + user_agent=user_agent, + reason='Network access denied', + ) return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN) # Get the response from the parent class first - response = super().post(request, *args, **kwargs) + username = request.data.get("username") + + # Log login attempt + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + try: + response = super().post(request, *args, **kwargs) + + # If login was successful, update last_login and log success + if response.status_code == 200: + if username: + from django.utils import timezone + try: + user = User.objects.get(username=username) + user.last_login = timezone.now() + user.save(update_fields=['last_login']) + + # Log successful login + log_system_event( + event_type='login_success', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + except User.DoesNotExist: + pass # User doesn't exist, but login somehow succeeded + else: + # Log failed login attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason='Invalid credentials', + ) - # If login was successful, update last_login - if response.status_code == 200: - username = request.data.get("username") - if username: - from django.utils import timezone - try: - user = User.objects.get(username=username) - user.last_login = timezone.now() - user.save(update_fields=['last_login']) - except User.DoesNotExist: - pass # User doesn't exist, but login somehow succeeded + return response - return response + except Exception as e: + # If parent class raises an exception (e.g., validation error), log failed attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason=f'Authentication error: {str(e)[:100]}', + ) + raise # Re-raise the exception to maintain normal error flow class TokenRefreshView(TokenRefreshView): def post(self, request, *args, **kwargs): # Custom logic here if not network_access_allowed(request, "UI"): + # Log blocked token refresh attempt due to network restrictions + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + 
log_system_event( + event_type='login_failed', + user='token_refresh', + client_ip=client_ip, + user_agent=user_agent, + reason='Network access denied (token refresh)', + ) return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN) return super().post(request, *args, **kwargs) @@ -80,6 +138,15 @@ def initialize_superuser(request): class AuthViewSet(viewsets.ViewSet): """Handles user login and logout""" + def get_permissions(self): + """ + Login doesn't require auth, but logout does + """ + if self.action == 'logout': + from rest_framework.permissions import IsAuthenticated + return [IsAuthenticated()] + return [] + @swagger_auto_schema( operation_description="Authenticate and log in a user", request_body=openapi.Schema( @@ -100,6 +167,11 @@ def login(self, request): password = request.data.get("password") user = authenticate(request, username=username, password=password) + # Get client info for logging + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + if user: login(request, user) # Update last_login timestamp @@ -107,6 +179,14 @@ def login(self, request): user.last_login = timezone.now() user.save(update_fields=['last_login']) + # Log successful login + log_system_event( + event_type='login_success', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + return Response( { "message": "Login successful", @@ -118,6 +198,15 @@ def login(self, request): }, } ) + + # Log failed login attempt + log_system_event( + event_type='login_failed', + user=username or 'unknown', + client_ip=client_ip, + user_agent=user_agent, + reason='Invalid credentials', + ) return Response({"error": "Invalid credentials"}, status=400) @swagger_auto_schema( @@ -126,6 +215,19 @@ def login(self, request): ) def logout(self, request): """Logs out the authenticated user""" + # Log logout event before actually logging out + from core.utils import log_system_event + username = request.user.username if request.user and request.user.is_authenticated else 'unknown' + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + log_system_event( + event_type='logout', + user=username, + client_ip=client_ip, + user_agent=user_agent, + ) + logout(request) return Response({"message": "Logout successful"}) diff --git a/apps/channels/api_urls.py b/apps/channels/api_urls.py index 7cfdc1b19..7999abd93 100644 --- a/apps/channels/api_urls.py +++ b/apps/channels/api_urls.py @@ -13,12 +13,14 @@ UpdateChannelMembershipAPIView, BulkUpdateChannelMembershipAPIView, RecordingViewSet, + RecurringRecordingRuleViewSet, GetChannelStreamsAPIView, SeriesRulesAPIView, DeleteSeriesRuleAPIView, EvaluateSeriesRulesAPIView, BulkRemoveSeriesRecordingsAPIView, BulkDeleteUpcomingRecordingsAPIView, + ComskipConfigAPIView, ) app_name = 'channels' # for DRF routing @@ -30,6 +32,7 @@ router.register(r'logos', LogoViewSet, basename='logo') router.register(r'profiles', ChannelProfileViewSet, basename='profile') router.register(r'recordings', RecordingViewSet, basename='recording') +router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule') urlpatterns = [ # Bulk delete is a single APIView, not a ViewSet @@ -46,6 +49,7 @@ path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'), path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), 
path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'), + path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'), ] urlpatterns += router.urls diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index 7a3d51357..bc9205379 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -28,6 +28,7 @@ ChannelProfile, ChannelProfileMembership, Recording, + RecurringRecordingRule, ) from .serializers import ( StreamSerializer, @@ -38,8 +39,17 @@ BulkChannelProfileMembershipSerializer, ChannelProfileSerializer, RecordingSerializer, + RecurringRecordingRuleSerializer, +) +from .tasks import ( + match_epg_channels, + evaluate_series_rules, + evaluate_series_rules_impl, + match_single_channel_epg, + match_selected_channels_epg, + sync_recurring_rule_impl, + purge_recurring_rule_impl, ) -from .tasks import match_epg_channels, evaluate_series_rules, evaluate_series_rules_impl, match_single_channel_epg, match_selected_channels_epg import django_filters from django_filters.rest_framework import DjangoFilterBackend from rest_framework.filters import SearchFilter, OrderingFilter @@ -49,10 +59,12 @@ from django.http import StreamingHttpResponse, FileResponse, Http404 from django.utils import timezone import mimetypes +from django.conf import settings from rest_framework.pagination import PageNumberPagination + logger = logging.getLogger(__name__) @@ -423,8 +435,8 @@ def get_serializer_context(self): @action(detail=False, methods=["patch"], url_path="edit/bulk") def edit_bulk(self, request): """ - Bulk edit channels. - Expects a list of channels with their updates. + Bulk edit channels efficiently. + Validates all updates first, then applies in a single transaction. 
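+        Returns HTTP 400 with per-channel errors if any update fails validation, in which case no updates are applied.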
""" data = request.data if not isinstance(data, list): @@ -433,63 +445,94 @@ def edit_bulk(self, request): status=status.HTTP_400_BAD_REQUEST, ) - updated_channels = [] - errors = [] + # Extract IDs and validate presence + channel_updates = {} + missing_ids = [] - for channel_data in data: + for i, channel_data in enumerate(data): channel_id = channel_data.get("id") if not channel_id: - errors.append({"error": "Channel ID is required"}) - continue + missing_ids.append(f"Item {i}: Channel ID is required") + else: + channel_updates[channel_id] = channel_data - try: - channel = Channel.objects.get(id=channel_id) - - # Handle channel_group_id properly - convert string to integer if needed - if 'channel_group_id' in channel_data: - group_id = channel_data['channel_group_id'] - if group_id is not None: - try: - channel_data['channel_group_id'] = int(group_id) - except (ValueError, TypeError): - channel_data['channel_group_id'] = None - - # Use the serializer to validate and update - serializer = ChannelSerializer( - channel, data=channel_data, partial=True - ) + if missing_ids: + return Response( + {"errors": missing_ids}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Fetch all channels at once (one query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } + + # Validate and prepare updates + validated_updates = [] + errors = [] - if serializer.is_valid(): - updated_channel = serializer.save() - updated_channels.append(updated_channel) - else: - errors.append({ - "channel_id": channel_id, - "errors": serializer.errors - }) + for channel_id, channel_data in channel_updates.items(): + channel = channels_dict.get(channel_id) - except Channel.DoesNotExist: + if not channel: errors.append({ "channel_id": channel_id, "error": "Channel not found" }) - except Exception as e: + continue + + # Handle channel_group_id conversion + if 'channel_group_id' in channel_data: + group_id = channel_data['channel_group_id'] + if group_id is not None: + try: + channel_data['channel_group_id'] = int(group_id) + except (ValueError, TypeError): + channel_data['channel_group_id'] = None + + # Validate with serializer + serializer = ChannelSerializer( + channel, data=channel_data, partial=True + ) + + if serializer.is_valid(): + validated_updates.append((channel, serializer.validated_data)) + else: errors.append({ "channel_id": channel_id, - "error": str(e) + "errors": serializer.errors }) if errors: return Response( - {"errors": errors, "updated_count": len(updated_channels)}, + {"errors": errors, "updated_count": len(validated_updates)}, status=status.HTTP_400_BAD_REQUEST, ) - # Serialize the updated channels for response - serialized_channels = ChannelSerializer(updated_channels, many=True).data + # Apply all updates in a transaction + with transaction.atomic(): + for channel, validated_data in validated_updates: + for key, value in validated_data.items(): + setattr(channel, key, value) + + # Single bulk_update query instead of individual saves + channels_to_update = [channel for channel, _ in validated_updates] + if channels_to_update: + Channel.objects.bulk_update( + channels_to_update, + fields=list(validated_updates[0][1].keys()), + batch_size=100 + ) + + # Return the updated objects (already in memory) + serialized_channels = ChannelSerializer( + [channel for channel, _ in validated_updates], + many=True, + context=self.get_serializer_context() + ).data return Response({ - "message": f"Successfully updated {len(updated_channels)} channels", + "message": 
f"Successfully updated {len(validated_updates)} channels", "channels": serialized_channels }) @@ -555,6 +598,37 @@ def set_logos_from_epg(self, request): "channel_count": len(channel_ids) }) + @action(detail=False, methods=["post"], url_path="set-tvg-ids-from-epg") + def set_tvg_ids_from_epg(self, request): + """ + Trigger a Celery task to set channel TVG-IDs from EPG data + """ + from .tasks import set_channels_tvg_ids_from_epg + + data = request.data + channel_ids = data.get("channel_ids", []) + + if not channel_ids: + return Response( + {"error": "channel_ids is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + if not isinstance(channel_ids, list): + return Response( + {"error": "channel_ids must be a list"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Start the Celery task + task = set_channels_tvg_ids_from_epg.delay(channel_ids) + + return Response({ + "message": f"Started EPG TVG-ID setting task for {len(channel_ids)} channels", + "task_id": task.id, + "channel_count": len(channel_ids) + }) + @action(detail=False, methods=["get"], url_path="ids") def get_ids(self, request, *args, **kwargs): # Get the filtered queryset @@ -704,10 +778,14 @@ def from_stream(self, request): channel_data["channel_group_id"] = channel_group.id if stream.logo_url: - logo, _ = Logo.objects.get_or_create( - url=stream.logo_url, defaults={"name": stream.name or stream.tvg_id} - ) - channel_data["logo_id"] = logo.id + # Import validation function + from apps.channels.tasks import validate_logo_url + validated_logo_url = validate_logo_url(stream.logo_url) + if validated_logo_url: + logo, _ = Logo.objects.get_or_create( + url=validated_logo_url, defaults={"name": stream.name or stream.tvg_id} + ) + channel_data["logo_id"] = logo.id # Attempt to find existing EPGs with the same tvg-id epgs = EPGData.objects.filter(tvg_id=stream.tvg_id) @@ -940,19 +1018,27 @@ def set_epg(self, request, pk=None): channel.epg_data = epg_data channel.save(update_fields=["epg_data"]) - # Explicitly trigger program refresh for this EPG - from apps.epg.tasks import parse_programs_for_tvg_id + # Only trigger program refresh for non-dummy EPG sources + status_message = None + if epg_data.epg_source.source_type != 'dummy': + # Explicitly trigger program refresh for this EPG + from apps.epg.tasks import parse_programs_for_tvg_id + + task_result = parse_programs_for_tvg_id.delay(epg_data.id) - task_result = parse_programs_for_tvg_id.delay(epg_data.id) + # Prepare response with task status info + status_message = "EPG refresh queued" + if task_result.result == "Task already running": + status_message = "EPG refresh already in progress" - # Prepare response with task status info - status_message = "EPG refresh queued" - if task_result.result == "Task already running": - status_message = "EPG refresh already in progress" + # Build response message + message = f"EPG data set to {epg_data.tvg_id} for channel {channel.name}" + if status_message: + message += f". {status_message}" return Response( { - "message": f"EPG data set to {epg_data.tvg_id} for channel {channel.name}. 
{status_message}.", + "message": message, "channel": self.get_serializer(channel).data, "task_status": status_message, } @@ -984,8 +1070,15 @@ def set_epg(self, request, pk=None): def batch_set_epg(self, request): """Efficiently associate multiple channels with EPG data at once.""" associations = request.data.get("associations", []) - channels_updated = 0 - programs_refreshed = 0 + + if not associations: + return Response( + {"error": "associations list is required"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + # Extract channel IDs upfront + channel_updates = {} unique_epg_ids = set() for assoc in associations: @@ -995,32 +1088,58 @@ def batch_set_epg(self, request): if not channel_id: continue - try: - # Get the channel - channel = Channel.objects.get(id=channel_id) - - # Set the EPG data - channel.epg_data_id = epg_data_id - channel.save(update_fields=["epg_data"]) - channels_updated += 1 + channel_updates[channel_id] = epg_data_id + if epg_data_id: + unique_epg_ids.add(epg_data_id) - # Track unique EPG data IDs - if epg_data_id: - unique_epg_ids.add(epg_data_id) + # Batch fetch all channels (single query) + channels_dict = { + c.id: c for c in Channel.objects.filter(id__in=channel_updates.keys()) + } - except Channel.DoesNotExist: + # Collect channels to update + channels_to_update = [] + for channel_id, epg_data_id in channel_updates.items(): + if channel_id not in channels_dict: logger.error(f"Channel with ID {channel_id} not found") - except Exception as e: - logger.error( - f"Error setting EPG data for channel {channel_id}: {str(e)}" + continue + + channel = channels_dict[channel_id] + channel.epg_data_id = epg_data_id + channels_to_update.append(channel) + + # Bulk update all channels (single query) + if channels_to_update: + with transaction.atomic(): + Channel.objects.bulk_update( + channels_to_update, + fields=["epg_data_id"], + batch_size=100 ) - # Trigger program refresh for unique EPG data IDs + channels_updated = len(channels_to_update) + + # Trigger program refresh for unique EPG data IDs (skip dummy EPGs) from apps.epg.tasks import parse_programs_for_tvg_id + from apps.epg.models import EPGData + + # Batch fetch EPG data (single query) + epg_data_dict = { + epg.id: epg + for epg in EPGData.objects.filter(id__in=unique_epg_ids).select_related('epg_source') + } + programs_refreshed = 0 for epg_id in unique_epg_ids: - parse_programs_for_tvg_id.delay(epg_id) - programs_refreshed += 1 + epg_data = epg_data_dict.get(epg_id) + if not epg_data: + logger.error(f"EPGData with ID {epg_id} not found") + continue + + # Only refresh non-dummy EPG sources + if epg_data.epg_source.source_type != 'dummy': + parse_programs_for_tvg_id.delay(epg_id) + programs_refreshed += 1 return Response( { @@ -1185,7 +1304,7 @@ def get_permissions(self): return [Authenticated()] @swagger_auto_schema( - operation_description="Delete all logos that are not used by any channels, movies, or series", + operation_description="Delete all channel logos that are not used by any channels", request_body=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ @@ -1199,24 +1318,11 @@ def get_permissions(self): responses={200: "Cleanup completed"}, ) def post(self, request): - """Delete all logos with no channel, movie, or series associations""" + """Delete all channel logos with no channel associations""" delete_files = request.data.get("delete_files", False) - # Find logos that are not used by channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available 
- try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - unused_logos = Logo.objects.filter(filter_conditions) + # Find logos that are not used by any channels + unused_logos = Logo.objects.filter(channels__isnull=True) deleted_count = unused_logos.count() logo_names = list(unused_logos.values_list('name', flat=True)) local_files_deleted = 0 @@ -1288,13 +1394,6 @@ def get_queryset(self): # Start with basic prefetch for channels queryset = Logo.objects.prefetch_related('channels').order_by('name') - # Try to prefetch VOD relations if available - try: - queryset = queryset.prefetch_related('movie', 'series') - except: - # VOD app might not be available, continue without VOD prefetch - pass - # Filter by specific IDs ids = self.request.query_params.getlist('ids') if ids: @@ -1307,62 +1406,14 @@ def get_queryset(self): pass # Invalid IDs, return empty queryset queryset = Logo.objects.none() - # Filter by usage - now includes VOD content + # Filter by usage used_filter = self.request.query_params.get('used', None) if used_filter == 'true': - # Logo is used if it has any channels, movies, or series - filter_conditions = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - filter_conditions |= Q(movie__isnull=False) - except: - pass - - try: - filter_conditions |= Q(series__isnull=False) - except: - pass - - queryset = queryset.filter(filter_conditions).distinct() - + # Logo is used if it has any channels + queryset = queryset.filter(channels__isnull=False).distinct() elif used_filter == 'false': - # Logo is unused if it has no channels, movies, or series - filter_conditions = Q(channels__isnull=True) - - # Add VOD conditions if models are available - try: - filter_conditions &= Q(movie__isnull=True) - except: - pass - - try: - filter_conditions &= Q(series__isnull=True) - except: - pass - - queryset = queryset.filter(filter_conditions) - - # Filter for channel assignment (unused + channel-used, exclude VOD-only) - channel_assignable = self.request.query_params.get('channel_assignable', None) - if channel_assignable == 'true': - # Include logos that are either: - # 1. Completely unused, OR - # 2. 
Used by channels (but may also be used by VOD) - # Exclude logos that are ONLY used by VOD content - - unused_condition = Q(channels__isnull=True) - channel_used_condition = Q(channels__isnull=False) - - # Add VOD conditions if models are available - try: - unused_condition &= Q(movie__isnull=True) & Q(series__isnull=True) - except: - pass - - # Combine: unused OR used by channels - filter_conditions = unused_condition | channel_used_condition - queryset = queryset.filter(filter_conditions).distinct() + # Logo is unused if it has no channels + queryset = queryset.filter(channels__isnull=True) # Filter by name name_filter = self.request.query_params.get('name', None) @@ -1653,6 +1704,41 @@ def patch(self, request, profile_id): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) +class RecurringRecordingRuleViewSet(viewsets.ModelViewSet): + queryset = RecurringRecordingRule.objects.all().select_related("channel") + serializer_class = RecurringRecordingRuleSerializer + + def get_permissions(self): + return [IsAdmin()] + + def perform_create(self, serializer): + rule = serializer.save() + try: + sync_recurring_rule_impl(rule.id, drop_existing=True) + except Exception as err: + logger.warning(f"Failed to initialize recurring rule {rule.id}: {err}") + return rule + + def perform_update(self, serializer): + rule = serializer.save() + try: + if rule.enabled: + sync_recurring_rule_impl(rule.id, drop_existing=True) + else: + purge_recurring_rule_impl(rule.id) + except Exception as err: + logger.warning(f"Failed to resync recurring rule {rule.id}: {err}") + return rule + + def perform_destroy(self, instance): + rule_id = instance.id + super().perform_destroy(instance) + try: + purge_recurring_rule_impl(rule_id) + except Exception as err: + logger.warning(f"Failed to purge recordings for rule {rule_id}: {err}") + + class RecordingViewSet(viewsets.ModelViewSet): queryset = Recording.objects.all() serializer_class = RecordingSerializer @@ -1832,6 +1918,49 @@ def _safe_remove(path: str): return response +class ComskipConfigAPIView(APIView): + """Upload or inspect the custom comskip.ini used by DVR processing.""" + + parser_classes = [MultiPartParser, FormParser] + + def get_permissions(self): + return [IsAdmin()] + + def get(self, request): + path = CoreSettings.get_dvr_comskip_custom_path() + exists = bool(path and os.path.exists(path)) + return Response({"path": path, "exists": exists}) + + def post(self, request): + uploaded = request.FILES.get("file") or request.FILES.get("comskip_ini") + if not uploaded: + return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST) + + name = (uploaded.name or "").lower() + if not name.endswith(".ini"): + return Response({"error": "Only .ini files are allowed"}, status=status.HTTP_400_BAD_REQUEST) + + if uploaded.size and uploaded.size > 1024 * 1024: + return Response({"error": "File too large (limit 1MB)"}, status=status.HTTP_400_BAD_REQUEST) + + dest_dir = os.path.join(settings.MEDIA_ROOT, "comskip") + os.makedirs(dest_dir, exist_ok=True) + dest_path = os.path.join(dest_dir, "comskip.ini") + + try: + with open(dest_path, "wb") as dest: + for chunk in uploaded.chunks(): + dest.write(chunk) + except Exception as e: + logger.error(f"Failed to save uploaded comskip.ini: {e}") + return Response({"error": "Unable to save file"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + # Persist path setting so DVR processing picks it up immediately + CoreSettings.set_dvr_comskip_custom_path(dest_path) + + return Response({"success": 
True, "path": dest_path, "exists": os.path.exists(dest_path)}) + + class BulkDeleteUpcomingRecordingsAPIView(APIView): """Delete all upcoming (future) recordings.""" def get_permissions(self): diff --git a/apps/channels/migrations/0026_recurringrecordingrule.py b/apps/channels/migrations/0026_recurringrecordingrule.py new file mode 100644 index 000000000..1b8cfdb87 --- /dev/null +++ b/apps/channels/migrations/0026_recurringrecordingrule.py @@ -0,0 +1,31 @@ +# Generated by Django 5.0.14 on 2025-09-18 14:56 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='RecurringRecordingRule', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('days_of_week', models.JSONField(default=list)), + ('start_time', models.TimeField()), + ('end_time', models.TimeField()), + ('enabled', models.BooleanField(default=True)), + ('name', models.CharField(blank=True, max_length=255)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')), + ], + options={ + 'ordering': ['channel', 'start_time'], + }, + ), + ] diff --git a/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py new file mode 100644 index 000000000..8cdb9868f --- /dev/null +++ b/apps/channels/migrations/0027_recurringrecordingrule_end_date_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-05 20:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0026_recurringrecordingrule'), + ] + + operations = [ + migrations.AddField( + model_name='recurringrecordingrule', + name='end_date', + field=models.DateField(blank=True, null=True), + ), + migrations.AddField( + model_name='recurringrecordingrule', + name='start_date', + field=models.DateField(blank=True, null=True), + ), + ] diff --git a/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py new file mode 100644 index 000000000..08c426b11 --- /dev/null +++ b/apps/channels/migrations/0028_channel_created_at_channel_updated_at.py @@ -0,0 +1,25 @@ +# Generated by Django 5.2.4 on 2025-10-06 22:55 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='channel', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'), + preserve_default=False, + ), + migrations.AddField( + model_name='channel', + name='updated_at', + field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'), + ), + ] diff --git a/apps/channels/migrations/0029_backfill_custom_stream_hashes.py b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py new file mode 100644 index 000000000..3e270be23 
--- /dev/null +++ b/apps/channels/migrations/0029_backfill_custom_stream_hashes.py @@ -0,0 +1,54 @@ +# Generated migration to backfill stream_hash for existing custom streams + +from django.db import migrations +import hashlib + + +def backfill_custom_stream_hashes(apps, schema_editor): + """ + Generate stream_hash for all custom streams that don't have one. + Uses stream ID to create a stable hash that won't change when name/url is edited. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams_without_hash = Stream.objects.filter( + is_custom=True, + stream_hash__isnull=True + ) + + updated_count = 0 + for stream in custom_streams_without_hash: + # Generate a stable hash using the stream's ID + # This ensures the hash never changes even if name/url is edited + unique_string = f"custom_stream_{stream.id}" + stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + stream.save(update_fields=['stream_hash']) + updated_count += 1 + + if updated_count > 0: + print(f"Backfilled stream_hash for {updated_count} custom streams") + else: + print("No custom streams needed stream_hash backfill") + + +def reverse_backfill(apps, schema_editor): + """ + Reverse migration - clear stream_hash for custom streams. + Note: This will break preview functionality for custom streams. + """ + Stream = apps.get_model('dispatcharr_channels', 'Stream') + + custom_streams = Stream.objects.filter(is_custom=True) + count = custom_streams.update(stream_hash=None) + print(f"Cleared stream_hash for {count} custom streams") + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'), + ] + + operations = [ + migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill), + ] diff --git a/apps/channels/migrations/0030_alter_stream_url.py b/apps/channels/migrations/0030_alter_stream_url.py new file mode 100644 index 000000000..203e411a8 --- /dev/null +++ b/apps/channels/migrations/0030_alter_stream_url.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-28 20:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dispatcharr_channels', '0029_backfill_custom_stream_hashes'), + ] + + operations = [ + migrations.AlterField( + model_name='stream', + name='url', + field=models.URLField(blank=True, max_length=4096, null=True), + ), + ] diff --git a/apps/channels/models.py b/apps/channels/models.py index 20c9ac428..3dfb392b3 100644 --- a/apps/channels/models.py +++ b/apps/channels/models.py @@ -55,7 +55,7 @@ class Stream(models.Model): """ name = models.CharField(max_length=255, default="Default Stream") - url = models.URLField(max_length=2000, blank=True, null=True) + url = models.URLField(max_length=4096, blank=True, null=True) m3u_account = models.ForeignKey( M3UAccount, on_delete=models.CASCADE, @@ -152,8 +152,14 @@ def update_or_create_by_hash(cls, hash_value, **fields_to_update): stream = cls.objects.create(**fields_to_update) return stream, True # True means it was created - # @TODO: honor stream's stream profile def get_stream_profile(self): + """ + Get the stream profile for this stream. + Uses the stream's own profile if set, otherwise returns the default. 
+ """ + if self.stream_profile: + return self.stream_profile + stream_profile = StreamProfile.objects.get( id=CoreSettings.get_default_stream_profile_id() ) @@ -303,6 +309,15 @@ class Channel(models.Model): help_text="The M3U account that auto-created this channel" ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Timestamp when this channel was created" + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Timestamp when this channel was last updated" + ) + def clean(self): # Enforce unique channel_number within a given group existing = Channel.objects.filter( @@ -601,3 +616,35 @@ class Recording(models.Model): def __str__(self): return f"{self.channel.name} - {self.start_time} to {self.end_time}" + + +class RecurringRecordingRule(models.Model): + """Rule describing a recurring manual DVR schedule.""" + + channel = models.ForeignKey( + "Channel", + on_delete=models.CASCADE, + related_name="recurring_rules", + ) + days_of_week = models.JSONField(default=list) + start_time = models.TimeField() + end_time = models.TimeField() + enabled = models.BooleanField(default=True) + name = models.CharField(max_length=255, blank=True) + start_date = models.DateField(null=True, blank=True) + end_date = models.DateField(null=True, blank=True) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + ordering = ["channel", "start_time"] + + def __str__(self): + channel_name = getattr(self.channel, "name", str(self.channel_id)) + return f"Recurring rule for {channel_name}" + + def cleaned_days(self): + try: + return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6}) + except Exception: + return [] diff --git a/apps/channels/serializers.py b/apps/channels/serializers.py index 51bfe0a04..635281d5b 100644 --- a/apps/channels/serializers.py +++ b/apps/channels/serializers.py @@ -1,4 +1,6 @@ import json +from datetime import datetime + from rest_framework import serializers from .models import ( Stream, @@ -10,6 +12,7 @@ ChannelProfile, ChannelProfileMembership, Recording, + RecurringRecordingRule, ) from apps.epg.serializers import EPGDataSerializer from core.models import StreamProfile @@ -61,47 +64,15 @@ def get_cache_url(self, obj): return reverse("api:channels:logo-cache", args=[obj.id]) def get_channel_count(self, obj): - """Get the number of channels, movies, and series using this logo""" - channel_count = obj.channels.count() - - # Safely get movie count - try: - movie_count = obj.movie.count() if hasattr(obj, 'movie') else 0 - except AttributeError: - movie_count = 0 - - # Safely get series count - try: - series_count = obj.series.count() if hasattr(obj, 'series') else 0 - except AttributeError: - series_count = 0 - - return channel_count + movie_count + series_count + """Get the number of channels using this logo""" + return obj.channels.count() def get_is_used(self, obj): - """Check if this logo is used by any channels, movies, or series""" - # Check if used by channels - if obj.channels.exists(): - return True - - # Check if used by movies (handle case where VOD app might not be available) - try: - if hasattr(obj, 'movie') and obj.movie.exists(): - return True - except AttributeError: - pass - - # Check if used by series (handle case where VOD app might not be available) - try: - if hasattr(obj, 'series') and obj.series.exists(): - return True - except AttributeError: - pass - - return False + """Check if this logo is used by any channels""" + return obj.channels.exists() 
def get_channel_names(self, obj): - """Get the names of channels, movies, and series using this logo (limited to first 5)""" + """Get the names of channels using this logo (limited to first 5)""" names = [] # Get channel names @@ -109,28 +80,6 @@ def get_channel_names(self, obj): for channel in channels: names.append(f"Channel: {channel.name}") - # Get movie names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'movie'): - remaining_slots = 5 - len(names) - movies = obj.movie.all()[:remaining_slots] - for movie in movies: - names.append(f"Movie: {movie.name}") - except AttributeError: - pass - - # Get series names (only if we haven't reached limit) - if len(names) < 5: - try: - if hasattr(obj, 'series'): - remaining_slots = 5 - len(names) - series = obj.series.all()[:remaining_slots] - for series_item in series: - names.append(f"Series: {series_item.name}") - except AttributeError: - pass - # Calculate total count for "more" message total_count = self.get_channel_count(obj) if total_count > 5: @@ -345,8 +294,17 @@ def to_representation(self, instance): if include_streams: self.fields["streams"] = serializers.SerializerMethodField() - - return super().to_representation(instance) + return super().to_representation(instance) + else: + # Fix: For PATCH/PUT responses, ensure streams are ordered + representation = super().to_representation(instance) + if "streams" in representation: + representation["streams"] = list( + instance.streams.all() + .order_by("channelstream__order") + .values_list("id", flat=True) + ) + return representation def get_logo(self, obj): return LogoSerializer(obj.logo).data @@ -454,6 +412,13 @@ def validate(self, data): start_time = data.get("start_time") end_time = data.get("end_time") + if start_time and timezone.is_naive(start_time): + start_time = timezone.make_aware(start_time, timezone.get_current_timezone()) + data["start_time"] = start_time + if end_time and timezone.is_naive(end_time): + end_time = timezone.make_aware(end_time, timezone.get_current_timezone()) + data["end_time"] = end_time + # If this is an EPG-based recording (program provided), apply global pre/post offsets try: cp = data.get("custom_properties") or {} @@ -497,3 +462,56 @@ def validate(self, data): raise serializers.ValidationError("End time must be after start time.") return data + + +class RecurringRecordingRuleSerializer(serializers.ModelSerializer): + class Meta: + model = RecurringRecordingRule + fields = "__all__" + read_only_fields = ["created_at", "updated_at"] + + def validate_days_of_week(self, value): + if not value: + raise serializers.ValidationError("Select at least one day of the week") + cleaned = [] + for entry in value: + try: + iv = int(entry) + except (TypeError, ValueError): + raise serializers.ValidationError("Days of week must be integers 0-6") + if iv < 0 or iv > 6: + raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)") + cleaned.append(iv) + return sorted(set(cleaned)) + + def validate(self, attrs): + start = attrs.get("start_time") or getattr(self.instance, "start_time", None) + end = attrs.get("end_time") or getattr(self.instance, "end_time", None) + start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None) + end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None) + if start_date is None: + existing_start = getattr(self.instance, "start_date", None) + if existing_start is None: + raise 
serializers.ValidationError("Start date is required") + if start_date and end_date and end_date < start_date: + raise serializers.ValidationError("End date must be on or after start date") + if end_date is None: + existing_end = getattr(self.instance, "end_date", None) + if existing_end is None: + raise serializers.ValidationError("End date is required") + if start and end and start_date and end_date: + start_dt = datetime.combine(start_date, start) + end_dt = datetime.combine(end_date, end) + if end_dt <= start_dt: + raise serializers.ValidationError("End datetime must be after start datetime") + elif start and end and end == start: + raise serializers.ValidationError("End time must be different from start time") + # Normalize empty strings to None for dates + if attrs.get("end_date") == "": + attrs["end_date"] = None + if attrs.get("start_date") == "": + attrs["start_date"] = None + return super().validate(attrs) + + def create(self, validated_data): + return super().create(validated_data) diff --git a/apps/channels/signals.py b/apps/channels/signals.py index d7a7414d9..27b361bae 100644 --- a/apps/channels/signals.py +++ b/apps/channels/signals.py @@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs): else: raise ValueError("No default M3UAccount found.") +@receiver(post_save, sender=Stream) +def generate_custom_stream_hash(sender, instance, created, **kwargs): + """ + Generate a stable stream_hash for custom streams after creation. + Uses the stream's ID to ensure the hash never changes even if name/url is edited. + """ + if instance.is_custom and not instance.stream_hash and created: + import hashlib + # Use stream ID for a stable, unique hash that never changes + unique_string = f"custom_stream_{instance.id}" + instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest() + # Use update to avoid triggering signals again + Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash) + @receiver(post_save, sender=Channel) def refresh_epg_programs(sender, instance, created, **kwargs): """ diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 732d03cc9..5a9528a78 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -7,6 +7,8 @@ import time import json import subprocess +import signal +from zoneinfo import ZoneInfo from datetime import datetime, timedelta import gc @@ -28,6 +30,23 @@ logger = logging.getLogger(__name__) +# PostgreSQL btree index has a limit of ~2704 bytes (1/3 of 8KB page size) +# We use 2000 as a safe maximum to account for multibyte characters +def validate_logo_url(logo_url, max_length=2000): + """ + Fast validation for logo URLs during bulk creation. + Returns None if URL is too long (would exceed PostgreSQL btree index limit), + original URL otherwise. + + PostgreSQL btree indexes have a maximum size of ~2704 bytes. URLs longer than + this cannot be indexed and would cause database errors. These are typically + base64-encoded images embedded in URLs. 
+ """ + if logo_url and len(logo_url) > max_length: + logger.warning(f"Logo URL too long ({len(logo_url)} > {max_length}), skipping: {logo_url[:100]}...") + return None + return logo_url + def send_epg_matching_progress(total_channels, matched_channels, current_channel_name="", stage="matching"): """ Send EPG matching progress via WebSocket @@ -1115,6 +1134,148 @@ def reschedule_upcoming_recordings_for_offset_change(): return reschedule_upcoming_recordings_for_offset_change_impl() +def _notify_recordings_refresh(): + try: + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', {"success": True, "type": "recordings_refreshed"}) + except Exception: + pass + + +def purge_recurring_rule_impl(rule_id: int) -> int: + """Remove all future recordings created by a recurring rule.""" + from django.utils import timezone + from .models import Recording + + now = timezone.now() + try: + removed, _ = Recording.objects.filter( + start_time__gte=now, + custom_properties__rule__id=rule_id, + ).delete() + except Exception: + removed = 0 + if removed: + _notify_recordings_refresh() + return removed + + +def sync_recurring_rule_impl(rule_id: int, drop_existing: bool = True, horizon_days: int = 14) -> int: + """Ensure recordings exist for a recurring rule within the scheduling horizon.""" + from django.utils import timezone + from .models import RecurringRecordingRule, Recording + + rule = RecurringRecordingRule.objects.filter(pk=rule_id).select_related("channel").first() + now = timezone.now() + removed = 0 + if drop_existing: + removed = purge_recurring_rule_impl(rule_id) + + if not rule or not rule.enabled: + return 0 + + days = rule.cleaned_days() + if not days: + return 0 + + tz_name = CoreSettings.get_system_time_zone() + try: + tz = ZoneInfo(tz_name) + except Exception: + logger.warning("Invalid or unsupported time zone '%s'; falling back to Server default", tz_name) + tz = timezone.get_current_timezone() + start_limit = rule.start_date or now.date() + end_limit = rule.end_date + horizon = now + timedelta(days=horizon_days) + start_window = max(start_limit, now.date()) + if drop_existing and end_limit: + end_window = end_limit + else: + end_window = horizon.date() + if end_limit and end_limit < end_window: + end_window = end_limit + if end_window < start_window: + return 0 + total_created = 0 + + for offset in range((end_window - start_window).days + 1): + target_date = start_window + timedelta(days=offset) + if target_date.weekday() not in days: + continue + if end_limit and target_date > end_limit: + continue + try: + start_dt = timezone.make_aware(datetime.combine(target_date, rule.start_time), tz) + end_dt = timezone.make_aware(datetime.combine(target_date, rule.end_time), tz) + except Exception: + continue + if end_dt <= start_dt: + end_dt = end_dt + timedelta(days=1) + if start_dt <= now: + continue + exists = Recording.objects.filter( + channel=rule.channel, + start_time=start_dt, + custom_properties__rule__id=rule.id, + ).exists() + if exists: + continue + description = rule.name or f"Recurring recording for {rule.channel.name}" + cp = { + "rule": { + "type": "recurring", + "id": rule.id, + "days_of_week": days, + "name": rule.name or "", + }, + "status": "scheduled", + "description": description, + "program": { + "title": rule.name or rule.channel.name, + "description": description, + "start_time": start_dt.isoformat(), + "end_time": end_dt.isoformat(), + }, + } + try: + Recording.objects.create( + channel=rule.channel, + start_time=start_dt, + 
end_time=end_dt, + custom_properties=cp, + ) + total_created += 1 + except Exception as err: + logger.warning(f"Failed to create recurring recording for rule {rule.id}: {err}") + + if removed or total_created: + _notify_recordings_refresh() + + return total_created + + +@shared_task +def rebuild_recurring_rule(rule_id: int, horizon_days: int = 14): + return sync_recurring_rule_impl(rule_id, drop_existing=True, horizon_days=horizon_days) + + +@shared_task +def maintain_recurring_recordings(): + from .models import RecurringRecordingRule + + total = 0 + for rule_id in RecurringRecordingRule.objects.filter(enabled=True).values_list("id", flat=True): + try: + total += sync_recurring_rule_impl(rule_id, drop_existing=False) + except Exception as err: + logger.warning(f"Recurring rule maintenance failed for {rule_id}: {err}") + return total + + +@shared_task +def purge_recurring_rule(rule_id: int): + return purge_recurring_rule_impl(rule_id) + @shared_task def _safe_name(s): try: @@ -1273,6 +1434,18 @@ def run_recording(recording_id, channel_id, start_time_str, end_time_str): logger.info(f"Starting recording for channel {channel.name}") + # Log system event for recording start + try: + from core.utils import log_system_event + log_system_event( + 'recording_start', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id + ) + except Exception as e: + logger.error(f"Could not log recording start event: {e}") + # Try to resolve the Recording row up front recording_obj = None try: @@ -1666,6 +1839,20 @@ def score(img): # After the loop, the file and response are closed automatically. logger.info(f"Finished recording for channel {channel.name}") + # Log system event for recording end + try: + from core.utils import log_system_event + log_system_event( + 'recording_end', + channel_id=channel.uuid, + channel_name=channel.name, + recording_id=recording_id, + interrupted=interrupted, + bytes_written=bytes_written + ) + except Exception as e: + logger.error(f"Could not log recording end event: {e}") + # Remux TS to MKV container remux_success = False try: @@ -1837,6 +2024,7 @@ def comskip_process_recording(recording_id: int): Safe to call even if comskip is not installed; stores status in custom_properties.comskip. 
""" import shutil + from django.db import DatabaseError from .models import Recording # Helper to broadcast status over websocket def _ws(status: str, extra: dict | None = None): @@ -1854,7 +2042,33 @@ def _ws(status: str, extra: dict | None = None): except Recording.DoesNotExist: return "not_found" - cp = rec.custom_properties or {} + cp = rec.custom_properties.copy() if isinstance(rec.custom_properties, dict) else {} + + def _persist_custom_properties(): + """Persist updated custom_properties without raising if the row disappeared.""" + try: + updated = Recording.objects.filter(pk=recording_id).update(custom_properties=cp) + if not updated: + logger.warning( + "Recording %s vanished before comskip status could be saved", + recording_id, + ) + return False + except DatabaseError as db_err: + logger.warning( + "Failed to persist comskip status for recording %s: %s", + recording_id, + db_err, + ) + return False + except Exception as unexpected: + logger.warning( + "Unexpected error while saving comskip status for recording %s: %s", + recording_id, + unexpected, + ) + return False + return True file_path = (cp or {}).get("file_path") if not file_path or not os.path.exists(file_path): return "no_file" @@ -1865,8 +2079,7 @@ def _ws(status: str, extra: dict | None = None): comskip_bin = shutil.which("comskip") if not comskip_bin: cp["comskip"] = {"status": "skipped", "reason": "comskip_not_installed"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('skipped', {"reason": "comskip_not_installed"}) return "comskip_missing" @@ -1878,24 +2091,59 @@ def _ws(status: str, extra: dict | None = None): try: cmd = [comskip_bin, "--output", os.path.dirname(file_path)] - # Prefer system ini if present to squelch warning and get sane defaults - for ini_path in ("/etc/comskip/comskip.ini", "/app/docker/comskip.ini"): - if os.path.exists(ini_path): + # Prefer user-specified INI, fall back to known defaults + ini_candidates = [] + try: + custom_ini = CoreSettings.get_dvr_comskip_custom_path() + if custom_ini: + ini_candidates.append(custom_ini) + except Exception as ini_err: + logger.debug(f"Unable to load custom comskip.ini path: {ini_err}") + ini_candidates.extend(["/etc/comskip/comskip.ini", "/app/docker/comskip.ini"]) + selected_ini = None + for ini_path in ini_candidates: + if ini_path and os.path.exists(ini_path): + selected_ini = ini_path cmd.extend([f"--ini={ini_path}"]) break cmd.append(file_path) - subprocess.run(cmd, check=True) + subprocess.run( + cmd, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + except subprocess.CalledProcessError as e: + stderr_tail = (e.stderr or "").strip().splitlines() + stderr_tail = stderr_tail[-5:] if stderr_tail else [] + detail = { + "status": "error", + "reason": "comskip_failed", + "returncode": e.returncode, + } + if e.returncode and e.returncode < 0: + try: + detail["signal"] = signal.Signals(-e.returncode).name + except Exception: + detail["signal"] = f"signal_{-e.returncode}" + if stderr_tail: + detail["stderr"] = "\n".join(stderr_tail) + if selected_ini: + detail["ini_path"] = selected_ini + cp["comskip"] = detail + _persist_custom_properties() + _ws('error', {"reason": "comskip_failed", "returncode": e.returncode}) + return "comskip_failed" except Exception as e: cp["comskip"] = {"status": "error", "reason": f"comskip_failed: {e}"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": 
str(e)}) return "comskip_failed" if not os.path.exists(edl_path): cp["comskip"] = {"status": "error", "reason": "edl_not_found"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": "edl_not_found"}) return "no_edl" @@ -1913,8 +2161,7 @@ def _ffprobe_duration(path): duration = _ffprobe_duration(file_path) if duration is None: cp["comskip"] = {"status": "error", "reason": "duration_unknown"} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": "duration_unknown"}) return "no_duration" @@ -1943,9 +2190,14 @@ def _ffprobe_duration(path): keep.append((cur, duration)) if not commercials or sum((e - s) for s, e in commercials) <= 0.5: - cp["comskip"] = {"status": "completed", "skipped": True, "edl": os.path.basename(edl_path)} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + cp["comskip"] = { + "status": "completed", + "skipped": True, + "edl": os.path.basename(edl_path), + } + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() _ws('skipped', {"reason": "no_commercials", "commercials": 0}) return "no_commercials" @@ -1969,7 +2221,8 @@ def _ffprobe_duration(path): list_path = os.path.join(workdir, "concat_list.txt") with open(list_path, "w") as lf: for pth in parts: - lf.write(f"file '{pth}'\n") + escaped = pth.replace("'", "'\\''") + lf.write(f"file '{escaped}'\n") output_path = os.path.join(workdir, f"{os.path.splitext(os.path.basename(file_path))[0]}.cut.mkv") subprocess.run([ @@ -1995,14 +2248,14 @@ def _ffprobe_duration(path): "segments_kept": len(parts), "commercials": len(commercials), } - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + if selected_ini: + cp["comskip"]["ini_path"] = selected_ini + _persist_custom_properties() _ws('completed', {"commercials": len(commercials), "segments_kept": len(parts)}) return "ok" except Exception as e: cp["comskip"] = {"status": "error", "reason": str(e)} - rec.custom_properties = cp - rec.save(update_fields=["custom_properties"]) + _persist_custom_properties() _ws('error', {"reason": str(e)}) return f"error:{e}" def _resolve_poster_for_program(channel_name, program): @@ -2333,15 +2586,16 @@ def get_auto_number(): # Store profile IDs for this channel profile_map.append(channel_profile_ids) - # Handle logo - if stream.logo_url: + # Handle logo - validate URL length to avoid PostgreSQL btree index errors + validated_logo_url = validate_logo_url(stream.logo_url) if stream.logo_url else None + if validated_logo_url: logos_to_create.append( Logo( - url=stream.logo_url, + url=validated_logo_url, name=stream.name or stream.tvg_id, ) ) - logo_map.append(stream.logo_url) + logo_map.append(validated_logo_url) else: logo_map.append(None) @@ -2711,3 +2965,98 @@ def set_channels_logos_from_epg(self, channel_ids): 'error': str(e) }) raise + + +@shared_task(bind=True) +def set_channels_tvg_ids_from_epg(self, channel_ids): + """ + Celery task to set channel TVG-IDs from EPG data for multiple channels + """ + from core.utils import send_websocket_update + + task_id = self.request.id + total_channels = len(channel_ids) + updated_count = 0 + errors = [] + + try: + logger.info(f"Starting EPG TVG-ID setting task for {total_channels} channels") + + # Send initial progress + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': 
total_channels, + 'status': 'running', + 'message': 'Starting EPG TVG-ID setting...' + }) + + batch_size = 100 + for i in range(0, total_channels, batch_size): + batch_ids = channel_ids[i:i + batch_size] + batch_updates = [] + + # Get channels and their EPG data + channels = Channel.objects.filter(id__in=batch_ids).select_related('epg_data') + + for channel in channels: + try: + if channel.epg_data and channel.epg_data.tvg_id: + if channel.tvg_id != channel.epg_data.tvg_id: + channel.tvg_id = channel.epg_data.tvg_id + batch_updates.append(channel) + updated_count += 1 + except Exception as e: + errors.append(f"Channel {channel.id}: {str(e)}") + logger.error(f"Error processing channel {channel.id}: {e}") + + # Bulk update the batch + if batch_updates: + Channel.objects.bulk_update(batch_updates, ['tvg_id']) + + # Send progress update + progress = min(i + batch_size, total_channels) + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': progress, + 'total': total_channels, + 'status': 'running', + 'message': f'Updated {updated_count} channel TVG-IDs...', + 'updated_count': updated_count + }) + + # Send completion notification + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': total_channels, + 'total': total_channels, + 'status': 'completed', + 'message': f'Successfully updated {updated_count} channel TVG-IDs from EPG data', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + }) + + logger.info(f"EPG TVG-ID setting task completed. Updated {updated_count} channels") + return { + 'status': 'completed', + 'updated_count': updated_count, + 'error_count': len(errors), + 'errors': errors + } + + except Exception as e: + logger.error(f"EPG TVG-ID setting task failed: {e}") + send_websocket_update('updates', 'update', { + 'type': 'epg_tvg_id_setting_progress', + 'task_id': task_id, + 'progress': 0, + 'total': total_channels, + 'status': 'failed', + 'message': f'Task failed: {str(e)}', + 'error': str(e) + }) + raise diff --git a/apps/channels/tests/__init__.py b/apps/channels/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/channels/tests/test_recurring_rules.py b/apps/channels/tests/test_recurring_rules.py new file mode 100644 index 000000000..982ecb932 --- /dev/null +++ b/apps/channels/tests/test_recurring_rules.py @@ -0,0 +1,40 @@ +from datetime import datetime, timedelta +from django.test import TestCase +from django.utils import timezone + +from apps.channels.models import Channel, RecurringRecordingRule, Recording +from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl + + +class RecurringRecordingRuleTasksTests(TestCase): + def test_sync_recurring_rule_creates_and_purges_recordings(self): + now = timezone.now() + channel = Channel.objects.create(channel_number=1, name='Test Channel') + + start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0) + end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0) + + rule = RecurringRecordingRule.objects.create( + channel=channel, + days_of_week=[now.weekday()], + start_time=start_time, + end_time=end_time, + ) + + created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1) + self.assertEqual(created, 1) + + recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first() + self.assertIsNotNone(recording) + 
self.assertEqual(recording.channel, channel) + self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id) + + expected_start = timezone.make_aware( + datetime.combine(recording.start_time.date(), start_time), + timezone.get_current_timezone(), + ) + self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60) + + removed = purge_recurring_rule_impl(rule.id) + self.assertEqual(removed, 1) + self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists()) diff --git a/apps/epg/api_views.py b/apps/epg/api_views.py index f3248677e..2fc5a7439 100644 --- a/apps/epg/api_views.py +++ b/apps/epg/api_views.py @@ -147,23 +147,37 @@ def get(self, request, format=None): f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows." ) - # Generate dummy programs for channels that have no EPG data + # Generate dummy programs for channels that have no EPG data OR dummy EPG sources from apps.channels.models import Channel + from apps.epg.models import EPGSource from django.db.models import Q - # Get channels with no EPG data + # Get channels with no EPG data at all (standard dummy) channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True)) - channels_count = channels_without_epg.count() - # Log more detailed information about channels missing EPG data - if channels_count > 0: + # Get channels with custom dummy EPG sources (generate on-demand with patterns) + channels_with_custom_dummy = Channel.objects.filter( + epg_data__epg_source__source_type='dummy' + ).distinct() + + # Log what we found + without_count = channels_without_epg.count() + custom_count = channels_with_custom_dummy.count() + + if without_count > 0: channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg] - logger.warning( - f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}" + logger.debug( + f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}" + ) + + if custom_count > 0: + channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy] + logger.debug( + f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}" ) logger.debug( - f"EPGGridAPIView: Found {channels_count} channels with no EPG data." + f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG." 
) # Serialize the regular programs @@ -205,12 +219,91 @@ def get(self, request, format=None): # Generate and append dummy programs dummy_programs = [] + + # Import the function from output.views + from apps.output.views import generate_dummy_programs as gen_dummy_progs + + # Handle channels with CUSTOM dummy EPG sources (with patterns) + for channel in channels_with_custom_dummy: + # For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel + # This prevents multiple channels assigned to the same dummy EPG from showing identical data + # Each channel gets its own unique program data even if they share the same EPG source + dummy_tvg_id = str(channel.uuid) + + try: + # Get the custom dummy EPG source + epg_source = channel.epg_data.epg_source if channel.epg_data else None + + logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Determine which name to parse based on custom properties + name_to_parse = channel.name + if epg_source and epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + # Get the stream index (1-based from user, convert to 0-based) + stream_index = custom_props.get('stream_index', 1) - 1 + + # Get streams ordered by channelstream order + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + name_to_parse = stream.name + logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") + elif name_source == 'channel': + logger.debug(f"Using channel name for parsing: {name_to_parse}") + + # Generate programs using custom patterns from the dummy EPG source + # Use the same tvg_id that will be set in the program data + generated = gen_dummy_progs( + channel_id=dummy_tvg_id, + channel_name=name_to_parse, + num_days=1, + program_length_hours=4, + epg_source=epg_source + ) + + # Custom dummy should always return data (either from patterns or fallback) + if generated: + logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}") + # Convert generated programs to API format + for program in generated: + dummy_program = { + "id": f"dummy-custom-{channel.id}-{program['start_time'].hour}", + "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, + "start_time": program['start_time'].isoformat(), + "end_time": program['end_time'].isoformat(), + "title": program['title'], + "description": program['description'], + "tvg_id": dummy_tvg_id, + "sub_title": None, + "custom_properties": None, + } + dummy_programs.append(dummy_program) + else: + logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}") + + except Exception as e: + logger.error( + f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + ) + + # Handle channels with NO EPG data (standard dummy with humorous descriptions) for channel in channels_without_epg: - # Use the channel UUID as tvg_id for dummy programs to match in the guide + # For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic) + # The frontend uses: tvgRecord?.tvg_id ?? 
channel.uuid + # Since there's no EPG data, it will fall back to UUID dummy_tvg_id = str(channel.uuid) try: - # Create programs every 4 hours for the next 24 hours + logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})") + + # Create programs every 4 hours for the next 24 hours with humorous descriptions for hour_offset in range(0, 24, 4): # Use timedelta for time arithmetic instead of replace() to avoid hour overflow start_time = now + timedelta(hours=hour_offset) @@ -238,7 +331,7 @@ def get(self, request, format=None): # Create a dummy program in the same format as regular programs dummy_program = { - "id": f"dummy-{channel.id}-{hour_offset}", # Create a unique ID + "id": f"dummy-standard-{channel.id}-{hour_offset}", "epg": {"tvg_id": dummy_tvg_id, "name": channel.name}, "start_time": start_time.isoformat(), "end_time": end_time.isoformat(), @@ -252,7 +345,7 @@ def get(self, request, format=None): except Exception as e: logger.error( - f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" + f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}" ) # Combine regular and dummy programs @@ -284,7 +377,22 @@ def get_permissions(self): ) def post(self, request, format=None): logger.info("EPGImportAPIView: Received request to import EPG data.") - refresh_epg_data.delay(request.data.get("id", None)) # Trigger Celery task + epg_id = request.data.get("id", None) + + # Check if this is a dummy EPG source + try: + from .models import EPGSource + epg_source = EPGSource.objects.get(id=epg_id) + if epg_source.source_type == 'dummy': + logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}") + return Response( + {"success": False, "message": "Dummy EPG sources do not require refreshing."}, + status=status.HTTP_400_BAD_REQUEST, + ) + except EPGSource.DoesNotExist: + pass # Let the task handle the missing source + + refresh_epg_data.delay(epg_id) # Trigger Celery task logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.") return Response( {"success": True, "message": "EPG data import initiated."}, @@ -308,3 +416,4 @@ def get_permissions(self): return [perm() for perm in permission_classes_by_action[self.action]] except KeyError: return [Authenticated()] + diff --git a/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py new file mode 100644 index 000000000..70ebb2149 --- /dev/null +++ b/apps/epg/migrations/0018_epgsource_custom_properties_and_more.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-10-17 17:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0017_alter_epgsource_url'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='custom_properties', + field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True), + ), + migrations.AlterField( + model_name='epgsource', + name='source_type', + field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20), + ), + ] diff --git a/apps/epg/migrations/0019_alter_programdata_sub_title.py b/apps/epg/migrations/0019_alter_programdata_sub_title.py new file mode 100644 index 000000000..5a53627ca --- /dev/null +++ 
b/apps/epg/migrations/0019_alter_programdata_sub_title.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-10-22 21:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0018_epgsource_custom_properties_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='programdata', + name='sub_title', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py new file mode 100644 index 000000000..8f53bb0aa --- /dev/null +++ b/apps/epg/migrations/0020_migrate_time_to_starttime_placeholders.py @@ -0,0 +1,119 @@ +# Generated migration to replace {time} placeholders with {starttime} + +import re +from django.db import migrations + + +def migrate_time_placeholders(apps, schema_editor): + """ + Replace {time} with {starttime} and {time24} with {starttime24} + in all dummy EPG source custom_properties templates. + """ + EPGSource = apps.get_model('epg', 'EPGSource') + + # Fields that contain templates with placeholders + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + # Get all dummy EPG sources + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Replace {time24} first (before {time}) to avoid double replacement + # e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime} + new_value = original_value + new_value = re.sub(r'\{time24\}', '{starttime24}', new_value) + new_value = re.sub(r'\{time\}', '{starttime}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.") + else: + print("No dummy EPG sources needed placeholder updates.") + + +def reverse_migration(apps, schema_editor): + """ + Reverse the migration by replacing {starttime} back to {time}. 
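For illustration, the effect of the forward migration on a stored template, using the same regex replacements as above; the sample template string itself is made up.

import re

before = "{title} starts at {time} ({time24})"
after = re.sub(r"\{time\}", "{starttime}",
               re.sub(r"\{time24\}", "{starttime24}", before))
assert after == "{title} starts at {starttime} ({starttime24})"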
+ """ + EPGSource = apps.get_model('epg', 'EPGSource') + + template_fields = [ + 'title_template', + 'description_template', + 'upcoming_title_template', + 'upcoming_description_template', + 'ended_title_template', + 'ended_description_template', + 'channel_logo_url', + 'program_poster_url', + ] + + dummy_sources = EPGSource.objects.filter(source_type='dummy') + + updated_count = 0 + for source in dummy_sources: + if not source.custom_properties: + continue + + modified = False + custom_props = source.custom_properties.copy() + + for field in template_fields: + if field in custom_props and custom_props[field]: + original_value = custom_props[field] + + # Reverse the replacements + new_value = original_value + new_value = re.sub(r'\{starttime24\}', '{time24}', new_value) + new_value = re.sub(r'\{starttime\}', '{time}', new_value) + + if new_value != original_value: + custom_props[field] = new_value + modified = True + + if modified: + source.custom_properties = custom_props + source.save(update_fields=['custom_properties']) + updated_count += 1 + + if updated_count > 0: + print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.") + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0019_alter_programdata_sub_title'), + ] + + operations = [ + migrations.RunPython(migrate_time_placeholders, reverse_migration), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index da6ac8e6e..e5f3847bd 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -8,6 +8,7 @@ class EPGSource(models.Model): SOURCE_TYPE_CHOICES = [ ('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), + ('dummy', 'Custom Dummy EPG'), ] STATUS_IDLE = 'idle' @@ -38,6 +39,12 @@ class EPGSource(models.Model): refresh_task = models.ForeignKey( PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True ) + custom_properties = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" + ) status = models.CharField( max_length=20, choices=STATUS_CHOICES, @@ -148,7 +155,7 @@ class ProgramData(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() title = models.CharField(max_length=255) - sub_title = models.CharField(max_length=255, blank=True, null=True) + sub_title = models.TextField(blank=True, null=True) description = models.TextField(blank=True, null=True) tvg_id = models.CharField(max_length=255, null=True, blank=True) custom_properties = models.JSONField(default=dict, blank=True, null=True) diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index 85186cae8..bfb750fc0 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -4,7 +4,7 @@ from apps.channels.models import Channel class EPGSourceSerializer(serializers.ModelSerializer): - epg_data_ids = serializers.SerializerMethodField() + epg_data_count = serializers.SerializerMethodField() read_only_fields = ['created_at', 'updated_at'] url = serializers.CharField( required=False, @@ -28,11 +28,13 @@ class Meta: 'last_message', 'created_at', 'updated_at', - 'epg_data_ids' + 'custom_properties', + 'epg_data_count' ] - def get_epg_data_ids(self, obj): - return list(obj.epgs.values_list('id', flat=True)) + def get_epg_data_count(self, obj): + """Return the count of EPG data entries instead of all IDs to prevent large payloads""" + return obj.epgs.count() class ProgramDataSerializer(serializers.ModelSerializer): class Meta: diff 
--git a/apps/epg/signals.py b/apps/epg/signals.py index e8a004cbd..e41d3aaf4 100644 --- a/apps/epg/signals.py +++ b/apps/epg/signals.py @@ -1,9 +1,9 @@ from django.db.models.signals import post_save, post_delete, pre_save from django.dispatch import receiver -from .models import EPGSource +from .models import EPGSource, EPGData from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id from django_celery_beat.models import PeriodicTask, IntervalSchedule -from core.utils import is_protected_path +from core.utils import is_protected_path, send_websocket_update import json import logging import os @@ -12,15 +12,77 @@ @receiver(post_save, sender=EPGSource) def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs): - # Trigger refresh only if the source is newly created and active - if created and instance.is_active: + # Trigger refresh only if the source is newly created, active, and not a dummy EPG + if created and instance.is_active and instance.source_type != 'dummy': refresh_epg_data.delay(instance.id) +@receiver(post_save, sender=EPGSource) +def create_dummy_epg_data(sender, instance, created, **kwargs): + """ + Automatically create EPGData for dummy EPG sources when they are created. + This allows channels to be assigned to dummy EPGs immediately without + requiring a refresh first. + """ + if instance.source_type == 'dummy': + # Ensure dummy EPGs always have idle status and no status message + if instance.status != EPGSource.STATUS_IDLE or instance.last_message: + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + instance.save(update_fields=['status', 'last_message']) + + # Create a URL-friendly tvg_id from the dummy EPG name + # Replace spaces and special characters with underscores + friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_') + # Remove any characters that aren't alphanumeric or underscores + friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_') + # Convert to lowercase for consistency + friendly_tvg_id = friendly_tvg_id.lower() + # Prefix with 'dummy_' to make it clear this is a dummy EPG + friendly_tvg_id = f"dummy_{friendly_tvg_id}" + + # Create or update the EPGData record + epg_data, data_created = EPGData.objects.get_or_create( + tvg_id=friendly_tvg_id, + epg_source=instance, + defaults={ + 'name': instance.name, + 'icon_url': None + } + ) + + # Update name if it changed and record already existed + if not data_created and epg_data.name != instance.name: + epg_data.name = instance.name + epg_data.save(update_fields=['name']) + + if data_created: + logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})") + + # Send websocket update to notify frontend that EPG data has been created + # This allows the channel form to immediately show the new dummy EPG without refreshing + send_websocket_update('updates', 'update', { + 'type': 'epg_data_created', + 'source_id': instance.id, + 'source_name': instance.name, + 'epg_data_id': epg_data.id + }) + else: + logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})") + @receiver(post_save, sender=EPGSource) def create_or_update_refresh_task(sender, instance, **kwargs): """ Create or update a Celery Beat periodic task when an EPGSource is created/updated. + Skip creating tasks for dummy EPG sources as they don't need refreshing. 
""" + # Skip task creation for dummy EPGs + if instance.source_type == 'dummy': + # If there's an existing task, disable it + if instance.refresh_task: + instance.refresh_task.enabled = False + instance.refresh_task.save(update_fields=['enabled']) + return + task_name = f"epg_source-refresh-{instance.id}" interval, _ = IntervalSchedule.objects.get_or_create( every=int(instance.refresh_interval), @@ -80,7 +142,14 @@ def delete_refresh_task(sender, instance, **kwargs): def update_status_on_active_change(sender, instance, **kwargs): """ When an EPGSource's is_active field changes, update the status accordingly. + For dummy EPGs, always ensure status is idle and no status message. """ + # Dummy EPGs should always be idle with no status message + if instance.source_type == 'dummy': + instance.status = EPGSource.STATUS_IDLE + instance.last_message = None + return + if instance.pk: # Only for existing records, not new ones try: # Get the current record from the database diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 0f2af709a..59d658b19 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -24,7 +24,7 @@ from channels.layers import get_channel_layer from .models import EPGSource, EPGData, ProgramData -from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory +from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event logger = logging.getLogger(__name__) @@ -133,8 +133,9 @@ def delete_epg_refresh_task_by_id(epg_id): @shared_task def refresh_all_epg_data(): logger.info("Starting refresh_epg_data task.") - active_sources = EPGSource.objects.filter(is_active=True) - logger.debug(f"Found {active_sources.count()} active EPGSource(s).") + # Exclude dummy EPG sources from refresh - they don't need refreshing + active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy') + logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).") for source in active_sources: refresh_epg_data(source.id) @@ -180,6 +181,13 @@ def refresh_epg_data(source_id): gc.collect() return + # Skip refresh for dummy EPG sources - they don't need refreshing + if source.source_type == 'dummy': + logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})") + release_task_lock('refresh_epg_data', source_id) + gc.collect() + return + # Continue with the normal processing... 
logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})") if source.source_type == 'xmltv': @@ -875,17 +883,15 @@ def parse_channels_only(source): if process: logger.debug(f"[parse_channels_only] Memory after opening file: {process.memory_info().rss / 1024 / 1024:.2f} MB") - # Use iterparse to find the element + # Change iterparse to look for both channel and programme elements logger.debug(f"Creating iterparse context for channels and programmes") - tv_finder = etree.iterparse(source_file, events=('start',), tag='tv', remove_blank_text=True, recover=True) - _, tv_root = next(tv_finder) + channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True) if process: logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB") channel_count = 0 total_elements_processed = 0 # Track total elements processed, not just channels - - for elem in tv_root.iter('channel', 'programme'): + for _, elem in channel_parser: total_elements_processed += 1 # Only process channel elements if elem.tag == 'channel': @@ -1151,6 +1157,12 @@ def parse_programs_for_tvg_id(epg_id): epg = EPGData.objects.get(id=epg_id) epg_source = epg.epg_source + # Skip program parsing for dummy EPG sources - they don't have program data files + if epg_source.source_type == 'dummy': + logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})") + release_task_lock('parse_epg_programs', epg_id) + return + if not Channel.objects.filter(epg_data=epg).exists(): logger.info(f"No channels matched to EPG {epg.tvg_id}") release_task_lock('parse_epg_programs', epg_id) @@ -1244,7 +1256,7 @@ def parse_programs_for_tvg_id(epg_id): source_file = open(file_path, 'rb') # Stream parse the file using lxml's iterparse - program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True) + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) for _, elem in program_parser: if elem.get('channel') == epg.tvg_id: @@ -1484,6 +1496,15 @@ def parse_programs_for_source(epg_source, tvg_id=None): epg_source.updated_at = timezone.now() epg_source.save(update_fields=['status', 'last_message', 'updated_at']) + # Log system event for EPG refresh + log_system_event( + event_type='epg_refresh', + source_name=epg_source.name, + programs=program_count, + channels=channel_count, + updated=updated_count, + ) + # Send completion notification with status send_epg_update(epg_source.id, "parsing_programs", 100, status="success", @@ -1945,3 +1966,20 @@ def detect_file_format(file_path=None, content=None): # If we reach here, we couldn't reliably determine the format return format_type, is_compressed, file_extension + + +def generate_dummy_epg(source): + """ + DEPRECATED: This function is no longer used. + + Dummy EPG programs are now generated on-demand when they are requested + (during XMLTV export or EPG grid display), rather than being pre-generated + and stored in the database. + + See: apps/output/views.py - generate_custom_dummy_programs() + + This function remains for backward compatibility but should not be called. + """ + logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. 
" + f"Dummy EPG programs are now generated on-demand.") + return True diff --git a/apps/m3u/api_views.py b/apps/m3u/api_views.py index 9c5d5c14d..1f16f20fc 100644 --- a/apps/m3u/api_views.py +++ b/apps/m3u/api_views.py @@ -81,6 +81,13 @@ def create(self, request, *args, **kwargs): account_type = response.data.get("account_type") account_id = response.data.get("id") + # Notify frontend that a new playlist was created + from core.utils import send_websocket_update + send_websocket_update('updates', 'update', { + 'type': 'playlist_created', + 'playlist_id': account_id + }) + if account_type == M3UAccount.Types.XC: refresh_m3u_groups(account_id) @@ -145,6 +152,46 @@ def update(self, request, *args, **kwargs): and not old_vod_enabled and new_vod_enabled ): + # Create Uncategorized categories immediately so they're available in the UI + from apps.vod.models import VODCategory, M3UVODCategoryRelation + + # Create movie Uncategorized category + movie_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + # Create series Uncategorized category + series_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Create relations for both categories (disabled by default until first refresh) + account_custom_props = instance.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + + M3UVODCategoryRelation.objects.get_or_create( + category=movie_category, + m3u_account=instance, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + M3UVODCategoryRelation.objects.get_or_create( + category=series_category, + m3u_account=instance, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + # Trigger full VOD refresh from apps.vod.tasks import refresh_vod_content refresh_vod_content.delay(instance.id) diff --git a/apps/m3u/serializers.py b/apps/m3u/serializers.py index 05462d0fc..a607dc07c 100644 --- a/apps/m3u/serializers.py +++ b/apps/m3u/serializers.py @@ -136,6 +136,9 @@ class M3UAccountSerializer(serializers.ModelSerializer): validators=[validate_flexible_url], ) enable_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True) + auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True) class Meta: model = M3UAccount @@ -164,6 +167,9 @@ class Meta: "status", "last_message", "enable_vod", + "auto_enable_new_groups_live", + "auto_enable_new_groups_vod", + "auto_enable_new_groups_series", ] extra_kwargs = { "password": { @@ -175,23 +181,36 @@ class Meta: def to_representation(self, instance): data = super().to_representation(instance) - # Parse custom_properties to get VOD preference + # Parse custom_properties to get VOD preference and auto_enable_new_groups settings custom_props = instance.custom_properties or {} data["enable_vod"] = custom_props.get("enable_vod", False) + data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True) + data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True) + data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True) return data def update(self, instance, validated_data): - # Handle enable_vod preference + # Handle enable_vod preference 
and auto_enable_new_groups settings enable_vod = validated_data.pop("enable_vod", None) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None) - if enable_vod is not None: - # Get existing custom_properties - custom_props = instance.custom_properties or {} + # Get existing custom_properties + custom_props = instance.custom_properties or {} - # Update VOD preference + # Update preferences + if enable_vod is not None: custom_props["enable_vod"] = enable_vod - validated_data["custom_properties"] = custom_props + if auto_enable_new_groups_live is not None: + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + if auto_enable_new_groups_vod is not None: + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + if auto_enable_new_groups_series is not None: + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series + + validated_data["custom_properties"] = custom_props # Pop out channel group memberships so we can handle them manually channel_group_data = validated_data.pop("channel_group", []) @@ -225,14 +244,20 @@ def update(self, instance, validated_data): return instance def create(self, validated_data): - # Handle enable_vod preference during creation + # Handle enable_vod preference and auto_enable_new_groups settings during creation enable_vod = validated_data.pop("enable_vod", False) + auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True) + auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True) + auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True) # Parse existing custom_properties or create new custom_props = validated_data.get("custom_properties", {}) - # Set VOD preference + # Set preferences (default to True for auto_enable_new_groups) custom_props["enable_vod"] = enable_vod + custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live + custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod + custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series validated_data["custom_properties"] = custom_props return super().create(validated_data) diff --git a/apps/m3u/tasks.py b/apps/m3u/tasks.py index b892caef3..cb82402e5 100644 --- a/apps/m3u/tasks.py +++ b/apps/m3u/tasks.py @@ -24,11 +24,13 @@ acquire_task_lock, release_task_lock, natural_sort_key, + log_system_event, ) from core.models import CoreSettings, UserAgent from asgiref.sync import async_to_sync from core.xtream_codes import Client as XCClient from core.utils import send_websocket_update +from .utils import normalize_stream_url logger = logging.getLogger(__name__) @@ -219,6 +221,10 @@ def fetch_m3u_lines(account, use_cache=False): # Has HTTP URLs, might be a simple M3U without headers is_valid_m3u = True logger.info("Content validated as M3U: contains HTTP URLs") + elif any(line.strip().startswith(('rtsp', 'rtp', 'udp')) for line in content_lines): + # Has RTSP/RTP/UDP URLs, might be a simple M3U without headers + is_valid_m3u = True + logger.info("Content validated as M3U: contains RTSP/RTP/UDP URLs") if not is_valid_m3u: # Log what we actually received for debugging @@ -434,25 +440,51 @@ def get_case_insensitive_attr(attributes, key, default=""): def parse_extinf_line(line: str) -> dict: """ Parse an EXTINF line 
from an M3U file. - This function removes the "#EXTINF:" prefix, then splits the remaining - string on the first comma that is not enclosed in quotes. + This function removes the "#EXTINF:" prefix, then extracts all key="value" attributes, + and treats everything after the last attribute as the display name. Returns a dictionary with: - 'attributes': a dict of attribute key/value pairs (e.g. tvg-id, tvg-logo, group-title) - - 'display_name': the text after the comma (the fallback display name) + - 'display_name': the text after the attributes (the fallback display name) - 'name': the value from tvg-name (if present) or the display name otherwise. """ if not line.startswith("#EXTINF:"): return None content = line[len("#EXTINF:") :].strip() - # Split on the first comma that is not inside quotes. - parts = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', content, maxsplit=1) - if len(parts) != 2: - return None - attributes_part, display_name = parts[0], parts[1].strip() - attrs = dict(re.findall(r'([^\s]+)=["\']([^"\']+)["\']', attributes_part)) - # Use tvg-name attribute if available; otherwise, use the display name. - name = get_case_insensitive_attr(attrs, "tvg-name", display_name) + + # Single pass: extract all attributes AND track the last attribute position + # This regex matches both key="value" and key='value' patterns + attrs = {} + last_attr_end = 0 + + # Use a single regex that handles both quote types + for match in re.finditer(r'([^\s]+)=(["\'])([^\2]*?)\2', content): + key = match.group(1) + value = match.group(3) + attrs[key] = value + last_attr_end = match.end() + + # Everything after the last attribute (skipping leading comma and whitespace) is the display name + if last_attr_end > 0: + remaining = content[last_attr_end:].strip() + # Remove leading comma if present + if remaining.startswith(','): + remaining = remaining[1:].strip() + display_name = remaining + else: + # No attributes found, try the old comma-split method as fallback + parts = content.split(',', 1) + if len(parts) == 2: + display_name = parts[1].strip() + else: + display_name = content.strip() + + # Use tvg-name attribute if available; otherwise try tvc-guide-title, then fall back to display name. 
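The rewritten parse_extinf_line switches from comma-splitting to an attribute-first strategy: collect every key="value" pair, then treat whatever follows the last attribute as the display name. A simplified standalone sketch of that strategy (illustrative function name, and a plainer quoting regex than the one in the patch):

import re

def parse_extinf_sketch(line: str) -> dict:
    content = line[len("#EXTINF:"):].strip()
    attrs, last_end = {}, 0
    # Grab key="value" / key='value' pairs and remember where the last one ends.
    for m in re.finditer(r'(\S+)=(["\'])(.*?)\2', content):
        attrs[m.group(1)] = m.group(3)
        last_end = m.end()
    if last_end:
        # Display name is whatever trails the final attribute, minus the comma.
        display_name = content[last_end:].lstrip(', ').strip()
    else:
        # No attributes: fall back to the classic split on the first comma.
        display_name = content.split(',', 1)[-1].strip()
    return {"attributes": attrs, "display_name": display_name}

print(parse_extinf_sketch('#EXTINF:-1 tvg-id="news.hd" group-title="News, Local",News HD'))
# {'attributes': {'tvg-id': 'news.hd', 'group-title': 'News, Local'}, 'display_name': 'News HD'}

Note how the group-title value containing a comma survives intact, which is exactly the case the old "split on the first unquoted comma" approach was meant to handle.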
+ name = get_case_insensitive_attr(attrs, "tvg-name", None) + if not name: + name = get_case_insensitive_attr(attrs, "tvc-guide-title", None) + if not name: + name = display_name return {"attributes": attrs, "display_name": display_name, "name": name} @@ -488,25 +520,29 @@ def process_groups(account, groups): } logger.info(f"Currently {len(existing_groups)} existing groups") - group_objs = [] + # Check if we should auto-enable new groups based on account settings + account_custom_props = account.custom_properties or {} + auto_enable_new_groups_live = account_custom_props.get("auto_enable_new_groups_live", True) + + # Separate existing groups from groups that need to be created + existing_group_objs = [] groups_to_create = [] - for group_name, custom_props in groups.items(): - logger.debug(f"Handling group for M3U account {account.id}: {group_name}") - if group_name not in existing_groups: - groups_to_create.append( - ChannelGroup( - name=group_name, - ) - ) + for group_name, custom_props in groups.items(): + if group_name in existing_groups: + existing_group_objs.append(existing_groups[group_name]) else: - group_objs.append(existing_groups[group_name]) + groups_to_create.append(ChannelGroup(name=group_name)) + # Create new groups and fetch them back with IDs + newly_created_group_objs = [] if groups_to_create: - logger.debug(f"Creating {len(groups_to_create)} groups") - created = ChannelGroup.bulk_create_and_fetch(groups_to_create) - logger.debug(f"Created {len(created)} groups") - group_objs.extend(created) + logger.info(f"Creating {len(groups_to_create)} new groups for account {account.id}") + newly_created_group_objs = list(ChannelGroup.bulk_create_and_fetch(groups_to_create)) + logger.debug(f"Successfully created {len(newly_created_group_objs)} new groups") + + # Combine all groups + all_group_objs = existing_group_objs + newly_created_group_objs # Get existing relationships for this account existing_relationships = { @@ -536,7 +572,7 @@ def process_groups(account, groups): relations_to_delete.append(rel) logger.debug(f"Marking relationship for deletion: group '{group_name}' no longer exists in source for account {account.id}") - for group in group_objs: + for group in all_group_objs: custom_props = groups.get(group.name, {}) if group.name in existing_relationships: @@ -566,35 +602,17 @@ def process_groups(account, groups): else: logger.debug(f"xc_id unchanged for group '{group.name}' - account {account.id}") else: - # Create new relationship - but check if there's an existing relationship that might have user settings - # This can happen if the group was temporarily removed and is now back - try: - potential_existing = ChannelGroupM3UAccount.objects.filter( - m3u_account=account, - channel_group=group - ).first() - - if potential_existing: - # Merge with existing custom properties to preserve user settings - existing_custom_props = potential_existing.custom_properties or {} - - # Merge new properties with existing ones - merged_custom_props = existing_custom_props.copy() - merged_custom_props.update(custom_props) - custom_props = merged_custom_props - logger.debug(f"Merged custom properties for existing relationship: group '{group.name}' - account {account.id}") - except Exception as e: - logger.debug(f"Could not check for existing relationship: {str(e)}") - # Fall back to using just the new custom properties - pass + # Create new relationship - this group is new to this M3U account + # Use the auto_enable setting to determine if it should start enabled + if not 
auto_enable_new_groups_live: + logger.info(f"Group '{group.name}' is new to account {account.id} - creating relationship but DISABLED (auto_enable_new_groups_live=False)") - # Create new relationship relations_to_create.append( ChannelGroupM3UAccount( channel_group=group, m3u_account=account, custom_properties=custom_props, - enabled=True, # Default to enabled + enabled=auto_enable_new_groups_live, ) ) @@ -908,6 +926,12 @@ def process_m3u_batch_direct(account_id, batch, groups, hash_keys): for stream_info in batch: try: name, url = stream_info["name"], stream_info["url"] + + # Validate URL length - maximum of 4096 characters + if url and len(url) > 4096: + logger.warning(f"Skipping stream '{name}': URL too long ({len(url)} characters, max 4096)") + continue + tvg_id, tvg_logo = get_case_insensitive_attr( stream_info["attributes"], "tvg-id", "" ), get_case_insensitive_attr(stream_info["attributes"], "tvg-logo", "") @@ -1194,52 +1218,14 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): auth_result = xc_client.authenticate() logger.debug(f"Authentication response: {auth_result}") - # Save account information to all active profiles + # Queue async profile refresh task to run in background + # This prevents any delay in the main refresh process try: - from apps.m3u.models import M3UAccountProfile - - profiles = M3UAccountProfile.objects.filter( - m3u_account=account, - is_active=True - ) - - # Update each profile with account information using its own transformed credentials - for profile in profiles: - try: - # Get transformed credentials for this specific profile - profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) - - # Create a separate XC client for this profile's credentials - with XCClient( - profile_url, - profile_username, - profile_password, - user_agent_string - ) as profile_client: - # Authenticate with this profile's credentials - if profile_client.authenticate(): - # Get account information specific to this profile's credentials - profile_account_info = profile_client.get_account_info() - - # Merge with existing custom_properties if they exist - existing_props = profile.custom_properties or {} - existing_props.update(profile_account_info) - profile.custom_properties = existing_props - profile.save(update_fields=['custom_properties']) - - logger.info(f"Updated account information for profile '{profile.name}' with transformed credentials") - else: - logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") - - except Exception as profile_error: - logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") - # Continue with other profiles even if one fails - - logger.info(f"Processed account information for {profiles.count()} profiles for account {account.name}") - - except Exception as save_error: - logger.warning(f"Failed to process profile account information: {str(save_error)}") - # Don't fail the whole process if saving account info fails + logger.info(f"Queueing background profile refresh for account {account.name}") + refresh_account_profiles.delay(account.id) + except Exception as e: + logger.warning(f"Failed to queue profile refresh task: {str(e)}") + # Don't fail the main refresh if profile refresh can't be queued except Exception as e: error_msg = f"Failed to authenticate with XC server: {str(e)}" @@ -1381,10 +1367,12 @@ def refresh_m3u_groups(account_id, use_cache=False, full_refresh=False): ) 
problematic_lines.append((line_index + 1, line[:200])) - elif extinf_data and line.startswith("http"): + elif extinf_data and (line.startswith("http") or line.startswith("rtsp") or line.startswith("rtp") or line.startswith("udp")): url_count += 1 + # Normalize UDP URLs only (e.g., remove VLC-specific @ prefix) + normalized_url = normalize_stream_url(line) if line.startswith("udp") else line # Associate URL with the last EXTINF line - extinf_data[-1]["url"] = line + extinf_data[-1]["url"] = normalized_url valid_stream_count += 1 # Periodically log progress for large files @@ -1562,7 +1550,7 @@ def sync_auto_channels(account_id, scan_start_time=None): # Get force_dummy_epg, group_override, and regex patterns from group custom_properties group_custom_props = {} - force_dummy_epg = False + force_dummy_epg = False # Backward compatibility: legacy option to disable EPG override_group_id = None name_regex_pattern = None name_replace_pattern = None @@ -1571,6 +1559,8 @@ def sync_auto_channels(account_id, scan_start_time=None): channel_sort_order = None channel_sort_reverse = False stream_profile_id = None + custom_logo_id = None + custom_epg_id = None # New option: select specific EPG source (takes priority over force_dummy_epg) if group_relation.custom_properties: group_custom_props = group_relation.custom_properties force_dummy_epg = group_custom_props.get("force_dummy_epg", False) @@ -1581,11 +1571,13 @@ def sync_auto_channels(account_id, scan_start_time=None): ) name_match_regex = group_custom_props.get("name_match_regex") channel_profile_ids = group_custom_props.get("channel_profile_ids") + custom_epg_id = group_custom_props.get("custom_epg_id") channel_sort_order = group_custom_props.get("channel_sort_order") channel_sort_reverse = group_custom_props.get( "channel_sort_reverse", False ) stream_profile_id = group_custom_props.get("stream_profile_id") + custom_logo_id = group_custom_props.get("custom_logo_id") # Determine which group to use for created channels target_group = channel_group @@ -1840,7 +1832,25 @@ def sync_auto_channels(account_id, scan_start_time=None): # Handle logo updates current_logo = None - if stream.logo_url: + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + current_logo = Logo.objects.get(id=custom_logo_id) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found for existing channel, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + current_logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + elif stream.logo_url: + # No custom logo configured, use stream logo from apps.channels.models import Logo current_logo, _ = Logo.objects.get_or_create( @@ -1856,10 +1866,42 @@ def sync_auto_channels(account_id, scan_start_time=None): # Handle EPG data updates current_epg_data = None - if stream.tvg_id and not force_dummy_epg: + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + current_epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if not current_epg_data: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} 
(ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found for existing channel, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + current_epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + elif stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) current_epg_data = EPGData.objects.filter( tvg_id=stream.tvg_id ).first() + # If force_dummy_epg is True and no custom_epg_id, current_epg_data stays None if existing_channel.epg_data != current_epg_data: existing_channel.epg_data = current_epg_data @@ -1949,19 +1991,81 @@ def sync_auto_channels(account_id, scan_start_time=None): ChannelProfileMembership.objects.bulk_create(memberships) # Try to match EPG data - if stream.tvg_id and not force_dummy_epg: + if custom_epg_id: + # Use the custom EPG specified in group settings (e.g., a dummy EPG) + from apps.epg.models import EPGSource + try: + epg_source = EPGSource.objects.get(id=custom_epg_id) + # For dummy EPGs, select the first (and typically only) EPGData entry from this source + if epg_source.source_type == 'dummy': + epg_data = EPGData.objects.filter( + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + else: + logger.warning( + f"No EPGData found for dummy EPG source {epg_source.name} (ID: {custom_epg_id})" + ) + else: + # For non-dummy sources, try to find existing EPGData by tvg_id + if stream.tvg_id: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id, + epg_source=epg_source + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + except EPGSource.DoesNotExist: + logger.warning( + f"Custom EPG source with ID {custom_epg_id} not found, falling back to auto-match" + ) + # Fall back to auto-match by tvg_id + if stream.tvg_id and not force_dummy_epg: + epg_data = EPGData.objects.filter( + tvg_id=stream.tvg_id + ).first() + if epg_data: + channel.epg_data = epg_data + channel.save(update_fields=["epg_data"]) + elif stream.tvg_id and not force_dummy_epg: + # Auto-match EPG by tvg_id (original behavior) epg_data = EPGData.objects.filter( tvg_id=stream.tvg_id ).first() if epg_data: channel.epg_data = epg_data channel.save(update_fields=["epg_data"]) - elif stream.tvg_id and force_dummy_epg: + elif force_dummy_epg: + # Force dummy EPG with no custom EPG selected (set to None) channel.epg_data = None channel.save(update_fields=["epg_data"]) # Handle logo - if stream.logo_url: + if custom_logo_id: + # Use the custom logo specified in group settings + from apps.channels.models import Logo + try: + custom_logo = Logo.objects.get(id=custom_logo_id) + channel.logo = custom_logo + channel.save(update_fields=["logo"]) + except Logo.DoesNotExist: + logger.warning( + f"Custom logo with ID {custom_logo_id} not found, falling back to stream logo" + ) + # Fall back to stream logo if custom logo not found + if stream.logo_url: + logo, _ = Logo.objects.get_or_create( + url=stream.logo_url, + defaults={ + "name": stream.name or stream.tvg_id or "Unknown" + }, + ) + channel.logo = logo + channel.save(update_fields=["logo"]) + elif stream.logo_url: from apps.channels.models import Logo logo, _ = 
Logo.objects.get_or_create( @@ -2128,6 +2232,106 @@ def get_transformed_credentials(account, profile=None): return base_url, base_username, base_password +@shared_task +def refresh_account_profiles(account_id): + """Refresh account information for all active profiles of an XC account. + + This task runs asynchronously in the background after account refresh completes. + It includes rate limiting delays between profile authentications to prevent provider bans. + """ + from django.conf import settings + import time + + try: + account = M3UAccount.objects.get(id=account_id, is_active=True) + + if account.account_type != M3UAccount.Types.XC: + logger.debug(f"Account {account_id} is not XC type, skipping profile refresh") + return f"Account {account_id} is not an XtreamCodes account" + + from apps.m3u.models import M3UAccountProfile + + profiles = M3UAccountProfile.objects.filter( + m3u_account=account, + is_active=True + ) + + if not profiles.exists(): + logger.info(f"No active profiles found for account {account.name}") + return f"No active profiles for account {account_id}" + + # Get user agent for this account + try: + user_agent_string = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + if account.user_agent_id: + from core.models import UserAgent + ua_obj = UserAgent.objects.get(id=account.user_agent_id) + if ua_obj and hasattr(ua_obj, "user_agent") and ua_obj.user_agent: + user_agent_string = ua_obj.user_agent + except Exception as e: + logger.warning(f"Error getting user agent, using fallback: {str(e)}") + logger.debug(f"Using user agent for profile refresh: {user_agent_string}") + # Get rate limiting delay from settings + profile_delay = getattr(settings, 'XC_PROFILE_REFRESH_DELAY', 2.5) + + profiles_updated = 0 + profiles_failed = 0 + + logger.info(f"Starting background refresh for {profiles.count()} profiles of account {account.name}") + + for idx, profile in enumerate(profiles): + try: + # Add delay between profiles to prevent rate limiting (except for first profile) + if idx > 0: + logger.info(f"Waiting {profile_delay}s before refreshing next profile to avoid rate limiting") + time.sleep(profile_delay) + + # Get transformed credentials for this specific profile + profile_url, profile_username, profile_password = get_transformed_credentials(account, profile) + + # Create a separate XC client for this profile's credentials + with XCClient( + profile_url, + profile_username, + profile_password, + user_agent_string + ) as profile_client: + # Authenticate with this profile's credentials + if profile_client.authenticate(): + # Get account information specific to this profile's credentials + profile_account_info = profile_client.get_account_info() + + # Merge with existing custom_properties if they exist + existing_props = profile.custom_properties or {} + existing_props.update(profile_account_info) + profile.custom_properties = existing_props + profile.save(update_fields=['custom_properties']) + + profiles_updated += 1 + logger.info(f"Updated account information for profile '{profile.name}' ({profiles_updated}/{profiles.count()})") + else: + profiles_failed += 1 + logger.warning(f"Failed to authenticate profile '{profile.name}' with transformed credentials") + + except Exception as profile_error: + profiles_failed += 1 + logger.error(f"Failed to update account information for profile '{profile.name}': {str(profile_error)}") + # Continue with other profiles even if one fails + + result_msg = f"Profile refresh complete for account {account.name}: {profiles_updated} updated, 
{profiles_failed} failed" + logger.info(result_msg) + return result_msg + + except M3UAccount.DoesNotExist: + error_msg = f"Account {account_id} not found" + logger.error(error_msg) + return error_msg + except Exception as e: + error_msg = f"Error refreshing profiles for account {account_id}: {str(e)}" + logger.error(error_msg) + return error_msg + + @shared_task def refresh_account_info(profile_id): """Refresh only the account information for a specific M3U profile.""" @@ -2523,76 +2727,75 @@ def refresh_single_m3u_account(account_id): if not all_xc_streams: logger.warning("No streams collected from XC groups") - return f"No streams found for XC account {account_id}", None - - # Now batch by stream count (like standard M3U processing) - batches = [ - all_xc_streams[i : i + BATCH_SIZE] - for i in range(0, len(all_xc_streams), BATCH_SIZE) - ] - - logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches") - - # Use threading for XC stream processing - now with consistent batch sizes - max_workers = min(4, len(batches)) - logger.debug(f"Using {max_workers} threads for XC stream processing") - - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit stream batch processing tasks (reuse standard M3U processing) - future_to_batch = { - executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i - for i, batch in enumerate(batches) - } - - completed_batches = 0 - total_batches = len(batches) + else: + # Now batch by stream count (like standard M3U processing) + batches = [ + all_xc_streams[i : i + BATCH_SIZE] + for i in range(0, len(all_xc_streams), BATCH_SIZE) + ] + + logger.info(f"Processing {len(all_xc_streams)} XC streams in {len(batches)} batches") + + # Use threading for XC stream processing - now with consistent batch sizes + max_workers = min(4, len(batches)) + logger.debug(f"Using {max_workers} threads for XC stream processing") + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit stream batch processing tasks (reuse standard M3U processing) + future_to_batch = { + executor.submit(process_m3u_batch_direct, account_id, batch, existing_groups, hash_keys): i + for i, batch in enumerate(batches) + } - # Process completed batches as they finish - for future in as_completed(future_to_batch): - batch_idx = future_to_batch[future] - try: - result = future.result() - completed_batches += 1 + completed_batches = 0 + total_batches = len(batches) - # Extract stream counts from result - if isinstance(result, str): - try: - created_match = re.search(r"(\d+) created", result) - updated_match = re.search(r"(\d+) updated", result) - if created_match and updated_match: - created_count = int(created_match.group(1)) - updated_count = int(updated_match.group(1)) - streams_created += created_count - streams_updated += updated_count - except (AttributeError, ValueError): - pass + # Process completed batches as they finish + for future in as_completed(future_to_batch): + batch_idx = future_to_batch[future] + try: + result = future.result() + completed_batches += 1 - # Send progress update - progress = int((completed_batches / total_batches) * 100) - current_elapsed = time.time() - start_time + # Extract stream counts from result + if isinstance(result, str): + try: + created_match = re.search(r"(\d+) created", result) + updated_match = re.search(r"(\d+) updated", result) + if created_match and updated_match: + created_count = int(created_match.group(1)) + updated_count = int(updated_match.group(1)) + 
streams_created += created_count + streams_updated += updated_count + except (AttributeError, ValueError): + pass + + # Send progress update + progress = int((completed_batches / total_batches) * 100) + current_elapsed = time.time() - start_time - if progress > 0: - estimated_total = (current_elapsed / progress) * 100 - time_remaining = max(0, estimated_total - current_elapsed) - else: - time_remaining = 0 + if progress > 0: + estimated_total = (current_elapsed / progress) * 100 + time_remaining = max(0, estimated_total - current_elapsed) + else: + time_remaining = 0 - send_m3u_update( - account_id, - "parsing", - progress, - elapsed_time=current_elapsed, - time_remaining=time_remaining, - streams_processed=streams_created + streams_updated, - ) + send_m3u_update( + account_id, + "parsing", + progress, + elapsed_time=current_elapsed, + time_remaining=time_remaining, + streams_processed=streams_created + streams_updated, + ) - logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed") + logger.debug(f"XC thread batch {completed_batches}/{total_batches} completed") - except Exception as e: - logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}") - completed_batches += 1 # Still count it to avoid hanging + except Exception as e: + logger.error(f"Error in XC thread batch {batch_idx}: {str(e)}") + completed_batches += 1 # Still count it to avoid hanging - logger.info(f"XC thread-based processing completed for account {account_id}") + logger.info(f"XC thread-based processing completed for account {account_id}") # Ensure all database transactions are committed before cleanup logger.info( @@ -2638,6 +2841,17 @@ def refresh_single_m3u_account(account_id): account.updated_at = timezone.now() account.save(update_fields=["status", "last_message", "updated_at"]) + # Log system event for M3U refresh + log_system_event( + event_type='m3u_refresh', + account_name=account.name, + elapsed_time=round(elapsed_time, 2), + streams_created=streams_created, + streams_updated=streams_updated, + streams_deleted=streams_deleted, + total_processed=streams_processed, + ) + # Send final update with complete metrics and explicitly include success status send_m3u_update( account_id, @@ -2673,7 +2887,16 @@ def refresh_single_m3u_account(account_id): release_task_lock("refresh_single_m3u_account", account_id) # Aggressive garbage collection - del existing_groups, extinf_data, groups, batches + # Only delete variables if they exist + if 'existing_groups' in locals(): + del existing_groups + if 'extinf_data' in locals(): + del extinf_data + if 'groups' in locals(): + del groups + if 'batches' in locals(): + del batches + from core.utils import cleanup_memory cleanup_memory(log_usage=True, force_collection=True) diff --git a/apps/m3u/utils.py b/apps/m3u/utils.py index 4e1027b2e..598ef7134 100644 --- a/apps/m3u/utils.py +++ b/apps/m3u/utils.py @@ -8,6 +8,34 @@ active_streams_map = {} logger = logging.getLogger(__name__) + +def normalize_stream_url(url): + """ + Normalize stream URLs for compatibility with FFmpeg. + + Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol. + FFmpeg doesn't recognize the @ prefix for multicast addresses. 
+ + Args: + url (str): The stream URL to normalize + + Returns: + str: The normalized URL + """ + if not url: + return url + + # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234 + # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax + if url.startswith('udp://@'): + normalized = url.replace('udp://@', 'udp://', 1) + logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}") + return normalized + + # Could add other normalizations here in the future (rtp://@, etc.) + return url + + def increment_stream_count(account): with lock: current_usage = active_streams_map.get(account.id, 0) diff --git a/apps/output/tests.py b/apps/output/tests.py index e1e857eec..f87c83406 100644 --- a/apps/output/tests.py +++ b/apps/output/tests.py @@ -14,3 +14,26 @@ def test_generate_m3u_response(self): self.assertEqual(response.status_code, 200) content = response.content.decode() self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_empty_body(self): + """ + Test that a POST request with an empty body returns 200 OK. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded') + content = response.content.decode() + + self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK") + self.assertIn("#EXTM3U", content) + + def test_generate_m3u_response_post_with_body(self): + """ + Test that a POST request with a non-empty body returns 403 Forbidden. + """ + url = reverse('output:generate_m3u') + + response = self.client.post(url, data={'evilstring': 'muhahaha'}) + + self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden") + self.assertIn("POST requests with body are not allowed, body is:", response.content.decode()) diff --git a/apps/output/views.py b/apps/output/views.py index 1666013de..bc2bace5f 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -9,7 +9,7 @@ from apps.accounts.models import User from core.models import CoreSettings, NETWORK_ACCESS from dispatcharr.utils import network_access_allowed -from django.utils import timezone +from django.utils import timezone as django_timezone from django.shortcuts import get_object_or_404 from datetime import datetime, timedelta import html # Add this import for XML escaping @@ -22,68 +22,157 @@ from django.db.models.functions import Lower import os from apps.m3u.utils import calculate_tuner_count +import regex +from core.utils import log_system_event +import hashlib logger = logging.getLogger(__name__) +def get_client_identifier(request): + """Get client information including IP, user agent, and a unique hash identifier + + Returns: + tuple: (client_id_hash, client_ip, user_agent) + """ + # Get client IP (handle proxies) + x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') + if x_forwarded_for: + client_ip = x_forwarded_for.split(',')[0].strip() + else: + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + + # Get user agent + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + + # Create a hash for a shorter cache key + client_str = f"{client_ip}:{user_agent}" + client_id_hash = hashlib.md5(client_str.encode()).hexdigest()[:12] + + return client_id_hash, client_ip, user_agent + def m3u_endpoint(request, profile_name=None, user=None): + logger.debug("m3u_endpoint called: method=%s, profile=%s", request.method, profile_name) if not network_access_allowed(request, "M3U_EPG"): + # Log blocked M3U 
download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + profile=profile_name or 'all', + reason='Network access denied', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({"error": "Forbidden"}, status=403) + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for M3U") + response = HttpResponse(content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response + return generate_m3u(request, profile_name, user) def epg_endpoint(request, profile_name=None, user=None): + logger.debug("epg_endpoint called: method=%s, profile=%s", request.method, profile_name) if not network_access_allowed(request, "M3U_EPG"): + # Log blocked EPG download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + profile=profile_name or 'all', + reason='Network access denied', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({"error": "Forbidden"}, status=403) + # Handle HEAD requests efficiently without generating content + if request.method == "HEAD": + logger.debug("Handling HEAD request for EPG") + response = HttpResponse(content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + return generate_epg(request, profile_name, user) @csrf_exempt -@require_http_methods(["GET", "POST"]) +@require_http_methods(["GET", "POST", "HEAD"]) def generate_m3u(request, profile_name=None, user=None): """ Dynamically generate an M3U file from channels. The stream URL now points to the new stream_view that uses StreamProfile. Supports both GET and POST methods for compatibility with IPTVSmarters. 
""" + # Check if this is a POST request and the body is not empty (which we don't want to allow) + logger.debug("Generating M3U for profile: %s, user: %s, method: %s", profile_name, user.username if user else "Anonymous", request.method) + + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"m3u_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving M3U from cache") + response = HttpResponse(cached_content, content_type="audio/x-mpegurl") + response["Content-Disposition"] = 'attachment; filename="channels.m3u"' + return response # Check if this is a POST request with data (which we don't want to allow) if request.method == "POST" and request.body: - return HttpResponseForbidden("POST requests with content are not allowed") + if request.body.decode() != '{}': + return HttpResponseForbidden("POST requests with body are not allowed, body is: {}".format(request.body.decode())) if user is not None: if user.user_level == 0: - filters = { - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } + user_profile_count = user.channel_profiles.count() - if user.channel_profiles.count() != 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = ( - channel_profiles - ) - - channels = Channel.objects.filter(**filters).order_by("channel_number") + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") else: channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( "channel_number" ) - - if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True - ).order_by('channel_number') else: if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during m3u generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True, - ).order_by("channel_number") + channelprofilemembership__enabled=True + ).order_by('channel_number') else: - channels = Channel.objects.order_by("channel_number") + if profile_name is not None: + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during m3u generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") + 
channels = Channel.objects.filter( + channelprofilemembership__channel_profile=channel_profile, + channelprofilemembership__enabled=True, + ).order_by("channel_number") + else: + channels = Channel.objects.order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -163,7 +252,7 @@ def generate_m3u(request, profile_name=None, user=None): # Determine the stream URL based on the direct parameter if use_direct_urls: # Try to get the first stream's direct URL - first_stream = channel.streams.first() + first_stream = channel.streams.order_by('channelstream__order').first() if first_stream and first_stream.url: # Use the direct stream URL stream_url = first_stream.url @@ -178,17 +267,127 @@ def generate_m3u(request, profile_name=None, user=None): m3u_content += extinf_line + stream_url + "\n" + # Cache the generated content for 2 seconds to handle double-GET requests + cache.set(content_cache_key, m3u_content, 2) + + # Log system event for M3U download (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"m3u_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='m3u_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + response = HttpResponse(m3u_content, content_type="audio/x-mpegurl") response["Content-Disposition"] = 'attachment; filename="channels.m3u"' return response -def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4): +def generate_fallback_programs(channel_id, channel_name, now, num_days, program_length_hours, fallback_title, fallback_description): + """ + Generate dummy programs using custom fallback templates when patterns don't match. 
+ + Args: + channel_id: Channel ID for the programs + channel_name: Channel name to use as fallback in templates + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + fallback_title: Custom fallback title template (empty string if not provided) + fallback_description: Custom fallback description template (empty string if not provided) + + Returns: + List of program dictionaries + """ + programs = [] + + # Use custom fallback title or channel name as default + title = fallback_title if fallback_title else channel_name + + # Use custom fallback description or a simple default message + if fallback_description: + description = fallback_description + else: + description = f"EPG information is currently unavailable for {channel_name}" + + # Create programs for each day + for day in range(num_days): + day_start = now + timedelta(days=day) + + # Create programs with specified length throughout the day + for hour_offset in range(0, 24, program_length_hours): + # Calculate program start and end times + start_time = day_start + timedelta(hours=hour_offset) + end_time = start_time + timedelta(hours=program_length_hours) + + programs.append({ + "channel_id": channel_id, + "start_time": start_time, + "end_time": end_time, + "title": title, + "description": description, + }) + + return programs + + +def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length_hours=4, epg_source=None): + """ + Generate dummy EPG programs for channels. + + If epg_source is provided and it's a custom dummy EPG with patterns, + use those patterns to generate programs from the channel title. + Otherwise, generate default dummy programs. + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title/name + num_days: Number of days to generate programs for + program_length_hours: Length of each program in hours + epg_source: Optional EPGSource for custom dummy EPG with patterns + + Returns: + List of program dictionaries + """ # Get current time rounded to hour - now = timezone.now() + now = django_timezone.now() now = now.replace(minute=0, second=0, microsecond=0) - # Humorous program descriptions based on time of day + # Check if this is a custom dummy EPG with regex patterns + if epg_source and epg_source.source_type == 'dummy' and epg_source.custom_properties: + custom_programs = generate_custom_dummy_programs( + channel_id, channel_name, now, num_days, + epg_source.custom_properties + ) + # If custom generation succeeded, return those programs + # If it returned empty (pattern didn't match), check for custom fallback templates + if custom_programs: + return custom_programs + else: + logger.info(f"Custom pattern didn't match for '{channel_name}', checking for custom fallback templates") + + # Check if custom fallback templates are provided + custom_props = epg_source.custom_properties + fallback_title = custom_props.get('fallback_title_template', '').strip() + fallback_description = custom_props.get('fallback_description_template', '').strip() + + # If custom fallback templates exist, use them instead of default + if fallback_title or fallback_description: + logger.info(f"Using custom fallback templates for '{channel_name}'") + return generate_fallback_programs( + channel_id, channel_name, now, num_days, + program_length_hours, fallback_title, fallback_description + ) + else: + logger.info(f"No custom fallback templates found, using default dummy EPG") + + # Default humorous program descriptions 
based on time of day time_descriptions = { (0, 4): [ f"Late Night with {channel_name} - Where insomniacs unite!", @@ -260,6 +459,720 @@ def generate_dummy_programs(channel_id, channel_name, num_days=1, program_length return programs +def generate_custom_dummy_programs(channel_id, channel_name, now, num_days, custom_properties): + """ + Generate programs using custom dummy EPG regex patterns. + + Extracts information from channel title using regex patterns and generates + programs based on the extracted data. + + TIMEZONE HANDLING: + ------------------ + The timezone parameter specifies the timezone of the event times in your channel + titles using standard timezone names (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London'). + DST (Daylight Saving Time) is handled automatically by pytz. + + Examples: + - Channel: "NHL 01: Bruins VS Maple Leafs @ 8:00PM ET" + - Set timezone = "US/Eastern" + - In October (DST): 8:00PM EDT → 12:00AM UTC (automatically uses UTC-4) + - In January (no DST): 8:00PM EST → 1:00AM UTC (automatically uses UTC-5) + + Args: + channel_id: Channel ID for the programs + channel_name: Channel title to parse + now: Current datetime (in UTC) + num_days: Number of days to generate programs for + custom_properties: Dict with title_pattern, time_pattern, templates, etc. + - timezone: Timezone name (e.g., 'US/Eastern') + + Returns: + List of program dictionaries with start_time/end_time in UTC + """ + import pytz + + logger.info(f"Generating custom dummy programs for channel: {channel_name}") + + # Extract patterns from custom properties + title_pattern = custom_properties.get('title_pattern', '') + time_pattern = custom_properties.get('time_pattern', '') + date_pattern = custom_properties.get('date_pattern', '') + + # Get timezone name (e.g., 'US/Eastern', 'US/Pacific', 'Europe/London') + timezone_value = custom_properties.get('timezone', 'UTC') + output_timezone_value = custom_properties.get('output_timezone', '') # Optional: display times in different timezone + program_duration = custom_properties.get('program_duration', 180) # Minutes + title_template = custom_properties.get('title_template', '') + description_template = custom_properties.get('description_template', '') + + # Templates for upcoming/ended programs + upcoming_title_template = custom_properties.get('upcoming_title_template', '') + upcoming_description_template = custom_properties.get('upcoming_description_template', '') + ended_title_template = custom_properties.get('ended_title_template', '') + ended_description_template = custom_properties.get('ended_description_template', '') + + # Image URL templates + channel_logo_url_template = custom_properties.get('channel_logo_url', '') + program_poster_url_template = custom_properties.get('program_poster_url', '') + + # EPG metadata options + category_string = custom_properties.get('category', '') + # Split comma-separated categories and strip whitespace, filter out empty strings + categories = [cat.strip() for cat in category_string.split(',') if cat.strip()] if category_string else [] + include_date = custom_properties.get('include_date', True) + include_live = custom_properties.get('include_live', False) + include_new = custom_properties.get('include_new', False) + + # Parse timezone name + try: + source_tz = pytz.timezone(timezone_value) + logger.debug(f"Using timezone: {timezone_value} (DST will be handled automatically)") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown timezone: {timezone_value}, defaulting to UTC") + source_tz = 
pytz.utc + + # Parse output timezone if provided (for display purposes) + output_tz = None + if output_timezone_value: + try: + output_tz = pytz.timezone(output_timezone_value) + logger.debug(f"Using output timezone for display: {output_timezone_value}") + except pytz.exceptions.UnknownTimeZoneError: + logger.warning(f"Unknown output timezone: {output_timezone_value}, will use source timezone") + output_tz = None + + if not title_pattern: + logger.warning(f"No title_pattern in custom_properties, falling back to default") + return [] # Return empty, will use default + + logger.debug(f"Title pattern from DB: {repr(title_pattern)}") + + # Convert PCRE/JavaScript named groups (?) to Python format (?P) + # This handles patterns created with JavaScript regex syntax + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', title_pattern) + logger.debug(f"Converted title pattern: {repr(title_pattern)}") + + # Compile regex patterns using the enhanced regex module + # (supports variable-width lookbehinds like JavaScript) + try: + title_regex = regex.compile(title_pattern) + except Exception as e: + logger.error(f"Invalid title regex pattern after conversion: {e}") + logger.error(f"Pattern was: {repr(title_pattern)}") + return [] + + time_regex = None + if time_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', time_pattern) + logger.debug(f"Converted time pattern: {repr(time_pattern)}") + try: + time_regex = regex.compile(time_pattern) + except Exception as e: + logger.warning(f"Invalid time regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(time_pattern)}") + + # Compile date regex if provided + date_regex = None + if date_pattern: + # Convert PCRE/JavaScript named groups to Python format + # Use negative lookahead to avoid matching lookbehind (?<=) and negative lookbehind (?]+)>', r'(?P<\1>', date_pattern) + logger.debug(f"Converted date pattern: {repr(date_pattern)}") + try: + date_regex = regex.compile(date_pattern) + except Exception as e: + logger.warning(f"Invalid date regex pattern after conversion: {e}") + logger.warning(f"Pattern was: {repr(date_pattern)}") + + # Try to match the channel name with the title pattern + # Use search() instead of match() to match JavaScript behavior where .match() searches anywhere in the string + title_match = title_regex.search(channel_name) + if not title_match: + logger.debug(f"Channel name '{channel_name}' doesn't match title pattern") + return [] # Return empty, will use default + + groups = title_match.groupdict() + logger.debug(f"Title pattern matched. 
Groups: {groups}") + + # Helper function to format template with matched groups + def format_template(template, groups, url_encode=False): + """Replace {groupname} placeholders with matched group values + + Args: + template: Template string with {groupname} placeholders + groups: Dict of group names to values + url_encode: If True, URL encode the group values for safe use in URLs + """ + if not template: + return '' + result = template + for key, value in groups.items(): + if url_encode and value: + # URL encode the value to handle spaces and special characters + from urllib.parse import quote + encoded_value = quote(str(value), safe='') + result = result.replace(f'{{{key}}}', encoded_value) + else: + result = result.replace(f'{{{key}}}', str(value) if value else '') + return result + + # Extract time from title if time pattern exists + time_info = None + time_groups = {} + if time_regex: + time_match = time_regex.search(channel_name) + if time_match: + time_groups = time_match.groupdict() + try: + hour = int(time_groups.get('hour')) + # Handle optional minute group - could be None if not captured + minute_value = time_groups.get('minute') + minute = int(minute_value) if minute_value is not None else 0 + ampm = time_groups.get('ampm') + ampm = ampm.lower() if ampm else None + + # Determine if this is 12-hour or 24-hour format + if ampm in ('am', 'pm'): + # 12-hour format: convert to 24-hour + if ampm == 'pm' and hour != 12: + hour += 12 + elif ampm == 'am' and hour == 12: + hour = 0 + logger.debug(f"Extracted time (12-hour): {hour}:{minute:02d} {ampm}") + else: + # 24-hour format: hour is already in 24-hour format + # Validate that it's actually a 24-hour time (0-23) + if hour > 23: + logger.warning(f"Invalid 24-hour time: {hour}. Must be 0-23.") + hour = hour % 24 # Wrap around just in case + logger.debug(f"Extracted time (24-hour): {hour}:{minute:02d}") + + time_info = {'hour': hour, 'minute': minute} + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing time: {e}") + + # Extract date from title if date pattern exists + date_info = None + date_groups = {} + if date_regex: + date_match = date_regex.search(channel_name) + if date_match: + date_groups = date_match.groupdict() + try: + # Support various date group names: month, day, year + month_str = date_groups.get('month', '') + day_str = date_groups.get('day', '') + year_str = date_groups.get('year', '') + + # Parse day - default to current day if empty or invalid + day = int(day_str) if day_str else now.day + + # Parse year - default to current year if empty or invalid (matches frontend behavior) + year = int(year_str) if year_str else now.year + + # Parse month - can be numeric (1-12) or text (Jan, January, etc.) 
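Taken together, the named-group conversion and the format_template helper above form a small pipeline: convert JavaScript-style (?<name>) groups to Python syntax, extract groups from the channel title, then substitute {group} placeholders into templates. A self-contained sketch under assumed inputs (the pattern, templates and URL below are examples, not values shipped with this change):

import regex
from urllib.parse import quote

title_pattern = r"(?<league>\w+) \d+: (?<team1>.+) VS (?<team2>.+) @"
# Convert JS-style (?<name>...) groups to Python's (?P<name>...), leaving lookbehinds untouched.
title_pattern = regex.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', title_pattern)

match = regex.compile(title_pattern).search("NHL 01: Bruins VS Maple Leafs @ 8:00PM ET")
groups = match.groupdict()  # {'league': 'NHL', 'team1': 'Bruins', 'team2': 'Maple Leafs'}

def fill(template, groups, url_encode=False):
    # Replace {name} placeholders; optionally URL-encode values for safe use in URLs.
    for key, value in groups.items():
        value = quote(str(value), safe='') if url_encode and value else str(value or '')
        template = template.replace(f'{{{key}}}', value)
    return template

print(fill("{league}: {team1} vs {team2}", groups))
print(fill("https://example.com/logos/{team1}.png", groups, url_encode=True))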
+ month = None + if month_str: + if month_str.isdigit(): + month = int(month_str) + else: + # Try to parse text month names + import calendar + month_str_lower = month_str.lower() + # Check full month names + for i, month_name in enumerate(calendar.month_name): + if month_name.lower() == month_str_lower: + month = i + break + # Check abbreviated month names if not found + if month is None: + for i, month_abbr in enumerate(calendar.month_abbr): + if month_abbr.lower() == month_str_lower: + month = i + break + + # Default to current month if not extracted or invalid + if month is None: + month = now.month + + if month and 1 <= month <= 12 and 1 <= day <= 31: + date_info = {'year': year, 'month': month, 'day': day} + logger.debug(f"Extracted date: {year}-{month:02d}-{day:02d}") + else: + logger.warning(f"Invalid date values: month={month}, day={day}, year={year}") + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing date: {e}") + + # Merge title groups, time groups, and date groups for template formatting + all_groups = {**groups, **time_groups, **date_groups} + + # Add normalized versions of all groups for cleaner URLs + # These remove all non-alphanumeric characters and convert to lowercase + for key, value in list(all_groups.items()): + if value: + # Remove all non-alphanumeric characters (except spaces temporarily) + # then replace spaces with nothing, and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + all_groups[f'{key}_normalize'] = normalized + + # Format channel logo URL if template provided (with URL encoding) + channel_logo_url = None + if channel_logo_url_template: + channel_logo_url = format_template(channel_logo_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted channel logo URL: {channel_logo_url}") + + # Format program poster URL if template provided (with URL encoding) + program_poster_url = None + if program_poster_url_template: + program_poster_url = format_template(program_poster_url_template, all_groups, url_encode=True) + logger.debug(f"Formatted program poster URL: {program_poster_url}") + + # Add formatted time strings for better display (handles minutes intelligently) + if time_info: + hour_24 = time_info['hour'] + minute = time_info['minute'] + + # Determine the base date to use for placeholders + # If date was extracted, use it; otherwise use current date + if date_info: + base_date = datetime(date_info['year'], date_info['month'], date_info['day']) + else: + base_date = datetime.now() + + # If output_timezone is specified, convert the display time to that timezone + if output_tz: + # Create a datetime in the source timezone using the base date + temp_date = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + # Convert to output timezone + temp_date_output = temp_date.astimezone(output_tz) + # Extract converted hour and minute for display + hour_24 = temp_date_output.hour + minute = temp_date_output.minute + logger.debug(f"Converted display time from {source_tz} to {output_tz}: {hour_24}:{minute:02d}") + + # Add date placeholders based on the OUTPUT timezone + # This ensures {date}, {month}, {day}, {year} reflect the converted timezone + all_groups['date'] = temp_date_output.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_output.month) + all_groups['day'] = str(temp_date_output.day) + all_groups['year'] = str(temp_date_output.year) + logger.debug(f"Converted date placeholders to 
{output_tz}: {all_groups['date']}") + else: + # No output timezone conversion - use source timezone for date + # Create temp date to get proper date in source timezone using the base date + temp_date_source = source_tz.localize(base_date.replace(hour=hour_24, minute=minute, second=0, microsecond=0)) + all_groups['date'] = temp_date_source.strftime('%Y-%m-%d') + all_groups['month'] = str(temp_date_source.month) + all_groups['day'] = str(temp_date_source.day) + all_groups['year'] = str(temp_date_source.year) + + # Format 24-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime24'] = f"{hour_24}:{minute:02d}" + else: + all_groups['starttime24'] = f"{hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {starttime} placeholder + # Note: hour_24 is ALWAYS in 24-hour format at this point (converted earlier if needed) + ampm = 'AM' if hour_24 < 12 else 'PM' + hour_12 = hour_24 + if hour_24 == 0: + hour_12 = 12 + elif hour_24 > 12: + hour_12 = hour_24 - 12 + + # Format 12-hour start time string - only include minutes if non-zero + if minute > 0: + all_groups['starttime'] = f"{hour_12}:{minute:02d} {ampm}" + else: + all_groups['starttime'] = f"{hour_12} {ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['starttime_long'] = f"{hour_12}:{minute:02d} {ampm}" + + # Calculate end time based on program duration + # Create a datetime for calculations + temp_start = datetime.now(source_tz).replace(hour=hour_24, minute=minute, second=0, microsecond=0) + temp_end = temp_start + timedelta(minutes=program_duration) + + # Extract end time components (already in correct timezone if output_tz was applied above) + end_hour_24 = temp_end.hour + end_minute = temp_end.minute + + # Format 24-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime24'] = f"{end_hour_24}:{end_minute:02d}" + else: + all_groups['endtime24'] = f"{end_hour_24:02d}:00" + + # Convert 24-hour to 12-hour format for {endtime} placeholder + end_ampm = 'AM' if end_hour_24 < 12 else 'PM' + end_hour_12 = end_hour_24 + if end_hour_24 == 0: + end_hour_12 = 12 + elif end_hour_24 > 12: + end_hour_12 = end_hour_24 - 12 + + # Format 12-hour end time string - only include minutes if non-zero + if end_minute > 0: + all_groups['endtime'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + else: + all_groups['endtime'] = f"{end_hour_12} {end_ampm}" + + # Format long version that always includes minutes (e.g., "9:00 PM" instead of "9 PM") + all_groups['endtime_long'] = f"{end_hour_12}:{end_minute:02d} {end_ampm}" + + # Generate programs + programs = [] + + # If we have extracted time AND date, the event happens on a SPECIFIC date + # If we have time but NO date, generate for multiple days (existing behavior) + # All other days and times show "Upcoming" before or "Ended" after + event_happened = False + + # Determine how many iterations we need + if date_info and time_info: + # Specific date extracted - only generate for that one date + iterations = 1 + logger.debug(f"Date extracted, generating single event for specific date") + else: + # No specific date - use num_days (existing behavior) + iterations = num_days + + for day in range(iterations): + # Start from current time (like standard dummy) instead of midnight + # This ensures programs appear in the guide's current viewing window + day_start = now + timedelta(days=day) + day_end = day_start + timedelta(days=1) + + if time_info: + # We have an extracted 
event time - this is when the MAIN event starts + # The extracted time is in the SOURCE timezone (e.g., 8PM ET) + # We need to convert it to UTC for storage + + # Determine which date to use + if date_info: + # Use the extracted date from the channel title + current_date = datetime( + date_info['year'], + date_info['month'], + date_info['day'] + ).date() + logger.debug(f"Using extracted date: {current_date}") + else: + # No date extracted, use day offset from current time in SOURCE timezone + # This ensures we calculate "today" in the event's timezone, not UTC + # For example: 8:30 PM Central (1:30 AM UTC next day) for a 10 PM ET event + # should use today's date in ET, not tomorrow's date in UTC + now_in_source_tz = now.astimezone(source_tz) + current_date = (now_in_source_tz + timedelta(days=day)).date() + logger.debug(f"No date extracted, using day offset in {source_tz}: {current_date}") + + # Create a naive datetime (no timezone info) representing the event in source timezone + event_start_naive = datetime.combine( + current_date, + datetime.min.time().replace( + hour=time_info['hour'], + minute=time_info['minute'] + ) + ) + + # Use pytz to localize the naive datetime to the source timezone + # This automatically handles DST! + try: + event_start_local = source_tz.localize(event_start_naive) + # Convert to UTC + event_start_utc = event_start_local.astimezone(pytz.utc) + logger.debug(f"Converted {event_start_local} to UTC: {event_start_utc}") + except Exception as e: + logger.error(f"Error localizing time to {source_tz}: {e}") + # Fallback: treat as UTC + event_start_utc = django_timezone.make_aware(event_start_naive, pytz.utc) + + event_end_utc = event_start_utc + timedelta(minutes=program_duration) + + # Pre-generate the main event title and description for reuse + if title_template: + main_event_title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + main_event_title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + main_event_description = format_template(description_template, all_groups) + else: + main_event_description = main_event_title + + + + # Determine if this day is before, during, or after the event + # Event only happens on day 0 (first day) + is_event_day = (day == 0) + + if is_event_day and not event_happened: + # This is THE day the event happens + # Fill programs BEFORE the event + current_time = day_start + + while current_time < event_start_utc: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), event_start_utc) + + # Use custom upcoming templates if provided, otherwise use defaults + if upcoming_title_template: + upcoming_title = format_template(upcoming_title_template, all_groups) + else: + upcoming_title = main_event_title + + if upcoming_description_template: + upcoming_description = format_template(upcoming_description_template, all_groups) + else: + upcoming_description = f"Upcoming: {main_event_description}" + + # Build custom_properties for upcoming programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # 
Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": upcoming_title, + "description": upcoming_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + current_time += timedelta(minutes=program_duration) + + # Add the MAIN EVENT at the extracted time + # Build custom_properties for main event (includes category and live) + main_event_custom_properties = {} + + # Add categories if provided + if categories: + main_event_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = event_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + main_event_custom_properties['date'] = date_str + + # Add live flag if requested + if include_live: + main_event_custom_properties['live'] = True + + # Add new flag if requested + if include_new: + main_event_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + main_event_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": event_start_utc, + "end_time": event_end_utc, + "title": main_event_title, + "description": main_event_description, + "custom_properties": main_event_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + event_happened = True + + # Fill programs AFTER the event until end of day + current_time = event_end_utc + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom ended templates if provided, otherwise use defaults + if ended_title_template: + ended_title = format_template(ended_title_template, all_groups) + else: + ended_title = main_event_title + + if ended_description_template: + ended_description = format_template(ended_description_template, all_groups) + else: + ended_description = f"Ended: {main_event_description}" + + # Build custom_properties for ended programs (only date, no category/live) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": ended_title, + "description": ended_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + current_time += timedelta(minutes=program_duration) + else: + # This day is either before the event (future days) or after the event happened + 
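The event start time above is built by localizing a naive datetime into the source timezone and converting it to UTC, which is what makes DST handling transparent. A runnable sketch of just that step, using the 8:00 PM US/Eastern example from the docstring; the dates are illustrative:

from datetime import datetime
import pytz

source_tz = pytz.timezone("US/Eastern")

for naive_start in (datetime(2025, 10, 15, 20, 0), datetime(2025, 1, 15, 20, 0)):
    # localize() attaches the timezone and picks the correct offset for that date (EDT vs EST).
    event_start_local = source_tz.localize(naive_start)
    event_start_utc = event_start_local.astimezone(pytz.utc)
    print(event_start_local.strftime("%Y-%m-%d %H:%M %Z"),
          "->", event_start_utc.strftime("%Y-%m-%d %H:%M UTC"))

# October (EDT, UTC-4): 20:00 local -> 00:00 UTC the next day
# January (EST, UTC-5): 20:00 local -> 01:00 UTC the next day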
# Fill entire day with appropriate message + current_time = day_start + + # If event already happened, all programs show "Ended" + # If event hasn't happened yet (shouldn't occur with day 0 logic), show "Upcoming" + is_ended = event_happened + + while current_time < day_end: + program_start_utc = current_time + program_end_utc = min(current_time + timedelta(minutes=program_duration), day_end) + + # Use custom templates based on whether event has ended or is upcoming + if is_ended: + if ended_title_template: + program_title = format_template(ended_title_template, all_groups) + else: + program_title = main_event_title + + if ended_description_template: + program_description = format_template(ended_description_template, all_groups) + else: + program_description = f"Ended: {main_event_description}" + else: + if upcoming_title_template: + program_title = format_template(upcoming_title_template, all_groups) + else: + program_title = main_event_title + + if upcoming_description_template: + program_description = format_template(upcoming_description_template, all_groups) + else: + program_description = f"Upcoming: {main_event_description}" + + # Build custom_properties (only date for upcoming/ended filler programs) + program_custom_properties = {} + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": program_title, + "description": program_description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, + }) + + current_time += timedelta(minutes=program_duration) + else: + # No extracted time - fill entire day with regular intervals + # day_start and day_end are already in UTC, so no conversion needed + programs_per_day = max(1, int(24 / (program_duration / 60))) + + for program_num in range(programs_per_day): + program_start_utc = day_start + timedelta(minutes=program_num * program_duration) + program_end_utc = program_start_utc + timedelta(minutes=program_duration) + + if title_template: + title = format_template(title_template, all_groups) + else: + title_parts = [] + if 'league' in all_groups and all_groups['league']: + title_parts.append(all_groups['league']) + if 'team1' in all_groups and 'team2' in all_groups: + title_parts.append(f"{all_groups['team1']} vs {all_groups['team2']}") + elif 'title' in all_groups and all_groups['title']: + title_parts.append(all_groups['title']) + title = ' - '.join(title_parts) if title_parts else channel_name + + if description_template: + description = format_template(description_template, all_groups) + else: + description = title + + # Build custom_properties for this program + program_custom_properties = {} + + # Add categories if provided + if categories: + program_custom_properties['categories'] = categories + + # Add date if requested (YYYY-MM-DD format from start time in event timezone) + if include_date: + # Convert UTC time to event timezone for date calculation + local_time = program_start_utc.astimezone(source_tz) + date_str = local_time.strftime('%Y-%m-%d') + program_custom_properties['date'] = date_str + + # Add 
live flag if requested + if include_live: + program_custom_properties['live'] = True + + # Add new flag if requested + if include_new: + program_custom_properties['new'] = True + + # Add program poster URL if provided + if program_poster_url: + program_custom_properties['icon'] = program_poster_url + + programs.append({ + "channel_id": channel_id, + "start_time": program_start_utc, + "end_time": program_end_utc, + "title": title, + "description": description, + "custom_properties": program_custom_properties, + "channel_logo_url": channel_logo_url, # Pass channel logo for EPG generation + }) + + logger.info(f"Generated {len(programs)} custom dummy programs for {channel_name}") + return programs + + def generate_dummy_epg( channel_id, channel_name, xml_lines=None, num_days=1, program_length_hours=4 ): @@ -291,6 +1204,27 @@ def generate_dummy_epg( ) xml_lines.append(f" {html.escape(program['title'])}") xml_lines.append(f" {html.escape(program['description'])}") + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + xml_lines.append(f" {html.escape(cat)}") + + # Date tag + if 'date' in custom_data: + xml_lines.append(f" {html.escape(custom_data['date'])}") + + # Live tag + if custom_data.get('live', False): + xml_lines.append(f" ") + + # New tag + if custom_data.get('new', False): + xml_lines.append(f" ") + xml_lines.append(f" ") return xml_lines @@ -303,8 +1237,22 @@ def generate_epg(request, profile_name=None, user=None): by their associated EPGData record. This version filters data based on the 'days' parameter and sends keep-alives during processing. """ + # Check cache for recent identical request (helps with double-GET from browsers) + from django.core.cache import cache + cache_params = f"{profile_name or 'all'}:{user.username if user else 'anonymous'}:{request.GET.urlencode()}" + content_cache_key = f"epg_content:{cache_params}" + + cached_content = cache.get(content_cache_key) + if cached_content: + logger.debug("Serving EPG from cache") + response = HttpResponse(cached_content, content_type="application/xml") + response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' + response["Cache-Control"] = "no-cache" + return response + def epg_generator(): - """Generator function that yields EPG data with keep-alives during processing""" # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) + """Generator function that yields EPG data with keep-alives during processing""" + # Send initial HTTP headers as comments (these will be ignored by XML parsers but keep connection alive) xml_lines = [] xml_lines.append('') @@ -315,31 +1263,37 @@ def epg_generator(): # Get channels based on user/profile if user is not None: if user.user_level == 0: - filters = { - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } - - if user.channel_profiles.count() != 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = ( - channel_profiles - ) + user_profile_count = user.channel_profiles.count() - channels = Channel.objects.filter(**filters).order_by("channel_number") + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") + 
else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") else: channels = Channel.objects.filter(user_level__lte=user.user_level).order_by( "channel_number" ) else: if profile_name is not None: - channel_profile = ChannelProfile.objects.get(name=profile_name) + try: + channel_profile = ChannelProfile.objects.get(name=profile_name) + except ChannelProfile.DoesNotExist: + logger.warning("Requested channel profile (%s) during epg generation does not exist", profile_name) + raise Http404(f"Channel profile '{profile_name}' not found") channels = Channel.objects.filter( channelprofilemembership__channel_profile=channel_profile, channelprofilemembership__enabled=True, - ) + ).order_by("channel_number") else: - channels = Channel.objects.all() + channels = Channel.objects.all().order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -362,19 +1316,48 @@ def epg_generator(): dummy_days = num_days if num_days > 0 else 3 # Calculate cutoff date for EPG data filtering (only if days > 0) - now = timezone.now() + now = django_timezone.now() cutoff_date = now + timedelta(days=num_days) if num_days > 0 else None + # Build collision-free channel number mapping for XC clients (if user is authenticated) + # XC clients require integer channel numbers, so we need to ensure no conflicts + channel_num_map = {} + if user is not None: + # This is an XC client - build collision-free mapping + used_numbers = set() + + # First pass: assign integers for channels that already have integer numbers + for channel in channels: + if channel.channel_number == int(channel.channel_number): + num = int(channel.channel_number) + channel_num_map[channel.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + for channel in channels: + if channel.channel_number != int(channel.channel_number): + candidate = int(channel.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[channel.id] = candidate + used_numbers.add(candidate) + # Process channels for the section for channel in channels: - # Format channel number as integer if it has no decimal component - same as M3U generation - if channel.channel_number is not None: - if channel.channel_number == int(channel.channel_number): - formatted_channel_number = int(channel.channel_number) - else: - formatted_channel_number = channel.channel_number + # For XC clients (user is not None), use collision-free integer mapping + # For regular clients (user is None), use original formatting logic + if user is not None: + # XC client - use collision-free integer + formatted_channel_number = channel_num_map[channel.id] else: - formatted_channel_number = "" + # Regular client - format channel number as integer if it has no decimal component + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" # Determine the channel ID based on the selected source if tvg_id_source == 'tvg_id' and channel.tvg_id: @@ -387,7 +1370,62 @@ def epg_generator(): # Add 
channel logo if available tvg_logo = "" - if channel.logo: + + # Check if this is a custom dummy EPG with channel logo URL template + if channel.epg_data and channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + channel_logo_url_template = custom_props.get('channel_logo_url', '') + + if channel_logo_url_template: + # Determine which name to use for pattern matching (same logic as program generation) + pattern_match_name = channel.name + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + + # Try to extract groups from the channel/stream name and build the logo URL + title_pattern = custom_props.get('title_pattern', '') + if title_pattern: + try: + # Convert PCRE/JavaScript named groups to Python format + title_pattern = regex.sub(r'\(\?<(?![=!])([^>]+)>', r'(?P<\1>', title_pattern) + title_regex = regex.compile(title_pattern) + title_match = title_regex.search(pattern_match_name) + + if title_match: + groups = title_match.groupdict() + + # Add normalized versions of all groups for cleaner URLs + for key, value in list(groups.items()): + if value: + # Remove all non-alphanumeric characters and convert to lowercase + normalized = regex.sub(r'[^a-zA-Z0-9\s]', '', str(value)) + normalized = regex.sub(r'\s+', '', normalized).lower() + groups[f'{key}_normalize'] = normalized + + # Format the logo URL template with the matched groups (with URL encoding) + from urllib.parse import quote + for key, value in groups.items(): + if value: + encoded_value = quote(str(value), safe='') + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', encoded_value) + else: + channel_logo_url_template = channel_logo_url_template.replace(f'{{{key}}}', '') + tvg_logo = channel_logo_url_template + logger.debug(f"Built channel logo URL from template: {tvg_logo}") + except Exception as e: + logger.warning(f"Failed to build channel logo URL for {channel.name}: {e}") + + # If no custom dummy logo, use regular logo logic + if not tvg_logo and channel.logo: if use_cached_logos: # Use cached logo as before tvg_logo = build_absolute_uri_with_port(request, reverse('api:channels:logo-cache', args=[channel.logo.id])) @@ -406,7 +1444,8 @@ def epg_generator(): xml_lines.append(" ") # Send all channel definitions - yield '\n'.join(xml_lines) + '\n' + channel_xml = '\n'.join(xml_lines) + '\n' + yield channel_xml xml_lines = [] # Clear to save memory # Process programs for each channel @@ -418,23 +1457,55 @@ def epg_generator(): elif tvg_id_source == 'gracenote' and channel.tvc_guide_stationid: channel_id = channel.tvc_guide_stationid else: - # Get formatted channel number - if channel.channel_number is not None: - if channel.channel_number == int(channel.channel_number): - formatted_channel_number = int(channel.channel_number) - else: - formatted_channel_number = channel.channel_number + # For XC clients (user is not None), use collision-free integer mapping + # For regular clients (user is None), use original formatting logic + if user is not None: + # XC client - use collision-free integer from map + formatted_channel_number = 
channel_num_map[channel.id] else: - formatted_channel_number = "" + # Regular client - format channel number as before + if channel.channel_number is not None: + if channel.channel_number == int(channel.channel_number): + formatted_channel_number = int(channel.channel_number) + else: + formatted_channel_number = channel.channel_number + else: + formatted_channel_number = "" # Default to channel number channel_id = str(formatted_channel_number) if formatted_channel_number != "" else str(channel.id) + # Use EPG data name for display, but channel name for pattern matching display_name = channel.epg_data.name if channel.epg_data else channel.name + # For dummy EPG pattern matching, determine which name to use + pattern_match_name = channel.name + + # Check if we should use stream name instead of channel name + if channel.epg_data and channel.epg_data.epg_source: + epg_source = channel.epg_data.epg_source + if epg_source.custom_properties: + custom_props = epg_source.custom_properties + name_source = custom_props.get('name_source') + + if name_source == 'stream': + stream_index = custom_props.get('stream_index', 1) - 1 + channel_streams = channel.streams.all().order_by('channelstream__order') + + if channel_streams.exists() and 0 <= stream_index < channel_streams.count(): + stream = list(channel_streams)[stream_index] + pattern_match_name = stream.name + logger.debug(f"Using stream name for parsing: {pattern_match_name} (stream index: {stream_index})") + else: + logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name") if not channel.epg_data: # Use the enhanced dummy EPG generation function with defaults program_length_hours = 4 # Default to 4-hour program blocks - dummy_programs = generate_dummy_programs(channel_id, display_name, num_days=dummy_days, program_length_hours=program_length_hours) + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=None + ) for program in dummy_programs: # Format times in XMLTV format @@ -445,255 +1516,374 @@ def epg_generator(): yield f' \n' yield f" {html.escape(program['title'])}\n" yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + yield f" \n" else: + # Check if this is a dummy EPG with no programs (generate on-demand) + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + # This is a custom dummy EPG - check if it has programs + if not channel.epg_data.programs.exists(): + # No programs stored, generate on-demand using custom patterns + # Use actual channel name for pattern matching + program_length_hours = 4 + dummy_programs = generate_dummy_programs( + channel_id, pattern_match_name, + num_days=dummy_days, + program_length_hours=program_length_hours, + epg_source=channel.epg_data.epg_source + ) + + for program in dummy_programs: + start_str = program['start_time'].strftime("%Y%m%d%H%M%S %z") + stop_str = program['end_time'].strftime("%Y%m%d%H%M%S %z") + + yield f' 
\n' + yield f" {html.escape(program['title'])}\n" + yield f" {html.escape(program['description'])}\n" + + # Add custom_properties if present + custom_data = program.get('custom_properties', {}) + + # Categories + if 'categories' in custom_data: + for cat in custom_data['categories']: + yield f" {html.escape(cat)}\n" + + # Date tag + if 'date' in custom_data: + yield f" {html.escape(custom_data['date'])}\n" + + # Live tag + if custom_data.get('live', False): + yield f" \n" + + # New tag + if custom_data.get('new', False): + yield f" \n" + + # Icon/poster URL + if 'icon' in custom_data: + yield f" \n" + + yield f" \n" + + continue # Skip to next channel + # For real EPG data - filter only if days parameter was specified if num_days > 0: - programs = channel.epg_data.programs.filter( + programs_qs = channel.epg_data.programs.filter( start_time__gte=now, start_time__lt=cutoff_date - ) + ).order_by('id') # Explicit ordering for consistent chunking else: # Return all programs if days=0 or not specified - programs = channel.epg_data.programs.all() + programs_qs = channel.epg_data.programs.all().order_by('id') - # Process programs in chunks to avoid memory issues + # Process programs in chunks to avoid cursor timeout issues program_batch = [] - batch_size = 100 - - for prog in programs.iterator(): # Use iterator to avoid loading all at once - start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") - stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") - - program_xml = [f' '] - program_xml.append(f' {html.escape(prog.title)}') - - # Add subtitle if available - if prog.sub_title: - program_xml.append(f" {html.escape(prog.sub_title)}") - - # Add description if available - if prog.description: - program_xml.append(f" {html.escape(prog.description)}") - - # Process custom properties if available - if prog.custom_properties: - custom_data = prog.custom_properties or {} - - # Add categories if available - if "categories" in custom_data and custom_data["categories"]: - for category in custom_data["categories"]: - program_xml.append(f" {html.escape(category)}") - - # Add keywords if available - if "keywords" in custom_data and custom_data["keywords"]: - for keyword in custom_data["keywords"]: - program_xml.append(f" {html.escape(keyword)}") - - # Handle episode numbering - multiple formats supported - # Prioritize onscreen_episode over standalone episode for onscreen system - if "onscreen_episode" in custom_data: - program_xml.append(f' {html.escape(custom_data["onscreen_episode"])}') - elif "episode" in custom_data: - program_xml.append(f' E{custom_data["episode"]}') - - # Handle dd_progid format - if 'dd_progid' in custom_data: - program_xml.append(f' {html.escape(custom_data["dd_progid"])}') - - # Handle external database IDs - for system in ['thetvdb.com', 'themoviedb.org', 'imdb.com']: - if f'{system}_id' in custom_data: - program_xml.append(f' {html.escape(custom_data[f"{system}_id"])}') - - # Add season and episode numbers in xmltv_ns format if available - if "season" in custom_data and "episode" in custom_data: - season = ( - int(custom_data["season"]) - 1 - if str(custom_data["season"]).isdigit() - else 0 - ) - episode = ( - int(custom_data["episode"]) - 1 - if str(custom_data["episode"]).isdigit() - else 0 - ) - program_xml.append(f' {season}.{episode}.') - - # Add language information - if "language" in custom_data: - program_xml.append(f' {html.escape(custom_data["language"])}') - - if "original_language" in custom_data: - program_xml.append(f' {html.escape(custom_data["original_language"])}') 
- - # Add length information - if "length" in custom_data and isinstance(custom_data["length"], dict): - length_value = custom_data["length"].get("value", "") - length_units = custom_data["length"].get("units", "minutes") - program_xml.append(f' {html.escape(str(length_value))}') - - # Add video information - if "video" in custom_data and isinstance(custom_data["video"], dict): - program_xml.append(" ") - - # Add audio information - if "audio" in custom_data and isinstance(custom_data["audio"], dict): - program_xml.append(" ") - - # Add subtitles information - if "subtitles" in custom_data and isinstance(custom_data["subtitles"], list): - for subtitle in custom_data["subtitles"]: - if isinstance(subtitle, dict): - subtitle_type = subtitle.get("type", "") - type_attr = f' type="{html.escape(subtitle_type)}"' if subtitle_type else "" - program_xml.append(f" ") - if "language" in subtitle: - program_xml.append(f" {html.escape(subtitle['language'])}") - program_xml.append(" ") - - # Add rating if available - if "rating" in custom_data: - rating_system = custom_data.get("rating_system", "TV Parental Guidelines") - program_xml.append(f' ') - program_xml.append(f' {html.escape(custom_data["rating"])}') - program_xml.append(f" ") - - # Add star ratings - if "star_ratings" in custom_data and isinstance(custom_data["star_ratings"], list): - for star_rating in custom_data["star_ratings"]: - if isinstance(star_rating, dict) and "value" in star_rating: - system_attr = f' system="{html.escape(star_rating["system"])}"' if "system" in star_rating else "" - program_xml.append(f" ") - program_xml.append(f" {html.escape(star_rating['value'])}") - program_xml.append(" ") - - # Add reviews - if "reviews" in custom_data and isinstance(custom_data["reviews"], list): - for review in custom_data["reviews"]: - if isinstance(review, dict) and "content" in review: - review_type = review.get("type", "text") - attrs = [f'type="{html.escape(review_type)}"'] - if "source" in review: - attrs.append(f'source="{html.escape(review["source"])}"') - if "reviewer" in review: - attrs.append(f'reviewer="{html.escape(review["reviewer"])}"') - attr_str = " ".join(attrs) - program_xml.append(f' {html.escape(review["content"])}') - - # Add images - if "images" in custom_data and isinstance(custom_data["images"], list): - for image in custom_data["images"]: - if isinstance(image, dict) and "url" in image: - attrs = [] - for attr in ['type', 'size', 'orient', 'system']: - if attr in image: - attrs.append(f'{attr}="{html.escape(image[attr])}"') - attr_str = " " + " ".join(attrs) if attrs else "" - program_xml.append(f' {html.escape(image["url"])}') - - # Add enhanced credits handling - if "credits" in custom_data: - program_xml.append(" ") - credits = custom_data["credits"] - - # Handle different credit types - for role in ['director', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: - if role in credits: - people = credits[role] - if isinstance(people, list): - for person in people: - program_xml.append(f" <{role}>{html.escape(person)}") - else: - program_xml.append(f" <{role}>{html.escape(people)}") - - # Handle actors separately to include role and guest attributes - if "actor" in credits: - actors = credits["actor"] - if isinstance(actors, list): - for actor in actors: - if isinstance(actor, dict): - name = actor.get("name", "") - role_attr = f' role="{html.escape(actor["role"])}"' if "role" in actor else "" - guest_attr = ' guest="yes"' if actor.get("guest") else "" - 
program_xml.append(f" {html.escape(name)}") + batch_size = 250 + chunk_size = 1000 # Fetch 1000 programs at a time from DB + + # Fetch chunks until no more results (avoids count() query) + offset = 0 + while True: + # Fetch a chunk of programs - this closes the cursor after fetching + program_chunk = list(programs_qs[offset:offset + chunk_size]) + + # Break if no more programs + if not program_chunk: + break + + # Process each program in the chunk + for prog in program_chunk: + start_str = prog.start_time.strftime("%Y%m%d%H%M%S %z") + stop_str = prog.end_time.strftime("%Y%m%d%H%M%S %z") + + program_xml = [f' '] + program_xml.append(f' {html.escape(prog.title)}') + + # Add subtitle if available + if prog.sub_title: + program_xml.append(f" {html.escape(prog.sub_title)}") + + # Add description if available + if prog.description: + program_xml.append(f" {html.escape(prog.description)}") + + # Process custom properties if available + if prog.custom_properties: + custom_data = prog.custom_properties or {} + + # Add categories if available + if "categories" in custom_data and custom_data["categories"]: + for category in custom_data["categories"]: + program_xml.append(f" {html.escape(category)}") + + # Add keywords if available + if "keywords" in custom_data and custom_data["keywords"]: + for keyword in custom_data["keywords"]: + program_xml.append(f" {html.escape(keyword)}") + + # Handle episode numbering - multiple formats supported + # Prioritize onscreen_episode over standalone episode for onscreen system + if "onscreen_episode" in custom_data: + program_xml.append(f' {html.escape(custom_data["onscreen_episode"])}') + elif "episode" in custom_data: + program_xml.append(f' E{custom_data["episode"]}') + + # Handle dd_progid format + if 'dd_progid' in custom_data: + program_xml.append(f' {html.escape(custom_data["dd_progid"])}') + + # Handle external database IDs + for system in ['thetvdb.com', 'themoviedb.org', 'imdb.com']: + if f'{system}_id' in custom_data: + program_xml.append(f' {html.escape(custom_data[f"{system}_id"])}') + + # Add season and episode numbers in xmltv_ns format if available + if "season" in custom_data and "episode" in custom_data: + season = ( + int(custom_data["season"]) - 1 + if str(custom_data["season"]).isdigit() + else 0 + ) + episode = ( + int(custom_data["episode"]) - 1 + if str(custom_data["episode"]).isdigit() + else 0 + ) + program_xml.append(f' {season}.{episode}.') + + # Add language information + if "language" in custom_data: + program_xml.append(f' {html.escape(custom_data["language"])}') + + if "original_language" in custom_data: + program_xml.append(f' {html.escape(custom_data["original_language"])}') + + # Add length information + if "length" in custom_data and isinstance(custom_data["length"], dict): + length_value = custom_data["length"].get("value", "") + length_units = custom_data["length"].get("units", "minutes") + program_xml.append(f' {html.escape(str(length_value))}') + + # Add video information + if "video" in custom_data and isinstance(custom_data["video"], dict): + program_xml.append(" ") + + # Add audio information + if "audio" in custom_data and isinstance(custom_data["audio"], dict): + program_xml.append(" ") + + # Add subtitles information + if "subtitles" in custom_data and isinstance(custom_data["subtitles"], list): + for subtitle in custom_data["subtitles"]: + if isinstance(subtitle, dict): + subtitle_type = subtitle.get("type", "") + type_attr = f' type="{html.escape(subtitle_type)}"' if subtitle_type else "" + program_xml.append(f" ") + 
if "language" in subtitle: + program_xml.append(f" {html.escape(subtitle['language'])}") + program_xml.append(" ") + + # Add rating if available + if "rating" in custom_data: + rating_system = custom_data.get("rating_system", "TV Parental Guidelines") + program_xml.append(f' ') + program_xml.append(f' {html.escape(custom_data["rating"])}') + program_xml.append(f" ") + + # Add star ratings + if "star_ratings" in custom_data and isinstance(custom_data["star_ratings"], list): + for star_rating in custom_data["star_ratings"]: + if isinstance(star_rating, dict) and "value" in star_rating: + system_attr = f' system="{html.escape(star_rating["system"])}"' if "system" in star_rating else "" + program_xml.append(f" ") + program_xml.append(f" {html.escape(star_rating['value'])}") + program_xml.append(" ") + + # Add reviews + if "reviews" in custom_data and isinstance(custom_data["reviews"], list): + for review in custom_data["reviews"]: + if isinstance(review, dict) and "content" in review: + review_type = review.get("type", "text") + attrs = [f'type="{html.escape(review_type)}"'] + if "source" in review: + attrs.append(f'source="{html.escape(review["source"])}"') + if "reviewer" in review: + attrs.append(f'reviewer="{html.escape(review["reviewer"])}"') + attr_str = " ".join(attrs) + program_xml.append(f' {html.escape(review["content"])}') + + # Add images + if "images" in custom_data and isinstance(custom_data["images"], list): + for image in custom_data["images"]: + if isinstance(image, dict) and "url" in image: + attrs = [] + for attr in ['type', 'size', 'orient', 'system']: + if attr in image: + attrs.append(f'{attr}="{html.escape(image[attr])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f' {html.escape(image["url"])}') + + # Add enhanced credits handling + if "credits" in custom_data: + program_xml.append(" ") + credits = custom_data["credits"] + + # Handle different credit types + for role in ['director', 'writer', 'adapter', 'producer', 'composer', 'editor', 'presenter', 'commentator', 'guest']: + if role in credits: + people = credits[role] + if isinstance(people, list): + for person in people: + program_xml.append(f" <{role}>{html.escape(person)}") else: - program_xml.append(f" {html.escape(actor)}") + program_xml.append(f" <{role}>{html.escape(people)}") + + # Handle actors separately to include role and guest attributes + if "actor" in credits: + actors = credits["actor"] + if isinstance(actors, list): + for actor in actors: + if isinstance(actor, dict): + name = actor.get("name", "") + role_attr = f' role="{html.escape(actor["role"])}"' if "role" in actor else "" + guest_attr = ' guest="yes"' if actor.get("guest") else "" + program_xml.append(f" {html.escape(name)}") + else: + program_xml.append(f" {html.escape(actor)}") + else: + program_xml.append(f" {html.escape(actors)}") + + program_xml.append(" ") + + # Add program date if available (full date, not just year) + if "date" in custom_data: + program_xml.append(f' {html.escape(custom_data["date"])}') + + # Add country if available + if "country" in custom_data: + program_xml.append(f' {html.escape(custom_data["country"])}') + + # Add icon if available + if "icon" in custom_data: + program_xml.append(f' ') + + # Add special flags as proper tags with enhanced handling + if custom_data.get("previously_shown", False): + prev_shown_details = custom_data.get("previously_shown_details", {}) + attrs = [] + if "start" in prev_shown_details: + attrs.append(f'start="{html.escape(prev_shown_details["start"])}"') + 
if "channel" in prev_shown_details: + attrs.append(f'channel="{html.escape(prev_shown_details["channel"])}"') + attr_str = " " + " ".join(attrs) if attrs else "" + program_xml.append(f" ") + + if custom_data.get("premiere", False): + premiere_text = custom_data.get("premiere_text", "") + if premiere_text: + program_xml.append(f" {html.escape(premiere_text)}") else: - program_xml.append(f" {html.escape(actors)}") - - program_xml.append(" ") - - # Add program date if available (full date, not just year) - if "date" in custom_data: - program_xml.append(f' {html.escape(custom_data["date"])}') - - # Add country if available - if "country" in custom_data: - program_xml.append(f' {html.escape(custom_data["country"])}') - - # Add icon if available - if "icon" in custom_data: - program_xml.append(f' ') - - # Add special flags as proper tags with enhanced handling - if custom_data.get("previously_shown", False): - prev_shown_details = custom_data.get("previously_shown_details", {}) - attrs = [] - if "start" in prev_shown_details: - attrs.append(f'start="{html.escape(prev_shown_details["start"])}"') - if "channel" in prev_shown_details: - attrs.append(f'channel="{html.escape(prev_shown_details["channel"])}"') - attr_str = " " + " ".join(attrs) if attrs else "" - program_xml.append(f" ") - - if custom_data.get("premiere", False): - premiere_text = custom_data.get("premiere_text", "") - if premiere_text: - program_xml.append(f" {html.escape(premiere_text)}") - else: - program_xml.append(" ") - - if custom_data.get("last_chance", False): - last_chance_text = custom_data.get("last_chance_text", "") - if last_chance_text: - program_xml.append(f" {html.escape(last_chance_text)}") - else: - program_xml.append(" ") - - if custom_data.get("new", False): - program_xml.append(" ") - - if custom_data.get('live', False): - program_xml.append(' ') - - program_xml.append(" ") - - # Add to batch - program_batch.extend(program_xml) - - # Send batch when full or send keep-alive - if len(program_batch) >= batch_size: - yield '\n'.join(program_batch) + '\n' - program_batch = [] # Send keep-alive every batch + program_xml.append(" ") + + if custom_data.get("last_chance", False): + last_chance_text = custom_data.get("last_chance_text", "") + if last_chance_text: + program_xml.append(f" {html.escape(last_chance_text)}") + else: + program_xml.append(" ") + + if custom_data.get("new", False): + program_xml.append(" ") + + if custom_data.get('live', False): + program_xml.append(' ') + + program_xml.append(" ") + + # Add to batch + program_batch.extend(program_xml) + + # Send batch when full or send keep-alive + if len(program_batch) >= batch_size: + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml + program_batch = [] + + # Move to next chunk + offset += chunk_size # Send remaining programs in batch if program_batch: - yield '\n'.join(program_batch) + '\n' + batch_xml = '\n'.join(program_batch) + '\n' + yield batch_xml # Send final closing tag and completion message - yield "\n" # Return streaming response + yield "\n" + + # Log system event for EPG download after streaming completes (with deduplication based on client) + client_id, client_ip, user_agent = get_client_identifier(request) + event_cache_key = f"epg_download:{user.username if user else 'anonymous'}:{profile_name or 'all'}:{client_id}" + if not cache.get(event_cache_key): + log_system_event( + event_type='epg_download', + profile=profile_name or 'all', + user=user.username if user else 'anonymous', + channels=channels.count(), + client_ip=client_ip, + 
user_agent=user_agent, + ) + cache.set(event_cache_key, True, 2) # Prevent duplicate events for 2 seconds + + # Wrapper generator that collects content for caching + def caching_generator(): + collected_content = [] + for chunk in epg_generator(): + collected_content.append(chunk) + yield chunk + # After streaming completes, cache the full content + full_content = ''.join(collected_content) + cache.set(content_cache_key, full_content, 300) + logger.debug("Cached EPG content (%d bytes)", len(full_content)) + + # Return streaming response response = StreamingHttpResponse( - streaming_content=epg_generator(), + streaming_content=caching_generator(), content_type="application/xml" ) response["Content-Disposition"] = 'attachment; filename="Dispatcharr.xml"' @@ -781,45 +1971,31 @@ def xc_player_api(request, full=False): if user is None: return JsonResponse({'error': 'Unauthorized'}, status=401) - server_info = xc_get_info(request) - - if not action: - return JsonResponse(server_info) - if action == "get_live_categories": return JsonResponse(xc_get_live_categories(user), safe=False) - if action == "get_live_streams": + elif action == "get_live_streams": return JsonResponse(xc_get_live_streams(request, user, request.GET.get("category_id")), safe=False) - if action == "get_short_epg": + elif action == "get_short_epg": return JsonResponse(xc_get_epg(request, user, short=True), safe=False) - if action == "get_simple_data_table": + elif action == "get_simple_data_table": return JsonResponse(xc_get_epg(request, user, short=False), safe=False) - - # Endpoints not implemented, but still provide a response - if action in [ - "get_vod_categories", - "get_vod_streams", - "get_series", - "get_series_categories", - "get_series_info", - "get_vod_info", - ]: - if action == "get_vod_categories": - return JsonResponse(xc_get_vod_categories(user), safe=False) - elif action == "get_vod_streams": - return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False) - elif action == "get_series_categories": - return JsonResponse(xc_get_series_categories(user), safe=False) - elif action == "get_series": - return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False) - elif action == "get_series_info": - return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False) - elif action == "get_vod_info": - return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False) - else: - return JsonResponse([], safe=False) - - raise Http404() + elif action == "get_vod_categories": + return JsonResponse(xc_get_vod_categories(user), safe=False) + elif action == "get_vod_streams": + return JsonResponse(xc_get_vod_streams(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_categories": + return JsonResponse(xc_get_series_categories(user), safe=False) + elif action == "get_series": + return JsonResponse(xc_get_series(request, user, request.GET.get("category_id")), safe=False) + elif action == "get_series_info": + return JsonResponse(xc_get_series_info(request, user, request.GET.get("series_id")), safe=False) + elif action == "get_vod_info": + return JsonResponse(xc_get_vod_info(request, user, request.GET.get("vod_id")), safe=False) + else: + # For any other action (including get_account_info or unknown actions), + # return server_info/account_info to match provider behavior + server_info = xc_get_info(request) + return JsonResponse(server_info, safe=False) def 
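The caching_generator above tees the streamed XMLTV output so the full document can be cached once the response finishes. A condensed, runnable version of that pattern (plain dict instead of cache.set(key, content, 300)); note the content is only cached when the client consumes the stream to the end.

def caching_generator(source, cache, key):
    """Yield chunks unchanged while collecting them; cache the full body at the end."""
    collected = []
    for chunk in source:
        collected.append(chunk)
        yield chunk
    cache[key] = "".join(collected)

cache = {}
chunks = list(caching_generator(iter(["<tv>", "...", "</tv>"]), cache, "epg:anon:all"))
assert "".join(chunks) == cache["epg:anon:all"] == "<tv>...</tv>"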
xc_panel_api(request): @@ -836,12 +2012,34 @@ def xc_panel_api(request): def xc_get(request): if not network_access_allowed(request, 'XC_API'): + # Log blocked M3U download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + user=request.GET.get('username', 'unknown'), + reason='Network access denied (XC API)', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Forbidden'}, status=403) action = request.GET.get("action") user = xc_get_user(request) if user is None: + # Log blocked M3U download due to invalid credentials + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='m3u_blocked', + user=request.GET.get('username', 'unknown'), + reason='Invalid XC credentials', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Unauthorized'}, status=401) return generate_m3u(request, None, user) @@ -849,37 +2047,63 @@ def xc_get(request): def xc_xmltv(request): if not network_access_allowed(request, 'XC_API'): + # Log blocked EPG download + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + user=request.GET.get('username', 'unknown'), + reason='Network access denied (XC API)', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Forbidden'}, status=403) user = xc_get_user(request) if user is None: + # Log blocked EPG download due to invalid credentials + from core.utils import log_system_event + client_ip = request.META.get('REMOTE_ADDR', 'unknown') + user_agent = request.META.get('HTTP_USER_AGENT', 'unknown') + log_system_event( + event_type='epg_blocked', + user=request.GET.get('username', 'unknown'), + reason='Invalid XC credentials', + client_ip=client_ip, + user_agent=user_agent, + ) return JsonResponse({'error': 'Unauthorized'}, status=401) return generate_epg(request, None, user) def xc_get_live_categories(user): + from django.db.models import Min response = [] if user.user_level == 0: - filters = { - "channels__channelprofilemembership__enabled": True, - "channels__user_level": 0, - } - - if user.channel_profiles.count() != 0: - # Only get data from active profile - channel_profiles = user.channel_profiles.all() - filters["channels__channelprofilemembership__channel_profile__in"] = ( - channel_profiles - ) - - channel_groups = ChannelGroup.objects.filter(**filters).distinct().order_by(Lower("name")) + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channel groups + channel_groups = ChannelGroup.objects.filter( + channels__isnull=False, channels__user_level__lte=user.user_level + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') + else: + # User has specific limited profiles assigned + filters = { + "channels__channelprofilemembership__enabled": True, + "channels__user_level": 0, + "channels__channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel_groups = 
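xc_get_live_categories now orders groups by their lowest channel number via annotate(Min(...)). For readers unfamiliar with the ORM expression, a plain-Python equivalent of that ordering with invented sample data:

# group name -> channel numbers belonging to that group
groups = {
    "News":   [5.0, 7.0],
    "Sports": [2.0, 30.0],
    "Kids":   [12.5],
}

# Equivalent of .annotate(min_channel_number=Min("channels__channel_number"))
#               .order_by("min_channel_number")
ordered = sorted(groups, key=lambda g: min(groups[g]))
assert ordered == ["Sports", "News", "Kids"]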
ChannelGroup.objects.filter(**filters).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') else: channel_groups = ChannelGroup.objects.filter( channels__isnull=False, channels__user_level__lte=user.user_level - ).distinct().order_by(Lower("name")) + ).distinct().annotate(min_channel_number=Min('channels__channel_number')).order_by('min_channel_number') for group in channel_groups: response.append( @@ -897,20 +2121,25 @@ def xc_get_live_streams(request, user, category_id=None): streams = [] if user.user_level == 0: - filters = { - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } - - if user.channel_profiles.count() > 0: - # Only get data from active profile - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = channel_profiles - - if category_id is not None: - filters["channel_group__id"] = category_id - - channels = Channel.objects.filter(**filters).order_by("channel_number") + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = {"user_level__lte": user.user_level} + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).order_by("channel_number") + else: + # User has specific limited profiles assigned + filters = { + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + if category_id is not None: + filters["channel_group__id"] = category_id + channels = Channel.objects.filter(**filters).distinct().order_by("channel_number") else: if not category_id: channels = Channel.objects.filter(user_level__lte=user.user_level).order_by("channel_number") @@ -919,10 +2148,38 @@ def xc_get_live_streams(request, user, category_id=None): channel_group__id=category_id, user_level__lte=user.user_level ).order_by("channel_number") + # Build collision-free mapping for XC clients (which require integers) + # This ensures channels with float numbers don't conflict with existing integers + channel_num_map = {} # Maps channel.id -> integer channel number for XC + used_numbers = set() # Track all assigned integer channel numbers + + # First pass: assign integers for channels that already have integer numbers for channel in channels: + if channel.channel_number == int(channel.channel_number): + # Already an integer, use it directly + num = int(channel.channel_number) + channel_num_map[channel.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + # Find next available number to avoid collisions + for channel in channels: + if channel.channel_number != int(channel.channel_number): + # Has decimal component, need to find available integer + # Start from truncated value and increment until we find an unused number + candidate = int(channel.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[channel.id] = candidate + used_numbers.add(candidate) + + # Build the streams list with the collision-free channel numbers + for channel in channels: + channel_num_int = channel_num_map[channel.id] + streams.append( { - "num": int(channel.channel_number) if channel.channel_number.is_integer() else channel.channel_number, + "num": channel_num_int, "name": 
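A self-contained version of the two-pass collision-free mapping introduced above, which XC clients need because they only accept integer channel numbers. The function name and sample data are illustrative.

def build_channel_number_map(channels):
    """channels: iterable of (channel_id, channel_number); numbers may be floats."""
    mapping, used = {}, set()
    # Pass 1: channels that already have integer numbers keep them.
    for cid, num in channels:
        if num == int(num):
            mapping[cid] = int(num)
            used.add(int(num))
    # Pass 2: float numbers take the next free integer at or above their truncation.
    for cid, num in channels:
        if num != int(num):
            candidate = int(num)
            while candidate in used:
                candidate += 1
            mapping[cid] = candidate
            used.add(candidate)
    return mapping

channels = [(1, 5.0), (2, 5.1), (3, 6.0), (4, 5.2)]
assert build_channel_number_map(channels) == {1: 5, 3: 6, 2: 7, 4: 8}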
channel.name, "stream_type": "live", "stream_id": channel.id, @@ -934,8 +2191,8 @@ def xc_get_live_streams(request, user, category_id=None): reverse("api:channels:logo-cache", args=[channel.logo.id]) ) ), - "epg_channel_id": str(int(channel.channel_number)) if channel.channel_number.is_integer() else str(channel.channel_number), - "added": int(time.time()), # @TODO: make this the actual created date + "epg_channel_id": str(channel_num_int), + "added": int(channel.created_at.timestamp()), "is_adult": 0, "category_id": str(channel.channel_group.id), "category_ids": [channel.channel_group.id], @@ -956,35 +2213,95 @@ def xc_get_epg(request, user, short=False): channel = None if user.user_level < 10: - filters = { - "id": channel_id, - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } - - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = channel_profiles + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + channel = Channel.objects.filter( + id=channel_id, + user_level__lte=user.user_level + ).first() + else: + # User has specific limited profiles assigned + filters = { + "id": channel_id, + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() - channel = get_object_or_404(Channel, **filters) + if not channel: + raise Http404() else: channel = get_object_or_404(Channel, id=channel_id) if not channel: raise Http404() + # Calculate the collision-free integer channel number for this channel + # This must match the logic in xc_get_live_streams to ensure consistency + # Get all channels in the same category for collision detection + category_channels = Channel.objects.filter( + channel_group=channel.channel_group + ).order_by("channel_number") + + channel_num_map = {} + used_numbers = set() + + # First pass: assign integers for channels that already have integer numbers + for ch in category_channels: + if ch.channel_number == int(ch.channel_number): + num = int(ch.channel_number) + channel_num_map[ch.id] = num + used_numbers.add(num) + + # Second pass: assign integers for channels with float numbers + for ch in category_channels: + if ch.channel_number != int(ch.channel_number): + candidate = int(ch.channel_number) + while candidate in used_numbers: + candidate += 1 + channel_num_map[ch.id] = candidate + used_numbers.add(candidate) + + # Get the mapped integer for this specific channel + channel_num_int = channel_num_map.get(channel.id, int(channel.channel_number)) + limit = request.GET.get('limit', 4) if channel.epg_data: - if short == False: - programs = channel.epg_data.programs.filter( - start_time__gte=timezone.now() - ).order_by('start_time') + # Check if this is a dummy EPG that generates on-demand + if channel.epg_data.epg_source and channel.epg_data.epg_source.source_type == 'dummy': + if not channel.epg_data.programs.exists(): + # Generate on-demand using custom patterns + programs = generate_dummy_programs( + channel_id=channel_id, + channel_name=channel.name, + epg_source=channel.epg_data.epg_source + ) + else: + # Has stored programs, use them + if short == False: + programs = channel.epg_data.programs.filter( + 
start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] else: - programs = channel.epg_data.programs.all().order_by('start_time')[:limit] + # Regular EPG with stored programs + if short == False: + programs = channel.epg_data.programs.filter( + start_time__gte=django_timezone.now() + ).order_by('start_time') + else: + programs = channel.epg_data.programs.all().order_by('start_time')[:limit] else: - programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name) + # No EPG data assigned, generate default dummy + programs = generate_dummy_programs(channel_id=channel_id, channel_name=channel.name, epg_source=None) output = {"epg_listings": []} + for program in programs: id = "0" epg_id = "0" @@ -1002,14 +2319,14 @@ def xc_get_epg(request, user, short=False): "start": start.strftime("%Y%m%d%H%M%S"), "end": end.strftime("%Y%m%d%H%M%S"), "description": base64.b64encode(description.encode()).decode(), - "channel_id": int(channel.channel_number) if channel.channel_number.is_integer() else channel.channel_number, + "channel_id": channel_num_int, "start_timestamp": int(start.timestamp()), "stop_timestamp": int(end.timestamp()), "stream_id": f"{channel_id}", } if short == False: - program_output["now_playing"] = 1 if start <= timezone.now() <= end else 0 + program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 program_output["has_archive"] = "0" output['epg_listings'].append(program_output) @@ -1082,7 +2399,7 @@ def xc_get_vod_streams(request, user, category_id=None): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), #'stream_icon': movie.logo.url if movie.logo else '', @@ -1152,7 +2469,7 @@ def xc_get_series(request, user, category_id=None): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series.description or "", @@ -1194,7 +2511,7 @@ def xc_get_series_info(request, user, series_id): try: should_refresh = ( not series_relation.last_episode_refresh or - series_relation.last_episode_refresh < timezone.now() - timedelta(hours=24) + series_relation.last_episode_refresh < django_timezone.now() - timedelta(hours=24) ) # Check if detailed data has been fetched @@ -1345,7 +2662,7 @@ def xc_get_series_info(request, user, series_id): None if not series.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[series.logo.id]) + reverse("api:vod:vodlogo-cache", args=[series.logo.id]) ) ), "plot": series_data['description'], @@ -1473,14 +2790,14 @@ def xc_get_vod_info(request, user, vod_id): None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), "movie_image": ( None if not movie.logo else build_absolute_uri_with_port( request, - reverse("api:channels:logo-cache", args=[movie.logo.id]) + reverse("api:vod:vodlogo-cache", args=[movie.logo.id]) ) ), 'description': movie_data.get('description', ''), @@ -1593,45 +2910,78 @@ def get_host_and_port(request): Returns (host, port) for building absolute URIs. - Prefers X-Forwarded-Host/X-Forwarded-Port (nginx). - Falls back to Host header. 
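A runnable sketch of one epg_listings entry as assembled above: the description is base64-encoded and now_playing is derived from the current time. Only fields visible in this hunk are included; anything else the real response carries is omitted here.

import base64
from datetime import datetime, timedelta, timezone

def epg_entry(description, start, end, channel_num, now):
    return {
        "start": start.strftime("%Y%m%d%H%M%S"),
        "end": end.strftime("%Y%m%d%H%M%S"),
        "description": base64.b64encode(description.encode()).decode(),
        "channel_id": channel_num,
        "start_timestamp": int(start.timestamp()),
        "stop_timestamp": int(end.timestamp()),
        "now_playing": 1 if start <= now <= end else 0,
    }

start = datetime(2025, 1, 1, 20, 0, tzinfo=timezone.utc)
entry = epg_entry("Evening news", start, start + timedelta(hours=1), 5,
                  now=start + timedelta(minutes=30))
assert entry["now_playing"] == 1
assert base64.b64decode(entry["description"]).decode() == "Evening news"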
- - In dev, if missing, uses 5656 or 8000 as a guess. + - Returns None for port if using standard ports (80/443) to omit from URLs. + - In dev, uses 5656 as a guess if port cannot be determined. """ - # 1. Try X-Forwarded-Host (may include port) + # Determine the scheme first - needed for standard port detection + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + standard_port = "443" if scheme == "https" else "80" + + # 1. Try X-Forwarded-Host (may include port) - set by our nginx xfh = request.META.get("HTTP_X_FORWARDED_HOST") if xfh: if ":" in xfh: host, port = xfh.split(":", 1) + # Omit standard ports from URLs, or omit if port doesn't match standard for scheme + # (e.g., HTTPS but port is 9191 = behind external reverse proxy) + if port == standard_port: + return host, None + # If port doesn't match standard and X-Forwarded-Proto is set, likely behind external RP + if request.META.get("HTTP_X_FORWARDED_PROTO"): + host = xfh.split(":")[0] # Strip port, will check for proper port below + else: + return host, port else: host = xfh - port = request.META.get("HTTP_X_FORWARDED_PORT") + + # Check for X-Forwarded-Port header (if we didn't already find a valid port) + port = request.META.get("HTTP_X_FORWARDED_PORT") if port: - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port + # If X-Forwarded-Proto is set but no valid port, assume standard + if request.META.get("HTTP_X_FORWARDED_PROTO"): + return host, None # 2. Try Host header raw_host = request.get_host() if ":" in raw_host: host, port = raw_host.split(":", 1) - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port else: host = raw_host - # 3. Try X-Forwarded-Port - port = request.META.get("HTTP_X_FORWARDED_PORT") + # 3. Check if we're behind a reverse proxy (X-Forwarded-Proto or X-Forwarded-For present) + # If so, assume standard port for the scheme (don't trust SERVER_PORT in this case) + if request.META.get("HTTP_X_FORWARDED_PROTO") or request.META.get("HTTP_X_FORWARDED_FOR"): + return host, None + + # 4. Try SERVER_PORT from META (only if NOT behind reverse proxy) + port = request.META.get("SERVER_PORT") if port: - return host, port + # Omit standard ports from URLs + return host, None if port == standard_port else port - # 4. Dev fallback: guess port + # 5. Dev fallback: guess port 5656 if os.environ.get("DISPATCHARR_ENV") == "dev" or host in ("localhost", "127.0.0.1"): - guess = "5656" - return host, guess + return host, "5656" - # 5. Fallback to scheme default - port = "443" if request.is_secure() else "9191" - return host, port + # 6. Final fallback: assume standard port for scheme (omit from URL) + return host, None def build_absolute_uri_with_port(request, path): + """ + Build an absolute URI with optional port. + Port is omitted from URL if None (standard port for scheme). 
+ """ host, port = get_host_and_port(request) - scheme = request.scheme - return f"{scheme}://{host}:{port}{path}" + scheme = request.META.get("HTTP_X_FORWARDED_PROTO", request.scheme) + + if port: + return f"{scheme}://{host}:{port}{path}" + else: + return f"{scheme}://{host}{path}" def format_duration_hms(seconds): """ diff --git a/apps/proxy/config.py b/apps/proxy/config.py index 9ce5b66c5..3b1ce9677 100644 --- a/apps/proxy/config.py +++ b/apps/proxy/config.py @@ -1,4 +1,6 @@ """Shared configuration between proxy types""" +import time +from django.db import connection class BaseConfig: DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20' # Will only be used if connection to settings fail @@ -12,13 +14,29 @@ class BaseConfig: BUFFERING_TIMEOUT = 15 # Seconds to wait for buffering before switching streams BUFFER_SPEED = 1 # What speed to condsider the stream buffering, 1x is normal speed, 2x is double speed, etc. + # Cache for proxy settings (class-level, shared across all instances) + _proxy_settings_cache = None + _proxy_settings_cache_time = 0 + _proxy_settings_cache_ttl = 10 # Cache for 10 seconds + @classmethod def get_proxy_settings(cls): - """Get proxy settings from CoreSettings JSON data with fallback to defaults""" + """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)""" + # Check if cache is still valid + now = time.time() + if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl: + return cls._proxy_settings_cache + + # Cache miss or expired - fetch from database try: from core.models import CoreSettings - return CoreSettings.get_proxy_settings() + settings = CoreSettings.get_proxy_settings() + cls._proxy_settings_cache = settings + cls._proxy_settings_cache_time = now + return settings + except Exception: + # Return defaults if database query fails return { "buffering_timeout": 15, "buffering_speed": 1.0, @@ -27,6 +45,13 @@ def get_proxy_settings(cls): "channel_init_grace_period": 5, } + finally: + # Always close the connection after reading settings + try: + connection.close() + except Exception: + pass + @classmethod def get_redis_chunk_ttl(cls): """Get Redis chunk TTL from database or default""" @@ -69,10 +94,10 @@ class TSConfig(BaseConfig): CLEANUP_INTERVAL = 60 # Check for inactive channels every 60 seconds # Client tracking settings - CLIENT_RECORD_TTL = 5 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. + CLIENT_RECORD_TTL = 60 # How long client records persist in Redis (seconds). Client will be considered MIA after this time. 
CLEANUP_CHECK_INTERVAL = 1 # How often to check for disconnected clients (seconds) - CLIENT_HEARTBEAT_INTERVAL = 1 # How often to send client heartbeats (seconds) - GHOST_CLIENT_MULTIPLIER = 5.0 # How many heartbeat intervals before client considered ghost (5 would mean 5 secondsif heartbeat interval is 1) + CLIENT_HEARTBEAT_INTERVAL = 5 # How often to send client heartbeats (seconds) + GHOST_CLIENT_MULTIPLIER = 6.0 # How many heartbeat intervals before client considered ghost (6 would mean 36 seconds if heartbeat interval is 6) CLIENT_WAIT_TIMEOUT = 30 # Seconds to wait for client to connect # Stream health and recovery settings diff --git a/apps/proxy/ts_proxy/client_manager.py b/apps/proxy/ts_proxy/client_manager.py index d4b83d3a1..bffecddee 100644 --- a/apps/proxy/ts_proxy/client_manager.py +++ b/apps/proxy/ts_proxy/client_manager.py @@ -8,7 +8,7 @@ from typing import Set, Optional from apps.proxy.config import TSConfig as Config from redis.exceptions import ConnectionError, TimeoutError -from .constants import EventType +from .constants import EventType, ChannelState, ChannelMetadataField from .config_helper import ConfigHelper from .redis_keys import RedisKeys from .utils import get_logger @@ -26,6 +26,7 @@ def __init__(self, channel_id=None, redis_client=None, heartbeat_interval=1, wor self.lock = threading.Lock() self.last_active_time = time.time() self.worker_id = worker_id # Store worker ID as instance variable + self._heartbeat_running = True # Flag to control heartbeat thread # STANDARDIZED KEYS: Move client set under channel namespace self.client_set_key = RedisKeys.clients(channel_id) @@ -33,6 +34,10 @@ def __init__(self, channel_id=None, redis_client=None, heartbeat_interval=1, wor self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10) self.last_heartbeat_time = {} + # Get ProxyServer instance for ownership checks + from .server import ProxyServer + self.proxy_server = ProxyServer.get_instance() + # Start heartbeat thread for local clients self._start_heartbeat_thread() self._registered_clients = set() # Track already registered client IDs @@ -77,56 +82,28 @@ def _trigger_stats_update(self): logger.debug(f"Failed to trigger stats update: {e}") def _start_heartbeat_thread(self): - """Start thread to regularly refresh client presence in Redis""" + """Start thread to regularly refresh client presence in Redis for local clients""" def heartbeat_task(): - no_clients_count = 0 # Track consecutive empty cycles - max_empty_cycles = 3 # Exit after this many consecutive empty checks - logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") - while True: + while self._heartbeat_running: try: - # Wait for the interval - gevent.sleep(self.heartbeat_interval) + # Wait for the interval, but check stop flag frequently for quick shutdown + # Sleep in 1-second increments to allow faster response to stop signal + for _ in range(int(self.heartbeat_interval)): + if not self._heartbeat_running: + break + time.sleep(1) + + # Final check before doing work + if not self._heartbeat_running: + break # Send heartbeat for all local clients with self.lock: - if not self.clients or not self.redis_client: - # No clients left, increment our counter - no_clients_count += 1 - - # Check if we're in a shutdown delay period before exiting - in_shutdown_delay = False - if self.redis_client: - try: - disconnect_key = RedisKeys.last_client_disconnect(self.channel_id) - disconnect_time_bytes = self.redis_client.get(disconnect_key) - if 
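The reworked heartbeat thread above replaces the "exit after N empty cycles" logic with an explicit stop flag, sleeping in 1-second slices so stop() takes effect quickly even with a longer CLIENT_HEARTBEAT_INTERVAL. A stripped-down, runnable illustration with the Redis work replaced by a counter:

import threading
import time

class Heartbeat:
    def __init__(self, interval=2):
        self.interval = interval
        self._running = True
        self.beats = 0
        self._thread = threading.Thread(target=self._task, daemon=True)
        self._thread.start()

    def _task(self):
        while self._running:
            # Sleep in 1-second increments so the stop flag is honoured promptly.
            for _ in range(int(self.interval)):
                if not self._running:
                    return
                time.sleep(1)
            if self._running:
                self.beats += 1   # the real thread refreshes client TTLs in Redis here

    def stop(self):
        self._running = False     # daemon thread, so no join() is required

hb = Heartbeat(interval=2)
time.sleep(2.5)
hb.stop()
print(f"heartbeats sent: {hb.beats}")   # expect 1 after ~2.5 s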
disconnect_time_bytes: - disconnect_time = float(disconnect_time_bytes.decode('utf-8')) - elapsed = time.time() - disconnect_time - shutdown_delay = ConfigHelper.channel_shutdown_delay() - - if elapsed < shutdown_delay: - in_shutdown_delay = True - logger.debug(f"Channel {self.channel_id} in shutdown delay: {elapsed:.1f}s of {shutdown_delay}s elapsed") - except Exception as e: - logger.debug(f"Error checking shutdown delay: {e}") - - # Only exit if we've seen no clients for several consecutive checks AND we're not in shutdown delay - if no_clients_count >= max_empty_cycles and not in_shutdown_delay: - logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks and not in shutdown delay, exiting heartbeat thread") - return # This exits the thread - - # Skip this cycle if we have no clients but continue if in shutdown delay - if not in_shutdown_delay: - continue - else: - # Reset counter during shutdown delay to prevent premature exit - no_clients_count = 0 - continue - else: - # Reset counter when we see clients - no_clients_count = 0 + # Skip this cycle if we have no local clients + if not self.clients: + continue # IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats current_time = time.time() @@ -197,11 +174,20 @@ def heartbeat_task(): except Exception as e: logger.error(f"Error in client heartbeat thread: {e}") + logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}") + thread = threading.Thread(target=heartbeat_task, daemon=True) thread.name = f"client-heartbeat-{self.channel_id}" thread.start() logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)") + def stop(self): + """Stop the heartbeat thread and cleanup""" + logger.debug(f"Stopping ClientManager for channel {self.channel_id}") + self._heartbeat_running = False + # Give the thread a moment to exit gracefully + # Note: We don't join() here because it's a daemon thread and will exit on its own + def _execute_redis_command(self, command_func): """Execute Redis command with error handling""" if not self.redis_client: @@ -355,16 +341,30 @@ def remove_client(self, client_id): self._notify_owner_of_activity() - # Publish client disconnected event - event_data = json.dumps({ - "event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string - "channel_id": self.channel_id, - "client_id": client_id, - "worker_id": self.worker_id or "unknown", - "timestamp": time.time(), - "remaining_clients": remaining - }) - self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) + # Check if we're the owner - if so, handle locally; if not, publish event + am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id) + + if am_i_owner: + # We're the owner - handle the disconnect directly + logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)") + if remaining == 0: + # Trigger shutdown check directly via ProxyServer method + logger.debug(f"No clients left - triggering immediate shutdown check") + # Spawn greenlet to avoid blocking + import gevent + gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id) + else: + # We're not the owner - publish event so owner can handle it + logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}") + event_data = json.dumps({ + "event": EventType.CLIENT_DISCONNECTED, + "channel_id": 
self.channel_id, + "client_id": client_id, + "worker_id": self.worker_id or "unknown", + "timestamp": time.time(), + "remaining_clients": remaining + }) + self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data) # Trigger channel stats update via WebSocket self._trigger_stats_update() diff --git a/apps/proxy/ts_proxy/config_helper.py b/apps/proxy/ts_proxy/config_helper.py index d59fa1f9e..d7d335586 100644 --- a/apps/proxy/ts_proxy/config_helper.py +++ b/apps/proxy/ts_proxy/config_helper.py @@ -100,3 +100,12 @@ def buffering_speed(): def channel_init_grace_period(): """Get channel initialization grace period in seconds""" return Config.get_channel_init_grace_period() + + @staticmethod + def chunk_timeout(): + """ + Get chunk timeout in seconds (used for both socket and HTTP read timeouts). + This controls how long we wait for each chunk before timing out. + Set this higher (e.g., 30s) for slow providers that may have intermittent delays. + """ + return ConfigHelper.get('CHUNK_TIMEOUT', 5) # Default 5 seconds diff --git a/apps/proxy/ts_proxy/constants.py b/apps/proxy/ts_proxy/constants.py index a72cbfc5b..7baa9e1c7 100644 --- a/apps/proxy/ts_proxy/constants.py +++ b/apps/proxy/ts_proxy/constants.py @@ -33,6 +33,8 @@ class EventType: # Stream types class StreamType: HLS = "hls" + RTSP = "rtsp" + UDP = "udp" TS = "ts" UNKNOWN = "unknown" diff --git a/apps/proxy/ts_proxy/http_streamer.py b/apps/proxy/ts_proxy/http_streamer.py new file mode 100644 index 000000000..147d2c931 --- /dev/null +++ b/apps/proxy/ts_proxy/http_streamer.py @@ -0,0 +1,138 @@ +""" +HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe. +This allows us to use the same fetch_chunk() path for both transcode and HTTP streams. +""" + +import threading +import os +import requests +from requests.adapters import HTTPAdapter +from .utils import get_logger + +logger = get_logger() + + +class HTTPStreamReader: + """Thread-based HTTP stream reader that writes to a pipe""" + + def __init__(self, url, user_agent=None, chunk_size=8192): + self.url = url + self.user_agent = user_agent + self.chunk_size = chunk_size + self.session = None + self.response = None + self.thread = None + self.pipe_read = None + self.pipe_write = None + self.running = False + + def start(self): + """Start the HTTP stream reader thread""" + # Create a pipe (works on Windows and Unix) + self.pipe_read, self.pipe_write = os.pipe() + + # Start the reader thread + self.running = True + self.thread = threading.Thread(target=self._read_stream, daemon=True) + self.thread.start() + + logger.info(f"Started HTTP stream reader thread for {self.url}") + return self.pipe_read + + def _read_stream(self): + """Thread worker that reads HTTP stream and writes to pipe""" + try: + # Build headers + headers = {} + if self.user_agent: + headers['User-Agent'] = self.user_agent + + logger.info(f"HTTP reader connecting to {self.url}") + + # Create session + self.session = requests.Session() + + # Disable retries for faster failure detection + adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1) + self.session.mount('http://', adapter) + self.session.mount('https://', adapter) + + # Stream the URL + self.response = self.session.get( + self.url, + headers=headers, + stream=True, + timeout=(5, 30) # 5s connect, 30s read + ) + + if self.response.status_code != 200: + logger.error(f"HTTP {self.response.status_code} from {self.url}") + return + + logger.info(f"HTTP reader connected successfully, streaming data...") + + # Stream 
chunks to pipe + chunk_count = 0 + for chunk in self.response.iter_content(chunk_size=self.chunk_size): + if not self.running: + break + + if chunk: + try: + # Write binary data to pipe + os.write(self.pipe_write, chunk) + chunk_count += 1 + + # Log progress periodically + if chunk_count % 1000 == 0: + logger.debug(f"HTTP reader streamed {chunk_count} chunks") + except OSError as e: + logger.error(f"Pipe write error: {e}") + break + + logger.info("HTTP stream ended") + + except requests.exceptions.RequestException as e: + logger.error(f"HTTP reader request error: {e}") + except Exception as e: + logger.error(f"HTTP reader unexpected error: {e}", exc_info=True) + finally: + self.running = False + # Close write end of pipe to signal EOF + try: + if self.pipe_write is not None: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + def stop(self): + """Stop the HTTP stream reader""" + logger.info("Stopping HTTP stream reader") + self.running = False + + # Close response + if self.response: + try: + self.response.close() + except: + pass + + # Close session + if self.session: + try: + self.session.close() + except: + pass + + # Close write end of pipe + if self.pipe_write is not None: + try: + os.close(self.pipe_write) + self.pipe_write = None + except: + pass + + # Wait for thread + if self.thread and self.thread.is_alive(): + self.thread.join(timeout=2.0) diff --git a/apps/proxy/ts_proxy/server.py b/apps/proxy/ts_proxy/server.py index da5daaa7e..db5b3d57f 100644 --- a/apps/proxy/ts_proxy/server.py +++ b/apps/proxy/ts_proxy/server.py @@ -19,7 +19,7 @@ from typing import Dict, Optional, Set from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream -from core.utils import RedisClient +from core.utils import RedisClient, log_system_event from redis.exceptions import ConnectionError, TimeoutError from .stream_manager import StreamManager from .stream_buffer import StreamBuffer @@ -131,6 +131,8 @@ def event_listener(): max_retries = 10 base_retry_delay = 1 # Start with 1 second delay max_retry_delay = 30 # Cap at 30 seconds + pubsub_client = None + pubsub = None while True: try: @@ -192,35 +194,11 @@ def event_listener(): self.redis_client.delete(disconnect_key) elif event_type == EventType.CLIENT_DISCONNECTED: - logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}") - # Check if any clients remain - if channel_id in self.client_managers: - # VERIFY REDIS CLIENT COUNT DIRECTLY - client_set_key = RedisKeys.clients(channel_id) - total = self.redis_client.scard(client_set_key) or 0 - - if total == 0: - logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") - # Set the disconnect timer for other workers to see - disconnect_key = RedisKeys.last_client_disconnect(channel_id) - self.redis_client.setex(disconnect_key, 60, str(time.time())) - - # Get configured shutdown delay or default - shutdown_delay = ConfigHelper.channel_shutdown_delay() - - if shutdown_delay > 0: - logger.info(f"Waiting {shutdown_delay}s before stopping channel...") - gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay) - - # Re-check client count before stopping - total = self.redis_client.scard(client_set_key) or 0 - if total > 0: - logger.info(f"New clients connected during shutdown delay - aborting shutdown") - self.redis_client.delete(disconnect_key) - return - - # Stop the channel directly - self.stop_channel(channel_id) + client_id = data.get("client_id") + worker_id = 
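HTTPStreamReader above feeds HTTP chunks into an os.pipe() so the proxy can consume HTTP sources through the same file-descriptor path as transcode output. A minimal runnable sketch of that pipe hand-off, with an in-memory byte source standing in for response.iter_content():

import os
import threading

def start_pipe_reader(chunks):
    """Write an iterable of byte chunks to a pipe from a background thread;
    return the read end so the consumer treats it like any other fd."""
    r, w = os.pipe()

    def worker():
        try:
            for chunk in chunks:
                os.write(w, chunk)   # the real reader pulls from the HTTP response here
        finally:
            os.close(w)              # closing the write end signals EOF to the reader

    threading.Thread(target=worker, daemon=True).start()
    return r

fd = start_pipe_reader([b"MPEG", b"-TS ", b"data"])
data = b""
while True:
    buf = os.read(fd, 4096)
    if not buf:
        break
    data += buf
os.close(fd)
assert data == b"MPEG-TS data"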
data.get("worker_id") + logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}") + # Delegate to dedicated method + self.handle_client_disconnect(channel_id) elif event_type == EventType.STREAM_SWITCH: @@ -339,20 +317,27 @@ def event_listener(): logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})") gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay) - # Try to clean up the old connection - try: - if 'pubsub' in locals(): - pubsub.close() - if 'pubsub_client' in locals(): - pubsub_client.close() - except: - pass - except Exception as e: logger.error(f"Error in event listener: {e}") # Add a short delay to prevent rapid retries on persistent errors gevent.sleep(5) # REPLACE: time.sleep(5) + finally: + # Always clean up PubSub connections in all error paths + try: + if pubsub: + pubsub.close() + pubsub = None + except Exception as e: + logger.debug(f"Error closing pubsub: {e}") + + try: + if pubsub_client: + pubsub_client.close() + pubsub_client = None + except Exception as e: + logger.debug(f"Error closing pubsub_client: {e}") + thread = threading.Thread(target=event_listener, daemon=True) thread.name = "redis-event-listener" thread.start() @@ -486,17 +471,18 @@ def initialize_channel(self, url, channel_id, user_agent=None, transcode=False, ) return True - # Create buffer and client manager instances - buffer = StreamBuffer(channel_id, redis_client=self.redis_client) - client_manager = ClientManager( - channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) + # Create buffer and client manager instances (or reuse if they exist) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Store in local tracking - self.stream_buffers[channel_id] = buffer - self.client_managers[channel_id] = client_manager + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # IMPROVED: Set initializing state in Redis BEFORE any other operations if self.redis_client: @@ -550,13 +536,15 @@ def initialize_channel(self, url, channel_id, user_agent=None, transcode=False, logger.info(f"Channel {channel_id} already owned by worker {current_owner}") logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -571,13 +559,15 @@ def initialize_channel(self, url, channel_id, 
user_agent=None, transcode=False, # Another worker just acquired ownership logger.info(f"Another worker just acquired ownership of channel {channel_id}") - # Create buffer but not stream manager - buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) - self.stream_buffers[channel_id] = buffer + # Create buffer but not stream manager (only if not already exists) + if channel_id not in self.stream_buffers: + buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client) + self.stream_buffers[channel_id] = buffer - # Create client manager with channel_id and redis_client - client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) - self.client_managers[channel_id] = client_manager + # Create client manager with channel_id and redis_client (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id) + self.client_managers[channel_id] = client_manager return True @@ -596,7 +586,7 @@ def initialize_channel(self, url, channel_id, user_agent=None, transcode=False, if channel_user_agent: metadata["user_agent"] = channel_user_agent - # CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged + # Make sure stream_id is always set in metadata and properly logged if channel_stream_id: metadata["stream_id"] = str(channel_stream_id) logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}") @@ -632,13 +622,37 @@ def initialize_channel(self, url, channel_id, user_agent=None, transcode=False, logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}") self.stream_managers[channel_id] = stream_manager - # Create client manager with channel_id, redis_client AND worker_id - client_manager = ClientManager( - channel_id=channel_id, - redis_client=self.redis_client, - worker_id=self.worker_id - ) - self.client_managers[channel_id] = client_manager + # Log channel start event + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Get stream name if stream_id is available + stream_name = None + if channel_stream_id: + try: + stream_obj = Stream.objects.get(id=channel_stream_id) + stream_name = stream_obj.name + except Exception: + pass + + log_system_event( + 'channel_start', + channel_id=channel_id, + channel_name=channel_obj.name, + stream_name=stream_name, + stream_id=channel_stream_id + ) + except Exception as e: + logger.error(f"Could not log channel start event: {e}") + + # Create client manager with channel_id, redis_client AND worker_id (only if not already exists) + if channel_id not in self.client_managers: + client_manager = ClientManager( + channel_id=channel_id, + redis_client=self.redis_client, + worker_id=self.worker_id + ) + self.client_managers[channel_id] = client_manager # Start stream manager thread only for the owner thread = threading.Thread(target=stream_manager.run, daemon=True) @@ -688,9 +702,10 @@ def check_if_channel_exists(self, channel_id): state = metadata.get(b'state', b'unknown').decode('utf-8') owner = metadata.get(b'owner', b'').decode('utf-8') - # States that indicate the channel is running properly + # States that indicate the channel is running properly or shutting down valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, - ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING] + ChannelState.CONNECTING, 
ChannelState.BUFFERING, ChannelState.INITIALIZING, + ChannelState.STOPPING] # If the channel is in a valid state, check if the owner is still active if state in valid_states: @@ -703,12 +718,24 @@ def check_if_channel_exists(self, channel_id): else: # This is a zombie channel - owner is gone but metadata still exists logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active") + + # Check if there are any clients connected + client_set_key = RedisKeys.clients(channel_id) + client_count = self.redis_client.scard(client_set_key) or 0 + + if client_count > 0: + logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover") + # Could potentially take ownership here in the future + # For now, just clean it up to be safe + else: + logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up") + self._clean_zombie_channel(channel_id, metadata) return False - elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]: - # These states indicate the channel should be reinitialized - logger.info(f"Channel {channel_id} exists but in terminal state: {state}") - return True + elif state in [ChannelState.STOPPED, ChannelState.ERROR]: + # These terminal states indicate the channel should be cleaned up and reinitialized + logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup") + return False else: # Unknown or initializing state, check how long it's been in this state if b'state_changed_at' in metadata: @@ -772,6 +799,44 @@ def _clean_zombie_channel(self, channel_id, metadata=None): logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True) return False + def handle_client_disconnect(self, channel_id): + """ + Handle client disconnect event - check if channel should shut down. + Can be called directly by owner or via PubSub from non-owner workers. 
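A compact restatement of the channel-state decision above: STOPPING now counts as a live state, STOPPED/ERROR trigger cleanup and re-initialization, and a missing owner heartbeat marks a zombie that is only auto-cleaned when no clients remain. The state strings stand in for the ChannelState constants.

def classify_channel(state, owner_alive, client_count):
    valid_states = {"active", "waiting_for_clients", "connecting",
                    "buffering", "initializing", "stopping"}
    if state in valid_states:
        if owner_alive:
            return "running"
        # Zombie: metadata exists but the owning worker is gone.
        return "zombie-keep" if client_count > 0 else "zombie-clean"
    if state in {"stopped", "error"}:
        return "reinitialize"
    return "check-age"   # any other state falls through to the age check in the real code

assert classify_channel("active", owner_alive=False, client_count=0) == "zombie-clean"
assert classify_channel("stopped", owner_alive=True, client_count=2) == "reinitialize"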
+ """ + if channel_id not in self.client_managers: + return + + try: + # VERIFY REDIS CLIENT COUNT DIRECTLY + client_set_key = RedisKeys.clients(channel_id) + total = self.redis_client.scard(client_set_key) or 0 + + if total == 0: + logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}") + # Set the disconnect timer for other workers to see + disconnect_key = RedisKeys.last_client_disconnect(channel_id) + self.redis_client.setex(disconnect_key, 60, str(time.time())) + + # Get configured shutdown delay or default + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if shutdown_delay > 0: + logger.info(f"Waiting {shutdown_delay}s before stopping channel...") + gevent.sleep(shutdown_delay) + + # Re-check client count before stopping + total = self.redis_client.scard(client_set_key) or 0 + if total > 0: + logger.info(f"New clients connected during shutdown delay - aborting shutdown") + self.redis_client.delete(disconnect_key) + return + + # Stop the channel directly + self.stop_channel(channel_id) + except Exception as e: + logger.error(f"Error handling client disconnect for channel {channel_id}: {e}") + def stop_channel(self, channel_id): """Stop a channel with proper ownership handling""" try: @@ -819,6 +884,41 @@ def stop_channel(self, channel_id): self.release_ownership(channel_id) logger.info(f"Released ownership of channel {channel_id}") + # Log channel stop event (after cleanup, before releasing ownership section ends) + try: + channel_obj = Channel.objects.get(uuid=channel_id) + + # Calculate runtime and get total bytes from metadata + runtime = None + total_bytes = None + if self.redis_client: + metadata_key = RedisKeys.channel_metadata(channel_id) + metadata = self.redis_client.hgetall(metadata_key) + if metadata: + # Calculate runtime from init_time + if b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + runtime = round(time.time() - init_time, 2) + except Exception: + pass + # Get total bytes transferred + if b'total_bytes' in metadata: + try: + total_bytes = int(metadata[b'total_bytes'].decode('utf-8')) + except Exception: + pass + + log_system_event( + 'channel_stop', + channel_id=channel_id, + channel_name=channel_obj.name, + runtime=runtime, + total_bytes=total_bytes + ) + except Exception as e: + logger.error(f"Could not log channel stop event: {e}") + # Always clean up local resources - WITH SAFE CHECKS if channel_id in self.stream_managers: del self.stream_managers[channel_id] @@ -846,6 +946,10 @@ def stop_channel(self, channel_id): # Clean up client manager - SAFE CHECK HERE TOO if channel_id in self.client_managers: try: + client_manager = self.client_managers[channel_id] + # Stop the heartbeat thread before deleting + if hasattr(client_manager, 'stop'): + client_manager.stop() del self.client_managers[channel_id] logger.info(f"Removed client manager for channel {channel_id}") except KeyError: @@ -920,6 +1024,15 @@ def cleanup_task(): if channel_id in self.client_managers: client_manager = self.client_managers[channel_id] total_clients = client_manager.get_total_client_count() + else: + # This can happen during reconnection attempts or crashes + # Check Redis directly for any connected clients + if self.redis_client: + client_set_key = RedisKeys.clients(channel_id) + total_clients = self.redis_client.scard(client_set_key) or 0 + + if total_clients == 0: + logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup") # Log client 
count periodically if time.time() % 30 < 1: # Every ~30 seconds @@ -927,7 +1040,14 @@ def cleanup_task(): # If in connecting or waiting_for_clients state, check grace period if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]: - # Get connection ready time from metadata + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + + # Get connection_ready_time from metadata (indicates if channel reached ready state) connection_ready_time = None if metadata and b'connection_ready_time' in metadata: try: @@ -935,17 +1055,60 @@ def cleanup_task(): except (ValueError, TypeError): pass - # If still connecting, give it more time - if channel_state == ChannelState.CONNECTING: - logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet") - continue - - # If waiting for clients, check grace period - if connection_ready_time: + if total_clients == 0: + # Check if we have a connection_attempt timestamp (set when CONNECTING starts) + connection_attempt_time = None + attempt_key = RedisKeys.connection_attempt(channel_id) + if self.redis_client: + attempt_value = self.redis_client.get(attempt_key) + if attempt_value: + try: + connection_attempt_time = float(attempt_value.decode('utf-8')) + except (ValueError, TypeError): + pass + + # Also get init time as a fallback + init_time = None + if metadata and b'init_time' in metadata: + try: + init_time = float(metadata[b'init_time'].decode('utf-8')) + except (ValueError, TypeError): + pass + + # Use whichever timestamp we have (prefer connection_attempt as it's more recent) + start_time = connection_attempt_time or init_time + + if start_time: + # Check which timeout to apply based on channel lifecycle + if connection_ready_time: + # Already reached ready - use shutdown_delay + time_since_ready = time.time() - connection_ready_time + shutdown_delay = ConfigHelper.channel_shutdown_delay() + + if time_since_ready > shutdown_delay: + logger.warning( + f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s " + f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel" + ) + self.stop_channel(channel_id) + continue + else: + # Never reached ready - use grace_period timeout + time_since_start = time.time() - start_time + connecting_timeout = ConfigHelper.channel_init_grace_period() + + if time_since_start > connecting_timeout: + logger.warning( + f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s " + f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues" + ) + self.stop_channel(channel_id) + continue + elif connection_ready_time: + # We have clients now, but check grace period for state transition grace_period = ConfigHelper.channel_init_grace_period() time_since_ready = time.time() - connection_ready_time - # Add this debug log logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, " f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, " f"total_clients={total_clients}") @@ -954,16 +1117,9 @@ def cleanup_task(): # Still within grace period logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed") continue - elif total_clients == 0: - # Grace period expired with no clients - logger.info(f"Grace 
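The cleanup task above now distinguishes two timeouts for idle CONNECTING/WAITING_FOR_CLIENTS channels: the shutdown delay once the channel has reached ready, and the init grace period when it never became ready. A condensed decision function with assumed parameter names and illustrative defaults:

def should_stop(now, clients, ready_time, start_time,
                shutdown_delay=10, grace_period=5):
    if clients > 0 or start_time is None:
        return False
    if ready_time is not None:
        # Channel reached "ready" at least once: allow the normal shutdown delay.
        return (now - ready_time) > shutdown_delay
    # Never became ready: treat as an upstream problem after the init grace period.
    return (now - start_time) > grace_period

assert should_stop(now=100, clients=0, ready_time=None, start_time=90) is True
assert should_stop(now=100, clients=0, ready_time=95, start_time=80) is False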
period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}") - self.stop_channel(channel_id) else: - # Grace period expired but we have clients - mark channel as active + # Grace period expired with clients - mark channel as active logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active") - old_state = "unknown" - if metadata and b'state' in metadata: - old_state = metadata[b'state'].decode('utf-8') if self.update_channel_state(channel_id, ChannelState.ACTIVE, { "grace_period_ended_at": str(time.time()), "clients_at_activation": str(total_clients) @@ -971,6 +1127,13 @@ def cleanup_task(): logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period") # If active and no clients, start normal shutdown procedure elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0: + # Check if channel is already stopping + if self.redis_client: + stop_key = RedisKeys.channel_stopping(channel_id) + if self.redis_client.exists(stop_key): + logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown") + continue + # Check if there's a pending no-clients timeout disconnect_key = RedisKeys.last_client_disconnect(channel_id) disconnect_time = None @@ -1030,14 +1193,30 @@ def cleanup_task(): continue # Check for local client count - if zero, clean up our local resources - if self.client_managers[channel_id].get_client_count() == 0: - # We're not the owner, and we have no local clients - clean up our resources - logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + if channel_id in self.client_managers: + if self.client_managers[channel_id].get_client_count() == 0: + # We're not the owner, and we have no local clients - clean up our resources + logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources") + self._cleanup_local_resources(channel_id) + else: + # This shouldn't happen, but clean up anyway + logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources") self._cleanup_local_resources(channel_id) except Exception as e: logger.error(f"Error in cleanup thread: {e}", exc_info=True) + # Periodically check for orphaned channels (every 30 seconds) + if hasattr(self, '_last_orphan_check'): + if time.time() - self._last_orphan_check > 30: + try: + self._check_orphaned_metadata() + self._last_orphan_check = time.time() + except Exception as orphan_error: + logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True) + else: + self._last_orphan_check = time.time() + gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval()) thread = threading.Thread(target=cleanup_task, daemon=True) @@ -1059,10 +1238,6 @@ def _check_orphaned_channels(self): try: channel_id = key.decode('utf-8').split(':')[2] - # Skip channels we already have locally - if channel_id in self.stream_buffers: - continue - # Check if this channel has an owner owner = self.get_channel_owner(channel_id) @@ -1077,13 +1252,84 @@ def _check_orphaned_channels(self): else: # Orphaned channel with no clients - clean it up logger.info(f"Cleaning up orphaned channel {channel_id}") - self._clean_redis_keys(channel_id) + + # If we have it locally, stop it properly to clean up processes + if channel_id in self.stream_managers or 
channel_id in self.client_managers:
+                            logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel")
+                            self.stop_channel(channel_id)
+                        else:
+                            # Just clean up Redis keys for remote channels
+                            self._clean_redis_keys(channel_id)
 
                 except Exception as e:
                     logger.error(f"Error processing channel key {key}: {e}")
 
         except Exception as e:
             logger.error(f"Error checking orphaned channels: {e}")
 
+    def _check_orphaned_metadata(self):
+        """
+        Check for metadata entries that have no owner and no clients.
+        This catches zombie channels that weren't cleaned up properly.
+        """
+        if not self.redis_client:
+            return
+
+        try:
+            # Get all channel metadata keys
+            channel_pattern = "ts_proxy:channel:*:metadata"
+            channel_keys = self.redis_client.keys(channel_pattern)
+
+            for key in channel_keys:
+                try:
+                    channel_id = key.decode('utf-8').split(':')[2]
+
+                    # Get metadata first
+                    metadata = self.redis_client.hgetall(key)
+                    if not metadata:
+                        # Empty metadata - clean it up
+                        logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up")
+                        # If we have it locally, stop it properly
+                        if channel_id in self.stream_managers or channel_id in self.client_managers:
+                            self.stop_channel(channel_id)
+                        else:
+                            self._clean_redis_keys(channel_id)
+                        continue
+
+                    # Get owner
+                    owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else ''
+
+                    # Check if owner is still alive
+                    owner_alive = False
+                    if owner:
+                        owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
+                        owner_alive = self.redis_client.exists(owner_heartbeat_key)
+
+                    # Check client count
+                    client_set_key = RedisKeys.clients(channel_id)
+                    client_count = self.redis_client.scard(client_set_key) or 0
+
+                    # If no owner and no clients, clean it up
+                    if not owner_alive and client_count == 0:
+                        state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown'
+                        logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up")
+
+                        # If we have it locally, stop it properly to clean up transcode/proxy processes
+                        if channel_id in self.stream_managers or channel_id in self.client_managers:
+                            logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes")
+                            self.stop_channel(channel_id)
+                        else:
+                            # Just clean up Redis keys for remote channels
+                            self._clean_redis_keys(channel_id)
+                    elif not owner_alive and client_count > 0:
+                        # Owner is gone but clients remain - just log for now
+                        logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover")
+
+                except Exception as e:
+                    logger.error(f"Error processing metadata key {key}: {e}", exc_info=True)
+
+        except Exception as e:
+            logger.error(f"Error checking orphaned metadata: {e}", exc_info=True)
+
     def _clean_redis_keys(self, channel_id):
         """Clean up all Redis keys for a channel more efficiently"""
         # Release the channel, stream, and profile keys from the channel
diff --git a/apps/proxy/ts_proxy/services/channel_service.py b/apps/proxy/ts_proxy/services/channel_service.py
index 932479eae..6484cd3ff 100644
--- a/apps/proxy/ts_proxy/services/channel_service.py
+++ b/apps/proxy/ts_proxy/services/channel_service.py
@@ -14,6 +14,7 @@
 from ..redis_keys import RedisKeys
 from ..constants import EventType, ChannelState, ChannelMetadataField
 from ..url_utils import get_stream_info_for_switch
+from core.utils import log_system_event
 
 logger = logging.getLogger("ts_proxy")
 
@@ -597,32 +598,41 @@ def 
_update_stream_info_in_redis(channel_id, codec, resolution, width, height, f @staticmethod def _update_stream_stats_in_db(stream_id, **stats): """Update stream stats in database""" + from django.db import connection + try: from apps.channels.models import Stream from django.utils import timezone - + stream = Stream.objects.get(id=stream_id) - + # Get existing stats or create new dict current_stats = stream.stream_stats or {} - + # Update with new stats for key, value in stats.items(): if value is not None: current_stats[key] = value - + # Save updated stats and timestamp stream.stream_stats = current_stats stream.stream_stats_updated_at = timezone.now() stream.save(update_fields=['stream_stats', 'stream_stats_updated_at']) - + logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}") return True - + except Exception as e: logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}") return False + finally: + # Always close database connection after update + try: + connection.close() + except Exception: + pass + # Helper methods for Redis operations @staticmethod @@ -678,7 +688,7 @@ def _publish_stream_switch_event(channel_id, new_url, user_agent=None, stream_id switch_request = { "event": EventType.STREAM_SWITCH, - "channel_id": channel_id, + "channel_id": str(channel_id), "url": new_url, "user_agent": user_agent, "stream_id": stream_id, @@ -691,6 +701,7 @@ def _publish_stream_switch_event(channel_id, new_url, user_agent=None, stream_id RedisKeys.events_channel(channel_id), json.dumps(switch_request) ) + return True @staticmethod @@ -703,7 +714,7 @@ def _publish_channel_stop_event(channel_id): stop_request = { "event": EventType.CHANNEL_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() } @@ -726,7 +737,7 @@ def _publish_client_stop_event(channel_id, client_id): stop_request = { "event": EventType.CLIENT_STOP, - "channel_id": channel_id, + "channel_id": str(channel_id), "client_id": client_id, "requester_worker_id": proxy_server.worker_id, "timestamp": time.time() diff --git a/apps/proxy/ts_proxy/stream_buffer.py b/apps/proxy/ts_proxy/stream_buffer.py index a5169c3a1..85feb5ddb 100644 --- a/apps/proxy/ts_proxy/stream_buffer.py +++ b/apps/proxy/ts_proxy/stream_buffer.py @@ -303,6 +303,14 @@ def get_optimized_client_data(self, client_index): # Retrieve chunks chunks = self.get_chunks_exact(client_index, chunk_count) + # Check if we got significantly fewer chunks than expected (likely due to expiration) + # Only check if we expected multiple chunks and got none or very few + if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10: + # Chunks are missing - likely expired from Redis + # Return empty list to signal client should skip forward + logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)") + return [], client_index + # Check total size total_size = sum(len(c) for c in chunks) @@ -316,7 +324,7 @@ def get_optimized_client_data(self, client_index): additional_size = sum(len(c) for c in more_chunks) if total_size + additional_size <= MAX_SIZE: chunks.extend(more_chunks) - chunk_count += additional + chunk_count += len(more_chunks) # Fixed: count actual additional chunks retrieved return chunks, client_index + chunk_count diff --git a/apps/proxy/ts_proxy/stream_generator.py b/apps/proxy/ts_proxy/stream_generator.py index 817a7b820..50404f1df 100644 --- a/apps/proxy/ts_proxy/stream_generator.py 
+++ b/apps/proxy/ts_proxy/stream_generator.py @@ -8,6 +8,8 @@ import threading import gevent # Add this import at the top of your file from apps.proxy.config import TSConfig as Config +from apps.channels.models import Channel +from core.utils import log_system_event from .server import ProxyServer from .utils import create_ts_packet, get_logger from .redis_keys import RedisKeys @@ -52,6 +54,10 @@ def __init__(self, channel_id, client_id, client_ip, client_user_agent, channel_ self.last_stats_bytes = 0 self.current_rate = 0.0 + # TTL refresh tracking + self.last_ttl_refresh = time.time() + self.ttl_refresh_interval = 3 # Refresh TTL every 3 seconds of active streaming + def generate(self): """ Generator function that produces the stream content for the client. @@ -84,6 +90,20 @@ def generate(self): if not self._setup_streaming(): return + # Log client connect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_connect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None + ) + except Exception as e: + logger.error(f"Could not log client connect event: {e}") + # Main streaming loop for chunk in self._stream_data_generator(): yield chunk @@ -204,6 +224,18 @@ def _stream_data_generator(self): self.empty_reads += 1 self.consecutive_empty += 1 + # Check if we're too far behind (chunks expired from Redis) + chunks_behind = self.buffer.index - self.local_index + if chunks_behind > 50: # If more than 50 chunks behind, jump forward + # Calculate new position: stay a few chunks behind current buffer + initial_behind = ConfigHelper.initial_behind_chunks() + new_index = max(self.local_index, self.buffer.index - initial_behind) + + logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}") + self.local_index = new_index + self.consecutive_empty = 0 # Reset since we're repositioning + continue # Try again immediately with new position + if self._should_send_keepalive(self.local_index): keepalive_packet = create_ts_packet('keepalive') logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head") @@ -324,7 +356,20 @@ def _process_chunks(self, chunks, next_index): ChannelMetadataField.STATS_UPDATED_AT: str(current_time) } proxy_server.redis_client.hset(client_key, mapping=stats) - # No need to set expiration as client heartbeat will refresh this key + + # Refresh TTL periodically while actively streaming + # This provides proof-of-life independent of heartbeat thread + if current_time - self.last_ttl_refresh > self.ttl_refresh_interval: + try: + # Refresh TTL on client key + proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL) + # Also refresh the client set TTL + client_set_key = f"ts_proxy:channel:{self.channel_id}:clients" + proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL) + self.last_ttl_refresh = current_time + logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)") + except Exception as ttl_error: + logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}") except Exception as e: logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}") @@ -410,6 +455,22 @@ def _cleanup(self): total_clients = client_manager.get_total_client_count() logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: 
{local_clients}, total: {total_clients})") + # Log client disconnect event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'client_disconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + client_ip=self.client_ip, + client_id=self.client_id, + user_agent=self.client_user_agent[:100] if self.client_user_agent else None, + duration=round(elapsed, 2), + bytes_sent=self.bytes_sent + ) + except Exception as e: + logger.error(f"Could not log client disconnect event: {e}") + # Schedule channel shutdown if no clients left if not stream_released: # Only if we haven't already released the stream self._schedule_channel_shutdown_if_needed(local_clients) diff --git a/apps/proxy/ts_proxy/stream_manager.py b/apps/proxy/ts_proxy/stream_manager.py index e80d4527e..bbeb4bb75 100644 --- a/apps/proxy/ts_proxy/stream_manager.py +++ b/apps/proxy/ts_proxy/stream_manager.py @@ -9,11 +9,14 @@ import gevent import re from typing import Optional, List +from django.db import connection from django.shortcuts import get_object_or_404 +from urllib3.exceptions import ReadTimeoutError from apps.proxy.config import TSConfig as Config from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile from core.models import UserAgent, CoreSettings +from core.utils import log_system_event from .stream_buffer import StreamBuffer from .utils import detect_stream_type, get_logger from .redis_keys import RedisKeys @@ -91,11 +94,13 @@ def __init__(self, channel_id, url, buffer, user_agent=None, transcode=False, st self.tried_stream_ids.add(self.current_stream_id) logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}") else: - logger.warning(f"No stream_id found in Redis for channel {channel_id}") + logger.warning(f"No stream_id found in Redis for channel {channel_id}. " + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") except Exception as e: logger.warning(f"Error loading stream ID from Redis: {e}") else: - logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly") + logger.warning(f"Unable to get stream ID for channel {channel_id}. 
" + f"Stream switching will rely on URL comparison to avoid selecting the same stream.") logger.info(f"Initialized stream manager for channel {buffer.channel_id}") @@ -111,6 +116,9 @@ def __init__(self, channel_id, url, buffer, user_agent=None, transcode=False, st self.stderr_reader_thread = None self.ffmpeg_input_phase = True # Track if we're still reading input info + # Add HTTP reader thread property + self.http_reader = None + def _create_session(self): """Create and configure requests session with optimal settings""" session = requests.Session() @@ -220,11 +228,12 @@ def run(self): # Continue with normal flow # Check stream type before connecting - stream_type = detect_stream_type(self.url) - if self.transcode == False and stream_type == StreamType.HLS: - logger.info(f"Detected HLS stream: {self.url} for channel {self.channel_id}") - logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively for channel {self.channel_id}") - # Enable transcoding for HLS streams + self.stream_type = detect_stream_type(self.url) + if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP): + stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP") + logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}") + logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}") + # Enable transcoding for HLS, RTSP/RTP, and UDP streams self.transcode = True # We'll override the stream profile selection with ffmpeg in the transcoding section self.force_ffmpeg = True @@ -252,6 +261,20 @@ def run(self): # Store connection start time to measure success duration connection_start_time = time.time() + # Log reconnection event if this is a retry (not first attempt) + if self.retry_count > 0: + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + attempt=self.retry_count + 1, + max_attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + # Successfully connected - read stream data until disconnect/error self._process_stream_data() # If we get here, the connection was closed/failed @@ -281,6 +304,20 @@ def run(self): if self.retry_count >= self.max_retries: url_failed = True logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}") + + # Log connection error event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_failed', + url=self.url[:100] if self.url else None, + attempts=self.max_retries + ) + except Exception as e: + logger.error(f"Could not log connection error event: {e}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds @@ -294,6 +331,21 @@ def run(self): if self.retry_count >= self.max_retries: url_failed = True + + # Log connection error event with exception details + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_error', + channel_id=self.channel_id, + channel_name=channel_obj.name, + error_type='connection_exception', + error_message=str(e)[:200], + url=self.url[:100] if self.url else None, + 
attempts=self.max_retries + ) + except Exception as log_error: + logger.error(f"Could not log connection error event: {log_error}") else: # Wait with exponential backoff before retrying timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds @@ -378,6 +430,12 @@ def run(self): except Exception as e: logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True) + # Close database connection for this thread + try: + connection.close() + except Exception: + pass + logger.info(f"Stream manager stopped for channel {self.channel_id}") def _establish_transcode_connection(self): @@ -407,7 +465,7 @@ def _establish_transcode_connection(self): from core.models import StreamProfile try: stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True) - logger.info("Using FFmpeg stream profile for HLS content") + logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)") except StreamProfile.DoesNotExist: # Fall back to channel's profile if FFmpeg not found stream_profile = channel.get_stream_profile() @@ -417,6 +475,13 @@ def _establish_transcode_connection(self): # Build and start transcode command self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent) + + # For UDP streams, remove any user_agent parameters from the command + if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP: + # Filter out any arguments that contain the user_agent value or related headers + self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()] + logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}") + logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}") # Modified to capture stderr instead of discarding it @@ -681,6 +746,19 @@ def _parse_ffmpeg_stats(self, stats_line): # Reset buffering state self.buffering = False self.buffering_start_time = None + + # Log failover event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_failover', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='buffering_timeout', + duration=buffering_duration + ) + except Exception as e: + logger.error(f"Could not log failover event: {e}") else: logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout") else: @@ -688,6 +766,19 @@ def _parse_ffmpeg_stats(self, stats_line): self.buffering = True self.buffering_start_time = time.time() logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x") + + # Log system event for buffering + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_buffering', + channel_id=self.channel_id, + channel_name=channel_obj.name, + speed=ffmpeg_speed + ) + except Exception as e: + logger.error(f"Could not log buffering event: {e}") + # Log buffering warning logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected") # Set channel state to buffering @@ -737,9 +828,9 @@ def _update_ffmpeg_stats_in_redis(self, speed, fps, actual_fps, output_bitrate): def _establish_http_connection(self): - """Establish a direct HTTP connection to the stream""" + """Establish HTTP connection using thread-based reader (same as transcode path)""" try: - logger.debug(f"Using TS Proxy to 
connect to stream: {self.url}") + logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}") # Check if we already have active HTTP connections if self.current_response or self.current_session: @@ -756,41 +847,39 @@ def _establish_http_connection(self): logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}") self._close_socket() - # Create new session for each connection attempt - session = self._create_session() - self.current_session = session + # Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor + # This allows us to use the same fetch_chunk() path as transcode + from .http_streamer import HTTPStreamReader - # Stream the URL with proper timeout handling - response = session.get( - self.url, - stream=True, - timeout=(10, 60) # 10s connect timeout, 60s read timeout + # Create and start the HTTP stream reader + self.http_reader = HTTPStreamReader( + url=self.url, + user_agent=self.user_agent, + chunk_size=self.chunk_size ) - self.current_response = response - if response.status_code == 200: - self.connected = True - self.healthy = True - logger.info(f"Successfully connected to stream source for channel {self.channel_id}") + # Start the reader thread and get the read end of the pipe + pipe_fd = self.http_reader.start() - # Store connection start time for stability tracking - self.connection_start_time = time.time() + # Wrap the file descriptor in a file object (same as transcode stdout) + import os + self.socket = os.fdopen(pipe_fd, 'rb', buffering=0) + self.connected = True + self.healthy = True - # Set channel state to waiting for clients - self._set_waiting_for_clients() + logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}") + + # Store connection start time for stability tracking + self.connection_start_time = time.time() + + # Set channel state to waiting for clients + self._set_waiting_for_clients() + + return True - return True - else: - logger.error(f"Failed to connect to stream for channel {self.channel_id}: HTTP {response.status_code}") - self._close_connection() - return False - except requests.exceptions.RequestException as e: - logger.error(f"HTTP request error: {e}") - self._close_connection() - return False except Exception as e: logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True) - self._close_connection() + self._close_socket() return False def _update_bytes_processed(self, chunk_size): @@ -818,48 +907,19 @@ def _update_bytes_processed(self, chunk_size): logger.error(f"Error updating bytes processed: {e}") def _process_stream_data(self): - """Process stream data until disconnect or error""" + """Process stream data until disconnect or error - unified path for both transcode and HTTP""" try: - if self.transcode: - # Handle transcoded stream data - while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch: - if self.fetch_chunk(): - self.last_data_time = time.time() - else: - if not self.running: - break - gevent.sleep(0.1) # REPLACE time.sleep(0.1) - else: - # Handle direct HTTP connection - chunk_count = 0 - try: - for chunk in self.current_response.iter_content(chunk_size=self.chunk_size): - # Check if we've been asked to stop - if self.stop_requested or self.url_switching or self.needs_stream_switch: - break - - if chunk: - # Track chunk size before adding to buffer - chunk_size = len(chunk) - self._update_bytes_processed(chunk_size) - - # Add 
chunk to buffer with TS packet alignment - success = self.buffer.add_chunk(chunk) - - if success: - self.last_data_time = time.time() - chunk_count += 1 - - # Update last data timestamp in Redis - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - last_data_key = RedisKeys.last_data(self.buffer.channel_id) - self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60) - except (AttributeError, ConnectionError) as e: - if self.stop_requested or self.url_switching: - logger.debug(f"Expected connection error during shutdown/URL switch for channel {self.channel_id}: {e}") - else: - logger.error(f"Unexpected stream error for channel {self.channel_id}: {e}") - raise + # Both transcode and HTTP now use the same subprocess/socket approach + # This gives us perfect control: check flags between chunks, timeout just returns False + while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch: + if self.fetch_chunk(): + self.last_data_time = time.time() + else: + # fetch_chunk() returned False - could be timeout, no data, or error + if not self.running: + break + # Brief sleep before retry to avoid tight loop + gevent.sleep(0.1) except Exception as e: logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True) @@ -948,6 +1008,7 @@ def update_url(self, new_url, stream_id=None, m3u_profile_id=None): # Import both models for proper resource management from apps.channels.models import Stream, Channel + from django.db import connection # Update stream profile if we're switching streams if self.current_stream_id and stream_id and self.current_stream_id != stream_id: @@ -965,9 +1026,17 @@ def update_url(self, new_url, stream_id=None, m3u_profile_id=None): logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}") else: logger.warning(f"Failed to update stream profile for channel {self.channel_id}") + except Exception as e: logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}") + finally: + # Always close database connection after profile update + try: + connection.close() + except Exception: + pass + # CRITICAL: Set a flag to prevent immediate reconnection with old URL self.url_switching = True self.url_switch_start_time = time.time() @@ -1005,6 +1074,19 @@ def update_url(self, new_url, stream_id=None, m3u_profile_id=None): except Exception as e: logger.warning(f"Failed to reset buffer position: {e}") + # Log stream switch event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'stream_switch', + channel_id=self.channel_id, + channel_name=channel_obj.name, + new_url=new_url[:100] if new_url else None, + stream_id=stream_id + ) + except Exception as e: + logger.error(f"Could not log stream switch event: {e}") + return True except Exception as e: logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True) @@ -1123,6 +1205,19 @@ def _attempt_reconnect(self): if connection_result: self.connection_start_time = time.time() logger.info(f"Reconnect successful for channel {self.channel_id}") + + # Log reconnection event + try: + channel_obj = Channel.objects.get(uuid=self.channel_id) + log_system_event( + 'channel_reconnect', + channel_id=self.channel_id, + channel_name=channel_obj.name, + reason='health_monitor' + ) + except Exception as e: + logger.error(f"Could not log reconnection event: {e}") + return True else: logger.warning(f"Reconnect failed for channel 
{self.channel_id}") @@ -1183,6 +1278,15 @@ def _close_socket(self): if self.current_response or self.current_session: self._close_connection() + # Stop HTTP reader thread if it exists + if hasattr(self, 'http_reader') and self.http_reader: + try: + logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}") + self.http_reader.stop() + self.http_reader = None + except Exception as e: + logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}") + # Otherwise handle socket and transcode resources if self.socket: try: @@ -1191,25 +1295,17 @@ def _close_socket(self): logger.debug(f"Error closing socket for channel {self.channel_id}: {e}") pass - # Enhanced transcode process cleanup with more aggressive termination + # Enhanced transcode process cleanup with immediate termination if self.transcode_process: try: - # First try polite termination - logger.debug(f"Terminating transcode process for channel {self.channel_id}") - self.transcode_process.terminate() + logger.debug(f"Killing transcode process for channel {self.channel_id}") + self.transcode_process.kill() - # Give it a short time to terminate gracefully + # Give it a very short time to die try: - self.transcode_process.wait(timeout=1.0) + self.transcode_process.wait(timeout=0.5) except subprocess.TimeoutExpired: - # If it doesn't terminate quickly, kill it - logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}") - self.transcode_process.kill() - - try: - self.transcode_process.wait(timeout=1.0) - except subprocess.TimeoutExpired: - logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") + logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}") except Exception as e: logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}") @@ -1219,6 +1315,30 @@ def _close_socket(self): except Exception as e: logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}") + # Explicitly close all subprocess pipes to prevent file descriptor leaks + try: + if self.transcode_process.stdin: + self.transcode_process.stdin.close() + if self.transcode_process.stdout: + self.transcode_process.stdout.close() + if self.transcode_process.stderr: + self.transcode_process.stderr.close() + logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}") + + # Join stderr reader thread to ensure it's fully terminated + if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive(): + try: + logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}") + self.stderr_reader_thread.join(timeout=2.0) + if self.stderr_reader_thread.is_alive(): + logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}") + except Exception as e: + logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}") + finally: + self.stderr_reader_thread = None + self.transcode_process = None self.transcode_process_active = False # Reset the flag @@ -1250,7 +1370,7 @@ def fetch_chunk(self): try: # Set timeout for chunk reads - chunk_timeout = ConfigHelper.get('CHUNK_TIMEOUT', 10) # Default 10 seconds + chunk_timeout = ConfigHelper.chunk_timeout() # Use centralized timeout configuration try: # Handle different socket 
types with timeout @@ -1333,7 +1453,17 @@ def _set_waiting_for_clients(self): # Only update if not already past connecting if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]: # NEW CODE: Check if buffer has enough chunks - current_buffer_index = getattr(self.buffer, 'index', 0) + # IMPORTANT: Read from Redis, not local buffer.index, because in multi-worker setup + # each worker has its own StreamBuffer instance with potentially stale local index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + initial_chunks_needed = ConfigHelper.initial_behind_chunks() if current_buffer_index < initial_chunks_needed: @@ -1381,10 +1511,21 @@ def _check_buffer_and_set_state(self): # Clean up completed timers self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()] - if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'): - current_buffer_index = self.buffer.index - initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10) + if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'): channel_id = self.buffer.channel_id + redis_client = self.buffer.redis_client + + # IMPORTANT: Read from Redis, not local buffer.index + buffer_index_key = RedisKeys.buffer_index(channel_id) + current_buffer_index = 0 + try: + redis_index = redis_client.get(buffer_index_key) + if redis_index: + current_buffer_index = int(redis_index) + except Exception as e: + logger.error(f"Error reading buffer index from Redis: {e}") + + initial_chunks_needed = ConfigHelper.initial_behind_chunks() # Use ConfigHelper for consistency if current_buffer_index >= initial_chunks_needed: # We now have enough buffer, call _set_waiting_for_clients again @@ -1409,6 +1550,7 @@ def _check_buffer_and_set_state(self): def _try_next_stream(self): """ Try to switch to the next available stream for this channel. + Will iterate through multiple alternate streams if needed to find one with a different URL. 
Returns: bool: True if successfully switched to a new stream, False otherwise @@ -1434,60 +1576,71 @@ def _try_next_stream(self): logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}") return False - # Get the next stream to try - next_stream = untried_streams[0] - stream_id = next_stream['stream_id'] - profile_id = next_stream['profile_id'] # This is the M3U profile ID we need + # IMPROVED: Try multiple streams until we find one with a different URL + for next_stream in untried_streams: + stream_id = next_stream['stream_id'] + profile_id = next_stream['profile_id'] # This is the M3U profile ID we need - # Add to tried streams - self.tried_stream_ids.add(stream_id) + # Add to tried streams + self.tried_stream_ids.add(stream_id) - # Get stream info including URL using the profile_id we already have - logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}") - stream_info = get_stream_info_for_switch(self.channel_id, stream_id) + # Get stream info including URL using the profile_id we already have + logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}") + stream_info = get_stream_info_for_switch(self.channel_id, stream_id) - if 'error' in stream_info or not stream_info.get('url'): - logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}") - return False + if 'error' in stream_info or not stream_info.get('url'): + logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}") + continue # Try next stream instead of giving up - # Update URL and user agent - new_url = stream_info['url'] - new_user_agent = stream_info['user_agent'] - new_transcode = stream_info['transcode'] + # Update URL and user agent + new_url = stream_info['url'] + new_user_agent = stream_info['user_agent'] + new_transcode = stream_info['transcode'] - logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") + # CRITICAL FIX: Check if the new URL is the same as current URL + # This can happen when current_stream_id is None and we accidentally select the same stream + if new_url == self.url: + logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). 
" + f"Skipping this stream and trying next alternative.") + continue # Try next stream instead of giving up - # IMPORTANT: Just update the URL, don't stop the channel or release resources - switch_result = self.update_url(new_url, stream_id, profile_id) - if not switch_result: - logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}") - return False + logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}") - # Update stream ID tracking - self.current_stream_id = stream_id + # IMPORTANT: Just update the URL, don't stop the channel or release resources + switch_result = self.update_url(new_url, stream_id, profile_id) + if not switch_result: + logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}") + continue # Try next stream - # Store the new user agent and transcode settings - self.user_agent = new_user_agent - self.transcode = new_transcode + # Update stream ID tracking + self.current_stream_id = stream_id - # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams - if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: - metadata_key = RedisKeys.channel_metadata(self.channel_id) - self.buffer.redis_client.hset(metadata_key, mapping={ - ChannelMetadataField.URL: new_url, - ChannelMetadataField.USER_AGENT: new_user_agent, - ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], - ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams - ChannelMetadataField.STREAM_ID: str(stream_id), - ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), - ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" - }) - - # Log the switch - logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}") - - logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}") - return True + # Store the new user agent and transcode settings + self.user_agent = new_user_agent + self.transcode = new_transcode + + # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams + if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client: + metadata_key = RedisKeys.channel_metadata(self.channel_id) + self.buffer.redis_client.hset(metadata_key, mapping={ + ChannelMetadataField.URL: new_url, + ChannelMetadataField.USER_AGENT: new_user_agent, + ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'], + ChannelMetadataField.M3U_PROFILE: str(profile_id), # Use the profile_id from get_alternate_streams + ChannelMetadataField.STREAM_ID: str(stream_id), + ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()), + ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded" + }) + + # Log the switch + logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}") + + logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}") + return True + + # If we get here, we tried all streams but none worked + logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}") + return False except Exception as e: logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True) diff --git a/apps/proxy/ts_proxy/url_utils.py b/apps/proxy/ts_proxy/url_utils.py index 
75e7653ee..3b05c9f29 100644 --- a/apps/proxy/ts_proxy/url_utils.py +++ b/apps/proxy/ts_proxy/url_utils.py @@ -8,7 +8,7 @@ from django.shortcuts import get_object_or_404 from apps.channels.models import Channel, Stream from apps.m3u.models import M3UAccount, M3UAccountProfile -from core.models import UserAgent, CoreSettings +from core.models import UserAgent, CoreSettings, StreamProfile from .utils import get_logger from uuid import UUID import requests @@ -26,16 +26,100 @@ def get_stream_object(id: str): def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]: """ - Generate the appropriate stream URL for a channel based on its profile settings. + Generate the appropriate stream URL for a channel or stream based on its profile settings. Args: - channel_id: The UUID of the channel + channel_id: The UUID of the channel or stream hash Returns: Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id) """ try: - channel = get_stream_object(channel_id) + channel_or_stream = get_stream_object(channel_id) + + # Handle direct stream preview (custom streams) + if isinstance(channel_or_stream, Stream): + from core.utils import RedisClient + + stream = channel_or_stream + logger.info(f"Previewing stream directly: {stream.id} ({stream.name})") + + # For custom streams, we need to get the M3U account and profile + m3u_account = stream.m3u_account + if not m3u_account: + logger.error(f"Stream {stream.id} has no M3U account") + return None, None, False, None + + # Get active profiles for this M3U account + m3u_profiles = m3u_account.profiles.filter(is_active=True) + default_profile = next((obj for obj in m3u_profiles if obj.is_default), None) + + if not default_profile: + logger.error(f"No default active profile found for M3U account {m3u_account.id}") + return None, None, False, None + + # Check profiles in order: default first, then others + profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default] + + # Try to find an available profile with connection capacity + redis_client = RedisClient.get_client() + selected_profile = None + + for profile in profiles: + logger.info(profile) + + # Check connection availability + if redis_client: + profile_connections_key = f"profile_connections:{profile.id}" + current_connections = int(redis_client.get(profile_connections_key) or 0) + + # Check if profile has available slots (or unlimited connections) + if profile.max_streams == 0 or current_connections < profile.max_streams: + selected_profile = profile + logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview") + break + else: + logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}") + else: + # No Redis available, use first active profile + selected_profile = profile + break + + if not selected_profile: + logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}") + return None, None, False, None + + # Get the appropriate user agent + stream_user_agent = m3u_account.get_user_agent().user_agent + if stream_user_agent is None: + stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id()) + logger.debug(f"No user agent found for account, using default: {stream_user_agent}") + + # Get stream URL with the selected profile's URL transformation + stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern) + + # Check 
if the stream has its own stream_profile set, otherwise use default
+            if stream.stream_profile:
+                stream_profile = stream.stream_profile
+                logger.debug(f"Using stream's own stream profile: {stream_profile.name}")
+            else:
+                stream_profile = StreamProfile.objects.get(
+                    id=CoreSettings.get_default_stream_profile_id()
+                )
+                logger.debug(f"Using default stream profile: {stream_profile.name}")
+
+            # Check if transcoding is needed
+            if stream_profile.is_proxy() or stream_profile is None:
+                transcode = False
+            else:
+                transcode = True
+
+            stream_profile_id = stream_profile.id
+
+            return stream_url, stream_user_agent, transcode, stream_profile_id
+
+        # Handle channel preview (existing logic)
+        channel = channel_or_stream
 
         # Get stream and profile for this channel
         # Note: get_stream now returns 3 values (stream_id, profile_id, error_reason)
@@ -351,6 +435,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
     """
     Validate if a stream URL is accessible without downloading the full content.
 
+    Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot
+    be validated via HTTP methods.
+
     Args:
         url (str): The URL to validate
         user_agent (str): User agent to use for the request
@@ -359,6 +446,12 @@
     Returns:
         tuple: (is_valid, final_url, status_code, message)
     """
+    # Check if URL uses non-HTTP protocols (UDP/RTP/RTSP)
+    # These cannot be validated via HTTP methods, so we skip validation
+    if url.startswith(('udp://', 'rtp://', 'rtsp://')):
+        logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}")
+        return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped"
+
     try:
         # Create session with proper headers
         session = requests.Session()
diff --git a/apps/proxy/ts_proxy/utils.py b/apps/proxy/ts_proxy/utils.py
index b568b804f..20a6e1409 100644
--- a/apps/proxy/ts_proxy/utils.py
+++ b/apps/proxy/ts_proxy/utils.py
@@ -7,19 +7,27 @@ def detect_stream_type(url):
     """
-    Detect if stream URL is HLS or TS format.
+    Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format.
 
     Args:
         url (str): The stream URL to analyze
 
     Returns:
-        str: 'hls' or 'ts' depending on detected format
+        str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format
     """
     if not url:
         return 'unknown'
 
     url_lower = url.lower()
 
+    # Check for UDP streams (requires FFmpeg)
+    if url_lower.startswith('udp://'):
+        return 'udp'
+
+    # Check for RTSP/RTP streams (requires FFmpeg)
+    if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'):
+        return 'rtsp'
+
     # Look for common HLS indicators
     if (url_lower.endswith('.m3u8') or '.m3u8?' 
in url_lower or diff --git a/apps/proxy/ts_proxy/views.py b/apps/proxy/ts_proxy/views.py index e31d0418e..91f254a77 100644 --- a/apps/proxy/ts_proxy/views.py +++ b/apps/proxy/ts_proxy/views.py @@ -4,7 +4,7 @@ import random import re import pathlib -from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect +from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse from django.views.decorators.csrf import csrf_exempt from django.shortcuts import get_object_or_404 from apps.proxy.config import TSConfig as Config @@ -84,11 +84,18 @@ def stream_ts(request, channel_id): if state_field in metadata: channel_state = metadata[state_field].decode("utf-8") - if channel_state: - # Channel is being initialized or already active - no need for reinitialization + # Active/running states - channel is operational, don't reinitialize + if channel_state in [ + ChannelState.ACTIVE, + ChannelState.WAITING_FOR_CLIENTS, + ChannelState.BUFFERING, + ChannelState.INITIALIZING, + ChannelState.CONNECTING, + ChannelState.STOPPING, + ]: needs_initialization = False logger.debug( - f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization" + f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization" ) # Special handling for initializing/connecting states @@ -98,19 +105,34 @@ def stream_ts(request, channel_id): ]: channel_initializing = True logger.debug( - f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion" + f"[{client_id}] Channel {channel_id} is still initializing, client will wait" ) + # Terminal states - channel needs cleanup before reinitialization + elif channel_state in [ + ChannelState.ERROR, + ChannelState.STOPPED, + ]: + needs_initialization = True + logger.info( + f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize" + ) + # Unknown/empty state - check if owner is alive else: - # Only check for owner if channel is in a valid state owner_field = ChannelMetadataField.OWNER.encode("utf-8") if owner_field in metadata: owner = metadata[owner_field].decode("utf-8") owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat" if proxy_server.redis_client.exists(owner_heartbeat_key): - # Owner is still active, so we don't need to reinitialize + # Owner is still active with unknown state - don't reinitialize needs_initialization = False logger.debug( - f"[{client_id}] Channel {channel_id} has active owner {owner}" + f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init" + ) + else: + # Owner dead - needs reinitialization + needs_initialization = True + logger.warning( + f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize" ) # Start initialization if needed @@ -128,7 +150,7 @@ def stream_ts(request, channel_id): ChannelService.stop_channel(channel_id) # Use fixed retry interval and timeout - retry_timeout = 1.5 # 1.5 seconds total timeout + retry_timeout = 3 # 3 seconds total timeout retry_interval = 0.1 # 100ms between attempts wait_start_time = time.time() @@ -138,9 +160,10 @@ def stream_ts(request, channel_id): profile_value = None error_reason = None attempt = 0 + should_retry = True # Try to get a stream with fixed interval retries - while time.time() - wait_start_time < retry_timeout: + while should_retry and time.time() - wait_start_time < retry_timeout: attempt += 1 stream_url, stream_user_agent, transcode, profile_value = ( generate_stream_url(channel_id) @@ 
-152,35 +175,53 @@ def stream_ts(request, channel_id): ) break - # If we failed because there are no streams assigned, don't retry - _, _, error_reason = channel.get_stream() - if error_reason and "maximum connection limits" not in error_reason: - logger.warning( - f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}" - ) - break + # On first failure, check if the error is retryable + if attempt == 1: + _, _, error_reason = channel.get_stream() + if error_reason and "maximum connection limits" not in error_reason: + logger.warning( + f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}" + ) + should_retry = False + break - # Wait 100ms before retrying + # Check if we have time remaining for another sleep cycle elapsed_time = time.time() - wait_start_time remaining_time = retry_timeout - elapsed_time - if remaining_time > retry_interval: + + # If we don't have enough time for the next sleep interval, break + # but only after we've already made an attempt (the while condition will try one more time) + if remaining_time <= retry_interval: + logger.info( + f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt" + ) + break + + # Wait before retrying + logger.info( + f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" + ) + gevent.sleep(retry_interval) + retry_interval += 0.025 # Increase wait time by 25ms for next attempt + + # Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout + if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout: + attempt += 1 + logger.info( + f"[{client_id}] Making final attempt {attempt} at timeout boundary" + ) + stream_url, stream_user_agent, transcode, profile_value = ( + generate_stream_url(channel_id) + ) + if stream_url is not None: logger.info( - f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)" + f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}" ) - gevent.sleep(retry_interval) - retry_interval += 0.025 # Increase wait time by 25ms for next attempt if stream_url is None: - # Make sure to release any stream locks that might have been acquired - if hasattr(channel, "streams") and channel.streams.exists(): - for stream in channel.streams.all(): - try: - stream.release_stream() - logger.info( - f"[{client_id}] Released stream {stream.id} for channel {channel_id}" - ) - except Exception as e: - logger.error(f"[{client_id}] Error releasing stream: {e}") + # Release the channel's stream lock if one was acquired + # Note: Only call this if get_stream() actually assigned a stream + # In our case, if stream_url is None, no stream was ever assigned, so don't release # Get the specific error message if available wait_duration = f"{int(time.time() - wait_start_time)}s" @@ -189,6 +230,9 @@ def stream_ts(request, channel_id): if error_reason else "No available streams for this channel" ) + logger.info( + f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}" + ) return JsonResponse( {"error": error_msg, "waited": wait_duration}, status=503 ) # 503 Service Unavailable is appropriate here @@ -270,6 +314,15 @@ def stream_ts(request, channel_id): logger.info( f"[{client_id}] Redirecting to validated URL: {final_url} 
({message})" ) + + # For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect + # because Django's HttpResponseRedirect blocks them for security + if final_url.startswith(('rtsp://', 'rtp://', 'udp://')): + logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol") + response = HttpResponse(status=301) + response['Location'] = final_url + return response + return HttpResponseRedirect(final_url) else: logger.error( @@ -474,24 +527,33 @@ def stream_xc(request, username, password, channel_id): print(f"Fetchin channel with ID: {channel_id}") if user.user_level < 10: - filters = { - "id": int(channel_id), - "channelprofilemembership__enabled": True, - "user_level__lte": user.user_level, - } - - if user.channel_profiles.count() > 0: - channel_profiles = user.channel_profiles.all() - filters["channelprofilemembership__channel_profile__in"] = channel_profiles + user_profile_count = user.channel_profiles.count() + + # If user has ALL profiles or NO profiles, give unrestricted access + if user_profile_count == 0: + # No profile filtering - user sees all channels based on user_level + filters = { + "id": int(channel_id), + "user_level__lte": user.user_level + } + channel = Channel.objects.filter(**filters).first() + else: + # User has specific limited profiles assigned + filters = { + "id": int(channel_id), + "channelprofilemembership__enabled": True, + "user_level__lte": user.user_level, + "channelprofilemembership__channel_profile__in": user.channel_profiles.all() + } + channel = Channel.objects.filter(**filters).distinct().first() - channel = Channel.objects.filter(**filters).distinct().first() if not channel: return JsonResponse({"error": "Not found"}, status=404) else: channel = get_object_or_404(Channel, id=channel_id) # @TODO: we've got the file 'type' via extension, support this when we support multiple outputs - return stream_ts(request._request, channel.uuid) + return stream_ts(request._request, str(channel.uuid)) @csrf_exempt diff --git a/apps/vod/api_urls.py b/apps/vod/api_urls.py index ffccc3f5f..e897bd28b 100644 --- a/apps/vod/api_urls.py +++ b/apps/vod/api_urls.py @@ -6,6 +6,7 @@ SeriesViewSet, VODCategoryViewSet, UnifiedContentViewSet, + VODLogoViewSet, ) app_name = 'vod' @@ -16,5 +17,6 @@ router.register(r'series', SeriesViewSet, basename='series') router.register(r'categories', VODCategoryViewSet, basename='vodcategory') router.register(r'all', UnifiedContentViewSet, basename='unified-content') +router.register(r'vodlogos', VODLogoViewSet, basename='vodlogo') urlpatterns = router.urls diff --git a/apps/vod/api_views.py b/apps/vod/api_views.py index 517038a63..8cc55a119 100644 --- a/apps/vod/api_views.py +++ b/apps/vod/api_views.py @@ -3,23 +3,29 @@ from rest_framework.decorators import action from rest_framework.filters import SearchFilter, OrderingFilter from rest_framework.pagination import PageNumberPagination +from rest_framework.permissions import AllowAny from django_filters.rest_framework import DjangoFilterBackend from django.shortcuts import get_object_or_404 +from django.http import StreamingHttpResponse, HttpResponse, FileResponse +from django.db.models import Q import django_filters import logging +import os +import requests from apps.accounts.permissions import ( Authenticated, permission_classes_by_action, ) from .models import ( - Series, VODCategory, Movie, Episode, - M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation + Series, VODCategory, Movie, Episode, VODLogo, + M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, 
M3UVODCategoryRelation ) from .serializers import ( MovieSerializer, EpisodeSerializer, SeriesSerializer, VODCategorySerializer, + VODLogoSerializer, M3UMovieRelationSerializer, M3USeriesRelationSerializer, M3UEpisodeRelationSerializer @@ -470,6 +476,59 @@ def get_permissions(self): except KeyError: return [Authenticated()] + def list(self, request, *args, **kwargs): + """Override list to ensure Uncategorized categories and relations exist for all XC accounts with VOD enabled""" + from apps.m3u.models import M3UAccount + + # Ensure Uncategorized categories exist + movie_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + series_category, _ = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Get all active XC accounts with VOD enabled + xc_accounts = M3UAccount.objects.filter( + account_type=M3UAccount.Types.XC, + is_active=True + ) + + for account in xc_accounts: + if account.custom_properties: + custom_props = account.custom_properties or {} + vod_enabled = custom_props.get("enable_vod", False) + + if vod_enabled: + # Ensure relations exist for this account + auto_enable_new = custom_props.get("auto_enable_new_groups_vod", True) + + M3UVODCategoryRelation.objects.get_or_create( + category=movie_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + M3UVODCategoryRelation.objects.get_or_create( + category=series_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + # Now proceed with normal list operation + return super().list(request, *args, **kwargs) + class UnifiedContentViewSet(viewsets.ReadOnlyModelViewSet): """ViewSet that combines Movies and Series for unified 'All' view""" @@ -564,7 +623,7 @@ def list(self, request, *args, **kwargs): logo.url as logo_url, 'movie' as content_type FROM vod_movie movies - LEFT JOIN dispatcharr_channels_logo logo ON movies.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON movies.logo_id = logo.id WHERE {where_conditions[0]} UNION ALL @@ -586,7 +645,7 @@ def list(self, request, *args, **kwargs): logo.url as logo_url, 'series' as content_type FROM vod_series series - LEFT JOIN dispatcharr_channels_logo logo ON series.logo_id = logo.id + LEFT JOIN vod_vodlogo logo ON series.logo_id = logo.id WHERE {where_conditions[1]} ) SELECT * FROM unified_content @@ -613,10 +672,10 @@ def list(self, request, *args, **kwargs): 'id': item_dict['logo_id'], 'name': item_dict['logo_name'], 'url': item_dict['logo_url'], - 'cache_url': f"/media/logo_cache/{item_dict['logo_id']}.png" if item_dict['logo_id'] else None, - 'channel_count': 0, # We don't need this for VOD - 'is_used': True, - 'channel_names': [] # We don't need this for VOD + 'cache_url': f"/api/vod/vodlogos/{item_dict['logo_id']}/cache/", + 'movie_count': 0, # We don't calculate this in raw SQL + 'series_count': 0, # We don't calculate this in raw SQL + 'is_used': True } # Convert to the format expected by frontend @@ -668,4 +727,173 @@ def list(self, request, *args, **kwargs): logger.error(f"Error in UnifiedContentViewSet.list(): {e}") import traceback logger.error(traceback.format_exc()) - return Response({'error': str(e)}, status=500) \ No newline at end of file + return Response({'error': str(e)}, status=500) + + +class VODLogoPagination(PageNumberPagination): + page_size = 100 + page_size_query_param = "page_size" + max_page_size = 1000 + + +class 
VODLogoViewSet(viewsets.ModelViewSet): + """ViewSet for VOD Logo management""" + queryset = VODLogo.objects.all() + serializer_class = VODLogoSerializer + pagination_class = VODLogoPagination + filter_backends = [SearchFilter, OrderingFilter] + search_fields = ['name', 'url'] + ordering_fields = ['name', 'id'] + ordering = ['name'] + + def get_permissions(self): + try: + return [perm() for perm in permission_classes_by_action[self.action]] + except KeyError: + if self.action == 'cache': + return [AllowAny()] + return [Authenticated()] + + def get_queryset(self): + """Optimize queryset with prefetch and add filtering""" + queryset = VODLogo.objects.prefetch_related('movie', 'series').order_by('name') + + # Filter by specific IDs + ids = self.request.query_params.getlist('ids') + if ids: + try: + id_list = [int(id_str) for id_str in ids if id_str.isdigit()] + if id_list: + queryset = queryset.filter(id__in=id_list) + except (ValueError, TypeError): + queryset = VODLogo.objects.none() + + # Filter by usage + used_filter = self.request.query_params.get('used', None) + if used_filter == 'true': + # Return logos that are used by movies OR series + queryset = queryset.filter( + Q(movie__isnull=False) | Q(series__isnull=False) + ).distinct() + elif used_filter == 'false': + # Return logos that are NOT used by either + queryset = queryset.filter( + movie__isnull=True, + series__isnull=True + ) + elif used_filter == 'movies': + # Return logos that are used by movies (may also be used by series) + queryset = queryset.filter(movie__isnull=False).distinct() + elif used_filter == 'series': + # Return logos that are used by series (may also be used by movies) + queryset = queryset.filter(series__isnull=False).distinct() + + + # Filter by name + name_query = self.request.query_params.get('name', None) + if name_query: + queryset = queryset.filter(name__icontains=name_query) + + # No pagination mode + if self.request.query_params.get('no_pagination', 'false').lower() == 'true': + self.pagination_class = None + + return queryset + + @action(detail=True, methods=["get"], permission_classes=[AllowAny]) + def cache(self, request, pk=None): + """Streams the VOD logo file, whether it's local or remote.""" + logo = self.get_object() + + if not logo.url: + return HttpResponse(status=404) + + # Check if this is a local file path + if logo.url.startswith('/data/'): + # It's a local file + file_path = logo.url + if not os.path.exists(file_path): + logger.error(f"VOD logo file not found: {file_path}") + return HttpResponse(status=404) + + try: + return FileResponse(open(file_path, 'rb'), content_type='image/png') + except Exception as e: + logger.error(f"Error serving VOD logo file {file_path}: {str(e)}") + return HttpResponse(status=500) + else: + # It's a remote URL - proxy it + try: + response = requests.get(logo.url, stream=True, timeout=10) + response.raise_for_status() + + content_type = response.headers.get('Content-Type', 'image/png') + + return StreamingHttpResponse( + response.iter_content(chunk_size=8192), + content_type=content_type + ) + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching remote VOD logo {logo.url}: {str(e)}") + return HttpResponse(status=404) + + @action(detail=False, methods=["delete"], url_path="bulk-delete") + def bulk_delete(self, request): + """Delete multiple VOD logos at once""" + logo_ids = request.data.get('logo_ids', []) + + if not logo_ids: + return Response( + {"error": "No logo IDs provided"}, + status=status.HTTP_400_BAD_REQUEST + ) + + try: + 
# Get logos to delete + logos = VODLogo.objects.filter(id__in=logo_ids) + deleted_count = logos.count() + + # Delete them + logos.delete() + + return Response({ + "deleted_count": deleted_count, + "message": f"Successfully deleted {deleted_count} VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during bulk VOD logo deletion: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + + @action(detail=False, methods=["post"]) + def cleanup(self, request): + """Delete all VOD logos that are not used by any movies or series""" + try: + # Find unused logos + unused_logos = VODLogo.objects.filter( + movie__isnull=True, + series__isnull=True + ) + + deleted_count = unused_logos.count() + logo_names = list(unused_logos.values_list('name', flat=True)) + + # Delete them + unused_logos.delete() + + logger.info(f"Cleaned up {deleted_count} unused VOD logos: {logo_names}") + + return Response({ + "deleted_count": deleted_count, + "deleted_logos": logo_names, + "message": f"Successfully deleted {deleted_count} unused VOD logo(s)" + }) + except Exception as e: + logger.error(f"Error during VOD logo cleanup: {str(e)}") + return Response( + {"error": str(e)}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR + ) + diff --git a/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py new file mode 100644 index 000000000..1bd2c4185 --- /dev/null +++ b/apps/vod/migrations/0003_vodlogo_alter_movie_logo_alter_series_logo.py @@ -0,0 +1,264 @@ +# Generated by Django 5.2.4 on 2025-11-06 23:01 + +import django.db.models.deletion +from django.db import migrations, models + + +def migrate_vod_logos_forward(apps, schema_editor): + """ + Migrate VOD logos from the Logo table to the new VODLogo table. + This copies all logos referenced by movies or series to VODLogo. + Uses pure SQL for maximum performance. 
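For reference, the `bulk-delete` and `cleanup` actions above can be exercised with a plain HTTP client. A sketch under a few assumptions: the host, port and token are placeholders, and the `/api/vod/vodlogos/` prefix is inferred from the `cache_url` built in the unified content view.

```python
import requests

BASE = "http://localhost:9191/api/vod/vodlogos"       # host and port are placeholders
HEADERS = {"Authorization": "Bearer <access-token>"}  # both actions require an authenticated user

# Delete a specific set of VOD logos by ID
resp = requests.delete(f"{BASE}/bulk-delete/", json={"logo_ids": [12, 34]}, headers=HEADERS)
print(resp.json())  # expected shape: {"deleted_count": 2, "message": "Successfully deleted 2 VOD logo(s)"}

# Remove every logo that no movie or series references any more
resp = requests.post(f"{BASE}/cleanup/", headers=HEADERS)
print(resp.json()["deleted_count"], "unused logos removed")
```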
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Starting VOD logo migration...") + print("="*80) + + with connection.cursor() as cursor: + # Step 1: Copy unique logos from Logo table to VODLogo table + # Only copy logos that are used by movies or series + print("Copying logos to VODLogo table...") + cursor.execute(""" + INSERT INTO vod_vodlogo (name, url) + SELECT DISTINCT l.name, l.url + FROM dispatcharr_channels_logo l + WHERE l.id IN ( + SELECT DISTINCT logo_id FROM vod_movie WHERE logo_id IS NOT NULL + UNION + SELECT DISTINCT logo_id FROM vod_series WHERE logo_id IS NOT NULL + ) + ON CONFLICT (url) DO NOTHING + """) + print(f"Created VODLogo entries") + + # Step 2: Update movies to point to VODLogo IDs using JOIN + print("Updating movie references...") + cursor.execute(""" + UPDATE vod_movie m + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE m.logo_id = l.id + AND m.logo_id IS NOT NULL + """) + movie_count = cursor.rowcount + print(f"Updated {movie_count} movies with new VOD logo references") + + # Step 3: Update series to point to VODLogo IDs using JOIN + print("Updating series references...") + cursor.execute(""" + UPDATE vod_series s + SET logo_id = v.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + WHERE s.logo_id = l.id + AND s.logo_id IS NOT NULL + """) + series_count = cursor.rowcount + print(f"Updated {series_count} series with new VOD logo references") + + print("="*80) + print("VOD logo migration completed successfully!") + print(f"Summary: Updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def migrate_vod_logos_backward(apps, schema_editor): + """ + Reverse migration - moves VODLogos back to Logo table. + This recreates Logo entries for all VODLogos and updates Movie/Series references. + """ + Logo = apps.get_model('dispatcharr_channels', 'Logo') + VODLogo = apps.get_model('vod', 'VODLogo') + Movie = apps.get_model('vod', 'Movie') + Series = apps.get_model('vod', 'Series') + + print("\n" + "="*80) + print("REVERSE: Moving VOD logos back to Logo table...") + print("="*80) + + # Get all VODLogos + vod_logos = VODLogo.objects.all() + print(f"Found {vod_logos.count()} VOD logos to reverse migrate") + + # Create Logo entries for each VODLogo + logos_to_create = [] + vod_to_logo_mapping = {} # VODLogo ID -> Logo ID + + for vod_logo in vod_logos: + # Check if a Logo with this URL already exists + existing_logo = Logo.objects.filter(url=vod_logo.url).first() + + if existing_logo: + # Logo already exists, just map to it + vod_to_logo_mapping[vod_logo.id] = existing_logo.id + print(f"Logo already exists for URL: {vod_logo.url[:50]}... 
(using existing)") + else: + # Create new Logo entry + new_logo = Logo(name=vod_logo.name, url=vod_logo.url) + logos_to_create.append(new_logo) + + # Bulk create new Logo entries + if logos_to_create: + print(f"Creating {len(logos_to_create)} new Logo entries...") + Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + print("Logo entries created") + + # Get the created Logo instances with their IDs + for vod_logo in vod_logos: + if vod_logo.id not in vod_to_logo_mapping: + try: + logo = Logo.objects.get(url=vod_logo.url) + vod_to_logo_mapping[vod_logo.id] = logo.id + except Logo.DoesNotExist: + print(f"Warning: Could not find Logo for URL: {vod_logo.url[:100]}...") + + print(f"Created mapping for {len(vod_to_logo_mapping)} VOD logos -> Logos") + + # Update movies to point back to Logo table + movie_count = 0 + for movie in Movie.objects.exclude(logo__isnull=True): + if movie.logo_id in vod_to_logo_mapping: + movie.logo_id = vod_to_logo_mapping[movie.logo_id] + movie.save(update_fields=['logo_id']) + movie_count += 1 + print(f"Updated {movie_count} movies to use Logo table") + + # Update series to point back to Logo table + series_count = 0 + for series in Series.objects.exclude(logo__isnull=True): + if series.logo_id in vod_to_logo_mapping: + series.logo_id = vod_to_logo_mapping[series.logo_id] + series.save(update_fields=['logo_id']) + series_count += 1 + print(f"Updated {series_count} series to use Logo table") + + # Delete VODLogos (they're now redundant) + vod_logo_count = vod_logos.count() + vod_logos.delete() + print(f"Deleted {vod_logo_count} VOD logos") + + print("="*80) + print("Reverse migration completed!") + print(f"Summary: Created/reused {len(vod_to_logo_mapping)} logos, updated {movie_count} movies and {series_count} series") + print("="*80 + "\n") + + +def cleanup_migrated_logos(apps, schema_editor): + """ + Delete Logo entries that were successfully migrated to VODLogo. + + Uses efficient JOIN-based approach with LEFT JOIN to exclude channel usage. 
+ """ + from django.db import connection + + print("\n" + "="*80) + print("Cleaning up migrated Logo entries...") + print("="*80) + + with connection.cursor() as cursor: + # Single efficient query using JOINs: + # - JOIN with vod_vodlogo to find migrated logos + # - LEFT JOIN with channels to find which aren't used + cursor.execute(""" + DELETE FROM dispatcharr_channels_logo + WHERE id IN ( + SELECT l.id + FROM dispatcharr_channels_logo l + INNER JOIN vod_vodlogo v ON l.url = v.url + LEFT JOIN dispatcharr_channels_channel c ON c.logo_id = l.id + WHERE c.id IS NULL + ) + """) + deleted_count = cursor.rowcount + + print(f"✓ Deleted {deleted_count} migrated Logo entries (not used by channels)") + print("="*80 + "\n") + + +class Migration(migrations.Migration): + + dependencies = [ + ('vod', '0002_add_last_seen_with_default'), + ('dispatcharr_channels', '0013_alter_logo_url'), # Ensure Logo table exists + ] + + operations = [ + # Step 1: Create the VODLogo model + migrations.CreateModel( + name='VODLogo', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(max_length=255)), + ('url', models.TextField(unique=True)), + ], + options={ + 'verbose_name': 'VOD Logo', + 'verbose_name_plural': 'VOD Logos', + }, + ), + + # Step 2: Remove foreign key constraints temporarily (so we can change the IDs) + # We need to find and drop the actual constraint names dynamically + migrations.RunSQL( + sql=[ + # Drop movie logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_movie'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_movie DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + # Drop series logo constraint (find it dynamically) + """ + DO $$ + DECLARE + constraint_name text; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'vod_series'::regclass + AND conname LIKE '%logo_id%fk%'; + + IF constraint_name IS NOT NULL THEN + EXECUTE 'ALTER TABLE vod_series DROP CONSTRAINT ' || constraint_name; + END IF; + END $$; + """, + ], + reverse_sql=[ + # The AlterField operations will recreate the constraints pointing to VODLogo, + # so we don't need to manually recreate them in reverse + migrations.RunSQL.noop, + ], + ), + + # Step 3: Migrate the data (this copies logos and updates references) + migrations.RunPython(migrate_vod_logos_forward, migrate_vod_logos_backward), + + # Step 4: Now we can safely alter the foreign keys to point to VODLogo + migrations.AlterField( + model_name='movie', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='movie', to='vod.vodlogo'), + ), + migrations.AlterField( + model_name='series', + name='logo', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='series', to='vod.vodlogo'), + ), + + # Step 5: Clean up migrated Logo entries + migrations.RunPython(cleanup_migrated_logos, migrations.RunPython.noop), + ] diff --git a/apps/vod/models.py b/apps/vod/models.py index f0825ba23..69aed808e 100644 --- a/apps/vod/models.py +++ b/apps/vod/models.py @@ -4,10 +4,22 @@ from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from apps.m3u.models import M3UAccount -from 
apps.channels.models import Logo import uuid +class VODLogo(models.Model): + """Logo model specifically for VOD content (movies and series)""" + name = models.CharField(max_length=255) + url = models.TextField(unique=True) + + def __str__(self): + return self.name + + class Meta: + verbose_name = 'VOD Logo' + verbose_name_plural = 'VOD Logos' + + class VODCategory(models.Model): """Categories for organizing VODs (e.g., Action, Comedy, Drama)""" @@ -69,7 +81,7 @@ class Series(models.Model): year = models.IntegerField(blank=True, null=True) rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='series') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") @@ -108,7 +120,7 @@ class Movie(models.Model): rating = models.CharField(max_length=10, blank=True, null=True) genre = models.CharField(max_length=255, blank=True, null=True) duration_secs = models.IntegerField(blank=True, null=True, help_text="Duration in seconds") - logo = models.ForeignKey(Logo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') + logo = models.ForeignKey(VODLogo, on_delete=models.SET_NULL, null=True, blank=True, related_name='movie') # Metadata IDs for deduplication - these should be globally unique when present tmdb_id = models.CharField(max_length=50, blank=True, null=True, unique=True, help_text="TMDB ID for metadata") diff --git a/apps/vod/serializers.py b/apps/vod/serializers.py index 5a672b330..7747cb888 100644 --- a/apps/vod/serializers.py +++ b/apps/vod/serializers.py @@ -1,12 +1,79 @@ from rest_framework import serializers +from django.urls import reverse from .models import ( - Series, VODCategory, Movie, Episode, + Series, VODCategory, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.serializers import LogoSerializer from apps.m3u.serializers import M3UAccountSerializer +class VODLogoSerializer(serializers.ModelSerializer): + cache_url = serializers.SerializerMethodField() + movie_count = serializers.SerializerMethodField() + series_count = serializers.SerializerMethodField() + is_used = serializers.SerializerMethodField() + item_names = serializers.SerializerMethodField() + + class Meta: + model = VODLogo + fields = ["id", "name", "url", "cache_url", "movie_count", "series_count", "is_used", "item_names"] + + def validate_url(self, value): + """Validate that the URL is unique for creation or update""" + if self.instance and self.instance.url == value: + return value + + if VODLogo.objects.filter(url=value).exists(): + raise serializers.ValidationError("A VOD logo with this URL already exists.") + + return value + + def create(self, validated_data): + """Handle logo creation with proper URL validation""" + return VODLogo.objects.create(**validated_data) + + def update(self, instance, validated_data): + """Handle logo updates""" + for attr, value in validated_data.items(): + setattr(instance, attr, value) + instance.save() + return instance + + def get_cache_url(self, obj): + request = self.context.get("request") + if request: + return request.build_absolute_uri( + reverse("api:vod:vodlogo-cache", 
args=[obj.id]) + ) + return reverse("api:vod:vodlogo-cache", args=[obj.id]) + + def get_movie_count(self, obj): + """Get the number of movies using this logo""" + return obj.movie.count() if hasattr(obj, 'movie') else 0 + + def get_series_count(self, obj): + """Get the number of series using this logo""" + return obj.series.count() if hasattr(obj, 'series') else 0 + + def get_is_used(self, obj): + """Check if this logo is used by any movies or series""" + return (hasattr(obj, 'movie') and obj.movie.exists()) or (hasattr(obj, 'series') and obj.series.exists()) + + def get_item_names(self, obj): + """Get the list of movies and series using this logo""" + names = [] + + if hasattr(obj, 'movie'): + for movie in obj.movie.all()[:10]: # Limit to 10 items for performance + names.append(f"Movie: {movie.name}") + + if hasattr(obj, 'series'): + for series in obj.series.all()[:10]: # Limit to 10 items for performance + names.append(f"Series: {series.name}") + + return names + + class M3UVODCategoryRelationSerializer(serializers.ModelSerializer): category = serializers.IntegerField(source="category.id") m3u_account = serializers.IntegerField(source="m3u_account.id") @@ -31,7 +98,7 @@ class Meta: ] class SeriesSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) episode_count = serializers.SerializerMethodField() class Meta: @@ -43,7 +110,7 @@ def get_episode_count(self, obj): class MovieSerializer(serializers.ModelSerializer): - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) class Meta: model = Movie @@ -225,7 +292,7 @@ def get_quality_info(self, obj): class EnhancedSeriesSerializer(serializers.ModelSerializer): """Enhanced serializer for series with provider information""" - logo = LogoSerializer(read_only=True) + logo = VODLogoSerializer(read_only=True) providers = M3USeriesRelationSerializer(source='m3u_relations', many=True, read_only=True) episode_count = serializers.SerializerMethodField() diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index 504b09fc3..1170543a9 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -5,10 +5,9 @@ from apps.m3u.models import M3UAccount from core.xtream_codes import Client as XtreamCodesClient from .models import ( - VODCategory, Series, Movie, Episode, + VODCategory, Series, Movie, Episode, VODLogo, M3USeriesRelation, M3UMovieRelation, M3UEpisodeRelation, M3UVODCategoryRelation ) -from apps.channels.models import Logo from datetime import datetime import logging import json @@ -128,6 +127,37 @@ def refresh_movies(client, account, categories_by_provider, relations, scan_star """Refresh movie content using single API call for all movies""" logger.info(f"Refreshing movies for account {account.name}") + # Ensure "Uncategorized" category exists for movies without a category + uncategorized_category, created = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="movie", + defaults={} + ) + + # Ensure there's a relation for the Uncategorized category + account_custom_props = account.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + + uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create( + category=uncategorized_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + if created: + logger.info(f"Created 'Uncategorized' category for movies") + if rel_created: + logger.info(f"Created relation for 
'Uncategorized' category (enabled={auto_enable_new})") + + # Add uncategorized category to relations dict for easy access + relations[uncategorized_category.id] = uncategorized_relation + + # Add to categories_by_provider with a special key for items without category + categories_by_provider['__uncategorized__'] = uncategorized_category + # Get all movies in a single API call logger.info("Fetching all movies from provider...") all_movies_data = client.get_vod_streams() # No category_id = get all movies @@ -151,6 +181,37 @@ def refresh_series(client, account, categories_by_provider, relations, scan_star """Refresh series content using single API call for all series""" logger.info(f"Refreshing series for account {account.name}") + # Ensure "Uncategorized" category exists for series without a category + uncategorized_category, created = VODCategory.objects.get_or_create( + name="Uncategorized", + category_type="series", + defaults={} + ) + + # Ensure there's a relation for the Uncategorized category + account_custom_props = account.custom_properties or {} + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + + uncategorized_relation, rel_created = M3UVODCategoryRelation.objects.get_or_create( + category=uncategorized_category, + m3u_account=account, + defaults={ + 'enabled': auto_enable_new, + 'custom_properties': {} + } + ) + + if created: + logger.info(f"Created 'Uncategorized' category for series") + if rel_created: + logger.info(f"Created relation for 'Uncategorized' category (enabled={auto_enable_new})") + + # Add uncategorized category to relations dict for easy access + relations[uncategorized_category.id] = uncategorized_relation + + # Add to categories_by_provider with a special key for items without category + categories_by_provider['__uncategorized__'] = uncategorized_category + # Get all series in a single API call logger.info("Fetching all series from provider...") all_series_data = client.get_series() # No category_id = get all series @@ -187,16 +248,28 @@ def batch_create_categories(categories_data, category_type, account): logger.debug(f"Found {len(existing_categories)} existing categories") + # Check if we should auto-enable new categories based on account settings + account_custom_props = account.custom_properties or {} + if category_type == 'movie': + auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True) + else: # series + auto_enable_new = account_custom_props.get("auto_enable_new_groups_series", True) + # Create missing categories in batch new_categories = [] + for name in category_names: if name not in existing_categories: + # Always create new categories new_categories.append(VODCategory(name=name, category_type=category_type)) else: + # Existing category - create relationship with enabled based on auto_enable setting + # (category exists globally but is new to this account) relations_to_create.append(M3UVODCategoryRelation( category=existing_categories[name], m3u_account=account, custom_properties={}, + enabled=auto_enable_new, )) logger.debug(f"{len(new_categories)} new categories found") @@ -204,24 +277,69 @@ def batch_create_categories(categories_data, category_type, account): if new_categories: logger.debug("Creating new categories...") - created_categories = VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True) - # Convert to dictionary for easy lookup - newly_created = {cat.name: cat for cat in created_categories} + created_categories = 
list(VODCategory.bulk_create_and_fetch(new_categories, ignore_conflicts=True)) - relations_to_create += [ - M3UVODCategoryRelation( - category=cat, - m3u_account=account, - custom_properties={}, - ) for cat in newly_created.values() - ] + # Create relations for newly created categories with enabled based on auto_enable setting + for cat in created_categories: + if not auto_enable_new: + logger.info(f"New {category_type} category '{cat.name}' created but DISABLED - auto_enable_new_groups is disabled for account {account.id}") + relations_to_create.append( + M3UVODCategoryRelation( + category=cat, + m3u_account=account, + custom_properties={}, + enabled=auto_enable_new, + ) + ) + + # Convert to dictionary for easy lookup + newly_created = {cat.name: cat for cat in created_categories} existing_categories.update(newly_created) # Create missing relations logger.debug("Updating category account relations...") M3UVODCategoryRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) + # Delete orphaned category relationships (categories no longer in the M3U source) + # Exclude "Uncategorized" from cleanup as it's a special category we manage + current_category_ids = set(existing_categories[name].id for name in category_names) + existing_relations = M3UVODCategoryRelation.objects.filter( + m3u_account=account, + category__category_type=category_type + ).select_related('category') + + relations_to_delete = [ + rel for rel in existing_relations + if rel.category_id not in current_category_ids and rel.category.name != "Uncategorized" + ] + + if relations_to_delete: + M3UVODCategoryRelation.objects.filter( + id__in=[rel.id for rel in relations_to_delete] + ).delete() + logger.info(f"Deleted {len(relations_to_delete)} orphaned {category_type} category relationships for account {account.id}: {[rel.category.name for rel in relations_to_delete]}") + + # Check if any of the deleted relationships left categories with no remaining associations + orphaned_category_ids = [] + for rel in relations_to_delete: + category = rel.category + + # Check if this category has any remaining M3U account relationships + remaining_relationships = M3UVODCategoryRelation.objects.filter( + category=category + ).exists() + + # If no relationships remain, it's safe to delete the category + if not remaining_relationships: + orphaned_category_ids.append(category.id) + logger.debug(f"Category '{category.name}' has no remaining associations and will be deleted") + + # Delete orphaned categories + if orphaned_category_ids: + VODCategory.objects.filter(id__in=orphaned_category_ids).delete() + logger.info(f"Deleted {len(orphaned_category_ids)} orphaned {category_type} categories with no remaining associations") + # 🔑 Fetch all relations for this account, for all categories # relations = { rel.id: rel for rel in M3UVODCategoryRelation.objects # .filter(category__in=existing_categories.values(), m3u_account=account) @@ -276,7 +394,16 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N logger.debug("Skipping disabled category") continue else: - logger.warning(f"No category ID provided for movie {name}") + # Assign to Uncategorized category if no category_id provided + logger.debug(f"No category ID provided for movie {name}, assigning to 'Uncategorized'") + category = categories.get('__uncategorized__') + if category: + movie_data['_category_id'] = category.id + # Check if uncategorized is disabled + relation = relations.get(category.id, None) + if relation and not relation.enabled: + 
logger.debug("Skipping disabled 'Uncategorized' category") + continue # Extract metadata year = extract_year_from_data(movie_data, 'name') @@ -303,7 +430,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # Prepare movie properties description = movie_data.get('description') or movie_data.get('plot') or '' - rating = movie_data.get('rating') or movie_data.get('vote_average') or '' + rating = normalize_rating(movie_data.get('rating') or movie_data.get('vote_average')) genre = movie_data.get('genre') or movie_data.get('category_name') or '' duration_secs = extract_duration_from_data(movie_data) trailer_raw = movie_data.get('trailer') or movie_data.get('youtube_trailer') or '' @@ -347,7 +474,7 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -355,20 +482,20 @@ def process_movie_batch(account, batch, categories, relations, scan_start_time=N for logo_url in logo_urls: if logo_url not in existing_logos: movie_name = logo_url_to_name.get(logo_url, 'Unknown Movie') - logos_to_create.append(Logo(url=logo_url, name=movie_name)) + logos_to_create.append(VODLogo(url=logo_url, name=movie_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for movies") + logger.info(f"Created {len(newly_created)} new VOD logos for movies") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing movies based on our keys existing_movies = {} @@ -578,7 +705,16 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= logger.debug("Skipping disabled category") continue else: - logger.warning(f"No category ID provided for series {name}") + # Assign to Uncategorized category if no category_id provided + logger.debug(f"No category ID provided for series {name}, assigning to 'Uncategorized'") + category = categories.get('__uncategorized__') + if category: + series_data['_category_id'] = category.id + # Check if uncategorized is disabled + relation = relations.get(category.id, None) + if relation and not relation.enabled: + logger.debug("Skipping disabled 'Uncategorized' category") + continue # Extract metadata year = extract_year(series_data.get('releaseDate', '')) @@ -608,7 +744,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # Prepare series properties description = series_data.get('plot', '') - rating = series_data.get('rating', '') + rating = normalize_rating(series_data.get('rating')) genre = series_data.get('genre', '') logo_url = series_data.get('cover') or '' @@ -669,7 +805,7 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= # Get existing logos existing_logos = { - logo.url: logo for logo in Logo.objects.filter(url__in=logo_urls) + logo.url: logo for logo in 
VODLogo.objects.filter(url__in=logo_urls) } if logo_urls else {} # Create missing logos @@ -677,20 +813,20 @@ def process_series_batch(account, batch, categories, relations, scan_start_time= for logo_url in logo_urls: if logo_url not in existing_logos: series_name = logo_url_to_name.get(logo_url, 'Unknown Series') - logos_to_create.append(Logo(url=logo_url, name=series_name)) + logos_to_create.append(VODLogo(url=logo_url, name=series_name)) if logos_to_create: try: - Logo.objects.bulk_create(logos_to_create, ignore_conflicts=True) + VODLogo.objects.bulk_create(logos_to_create, ignore_conflicts=True) # Refresh existing_logos with newly created ones new_logo_urls = [logo.url for logo in logos_to_create] newly_created = { - logo.url: logo for logo in Logo.objects.filter(url__in=new_logo_urls) + logo.url: logo for logo in VODLogo.objects.filter(url__in=new_logo_urls) } existing_logos.update(newly_created) - logger.info(f"Created {len(newly_created)} new logos for series") + logger.info(f"Created {len(newly_created)} new VOD logos for series") except Exception as e: - logger.warning(f"Failed to create logos: {e}") + logger.warning(f"Failed to create VOD logos: {e}") # Get existing series based on our keys - same pattern as movies existing_series = {} @@ -896,6 +1032,33 @@ def extract_duration_from_data(movie_data): return duration_secs +def normalize_rating(rating_value): + """Normalize rating value by converting commas to decimals and validating as float""" + if not rating_value: + return None + + try: + # Convert to string for processing + rating_str = str(rating_value).strip() + + if not rating_str or rating_str == '': + return None + + # Replace comma with decimal point (European format) + rating_str = rating_str.replace(',', '.') + + # Try to convert to float + rating_float = float(rating_str) + + # Return as string to maintain compatibility with existing code + # but ensure it's a valid numeric format + return str(rating_float) + except (ValueError, TypeError, AttributeError): + # If conversion fails, discard the rating + logger.debug(f"Invalid rating value discarded: {rating_value}") + return None + + def extract_year(date_string): """Extract year from date string""" if not date_string: @@ -1021,9 +1184,9 @@ def refresh_series_episodes(account, series, external_series_id, episodes_data=N if should_update_field(series.description, info.get('plot')): series.description = extract_string_from_array_or_string(info.get('plot')) updated = True - if (info.get('rating') and str(info.get('rating')).strip() and - (not series.rating or not str(series.rating).strip())): - series.rating = info.get('rating') + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and (not series.rating or not str(series.rating).strip()): + series.rating = normalized_rating updated = True if should_update_field(series.genre, info.get('genre')): series.genre = extract_string_from_array_or_string(info.get('genre')) @@ -1124,7 +1287,7 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) # Extract episode metadata description = info.get('plot') or info.get('overview', '') if info else '' - rating = info.get('rating', '') if info else '' + rating = normalize_rating(info.get('rating')) if info else None air_date = extract_date_from_data(info) if info else None duration_secs = info.get('duration_secs') if info else None tmdb_id = info.get('tmdb_id') if info else None @@ -1341,21 +1504,21 @@ def cleanup_orphaned_vod_content(stale_days=0, scan_start_time=None, account_id= 
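The `normalize_rating` helper above canonicalises provider ratings before they are stored. Assuming it is imported from `apps.vod.tasks`, where this diff defines it, the behaviour works out to:

```python
from apps.vod.tasks import normalize_rating

assert normalize_rating("7,5") == "7.5"     # European comma decimal becomes a dot
assert normalize_rating("8") == "8.0"       # always returned as a canonical float string
assert normalize_rating(" 6.3 ") == "6.3"   # surrounding whitespace is stripped
assert normalize_rating("N/A") is None      # non-numeric ratings are discarded
assert normalize_rating(None) is None       # missing ratings stay empty
```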
stale_episode_count = stale_episode_relations.count() stale_episode_relations.delete() - # Clean up movies with no relations (orphaned) - only if no account_id specified (global cleanup) - if not account_id: - orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) - orphaned_movie_count = orphaned_movies.count() + # Clean up movies with no relations (orphaned) + # Safe to delete even during account-specific cleanup because if ANY account + # has a relation, m3u_relations will not be null + orphaned_movies = Movie.objects.filter(m3u_relations__isnull=True) + orphaned_movie_count = orphaned_movies.count() + if orphaned_movie_count > 0: + logger.info(f"Deleting {orphaned_movie_count} orphaned movies with no M3U relations") orphaned_movies.delete() - # Clean up series with no relations (orphaned) - only if no account_id specified (global cleanup) - orphaned_series = Series.objects.filter(m3u_relations__isnull=True) - orphaned_series_count = orphaned_series.count() + # Clean up series with no relations (orphaned) + orphaned_series = Series.objects.filter(m3u_relations__isnull=True) + orphaned_series_count = orphaned_series.count() + if orphaned_series_count > 0: + logger.info(f"Deleting {orphaned_series_count} orphaned series with no M3U relations") orphaned_series.delete() - else: - # When cleaning up for specific account, we don't remove orphaned content - # as other accounts might still reference it - orphaned_movie_count = 0 - orphaned_series_count = 0 # Episodes will be cleaned up via CASCADE when series are deleted @@ -1797,8 +1960,9 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): if info.get('plot') and info.get('plot') != movie.description: movie.description = info.get('plot') updated = True - if info.get('rating') and info.get('rating') != movie.rating: - movie.rating = info.get('rating') + normalized_rating = normalize_rating(info.get('rating')) + if normalized_rating and normalized_rating != movie.rating: + movie.rating = normalized_rating updated = True if info.get('genre') and info.get('genre') != movie.genre: movie.genre = info.get('genre') @@ -1915,7 +2079,7 @@ def refresh_movie_advanced_data(m3u_movie_relation_id, force_refresh=False): def validate_logo_reference(obj, obj_type="object"): """ - Validate that a logo reference exists in the database. + Validate that a VOD logo reference exists in the database. If not, set it to None to prevent foreign key constraint violations. 
Args: @@ -1935,9 +2099,9 @@ def validate_logo_reference(obj, obj_type="object"): try: # Verify the logo exists in the database - Logo.objects.get(pk=obj.logo.pk) + VODLogo.objects.get(pk=obj.logo.pk) return True - except Logo.DoesNotExist: - logger.warning(f"Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") + except VODLogo.DoesNotExist: + logger.warning(f"VOD Logo with ID {obj.logo.pk} does not exist in database for {obj_type} '{getattr(obj, 'name', 'Unknown')}', setting to None") obj.logo = None return False diff --git a/core/api_urls.py b/core/api_urls.py index 00e20a6e4..75257db14 100644 --- a/core/api_urls.py +++ b/core/api_urls.py @@ -2,7 +2,16 @@ from django.urls import path, include from rest_framework.routers import DefaultRouter -from .api_views import UserAgentViewSet, StreamProfileViewSet, CoreSettingsViewSet, environment, version, rehash_streams_endpoint +from .api_views import ( + UserAgentViewSet, + StreamProfileViewSet, + CoreSettingsViewSet, + environment, + version, + rehash_streams_endpoint, + TimezoneListView, + get_system_events +) router = DefaultRouter() router.register(r'useragents', UserAgentViewSet, basename='useragent') @@ -12,5 +21,7 @@ path('settings/env/', environment, name='token_refresh'), path('version/', version, name='version'), path('rehash-streams/', rehash_streams_endpoint, name='rehash_streams'), + path('timezones/', TimezoneListView.as_view(), name='timezones'), + path('system-events/', get_system_events, name='system_events'), path('', include(router.urls)), ] diff --git a/core/api_views.py b/core/api_views.py index 9de5aa5a6..c50d7fa60 100644 --- a/core/api_views.py +++ b/core/api_views.py @@ -5,10 +5,12 @@ import logging from rest_framework import viewsets, status from rest_framework.response import Response +from rest_framework.views import APIView from django.shortcuts import get_object_or_404 from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import api_view, permission_classes, action from drf_yasg.utils import swagger_auto_schema +from drf_yasg import openapi from .models import ( UserAgent, StreamProfile, @@ -328,25 +330,130 @@ def rehash_streams_endpoint(request): # Get the current hash keys from settings hash_key_setting = CoreSettings.objects.get(key=STREAM_HASH_KEY) hash_keys = hash_key_setting.value.split(",") - + # Queue the rehash task task = rehash_streams.delay(hash_keys) - + return Response({ "success": True, "message": "Stream rehashing task has been queued", "task_id": task.id }, status=status.HTTP_200_OK) - + except CoreSettings.DoesNotExist: return Response({ "success": False, "message": "Hash key settings not found" }, status=status.HTTP_400_BAD_REQUEST) - + except Exception as e: logger.error(f"Error triggering rehash streams: {e}") return Response({ "success": False, "message": "Failed to trigger rehash task" }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + +# ───────────────────────────── +# Timezone List API +# ───────────────────────────── +class TimezoneListView(APIView): + """ + API endpoint that returns all available timezones supported by pytz. + Returns a list of timezone names grouped by region for easy selection. + This is a general utility endpoint that can be used throughout the application. 
+ """ + + def get_permissions(self): + return [Authenticated()] + + @swagger_auto_schema( + operation_description="Get list of all supported timezones", + responses={200: openapi.Response('List of timezones with grouping by region')} + ) + def get(self, request): + import pytz + + # Get all common timezones (excludes deprecated ones) + all_timezones = sorted(pytz.common_timezones) + + # Group by region for better UX + grouped = {} + for tz in all_timezones: + if '/' in tz: + region = tz.split('/')[0] + if region not in grouped: + grouped[region] = [] + grouped[region].append(tz) + else: + # Handle special zones like UTC, GMT, etc. + if 'Other' not in grouped: + grouped['Other'] = [] + grouped['Other'].append(tz) + + return Response({ + 'timezones': all_timezones, + 'grouped': grouped, + 'count': len(all_timezones) + }) + + +# ───────────────────────────── +# System Events API +# ───────────────────────────── +@api_view(['GET']) +@permission_classes([IsAuthenticated]) +def get_system_events(request): + """ + Get recent system events (channel start/stop, buffering, client connections, etc.) + + Query Parameters: + limit: Number of events to return per page (default: 100, max: 1000) + offset: Number of events to skip (for pagination, default: 0) + event_type: Filter by specific event type (optional) + """ + from core.models import SystemEvent + + try: + # Get pagination params + limit = min(int(request.GET.get('limit', 100)), 1000) + offset = int(request.GET.get('offset', 0)) + + # Start with all events + events = SystemEvent.objects.all() + + # Filter by event_type if provided + event_type = request.GET.get('event_type') + if event_type: + events = events.filter(event_type=event_type) + + # Get total count before applying pagination + total_count = events.count() + + # Apply offset and limit for pagination + events = events[offset:offset + limit] + + # Serialize the data + events_data = [{ + 'id': event.id, + 'event_type': event.event_type, + 'event_type_display': event.get_event_type_display(), + 'timestamp': event.timestamp.isoformat(), + 'channel_id': str(event.channel_id) if event.channel_id else None, + 'channel_name': event.channel_name, + 'details': event.details + } for event in events] + + return Response({ + 'events': events_data, + 'count': len(events_data), + 'total': total_count, + 'offset': offset, + 'limit': limit + }) + + except Exception as e: + logger.error(f"Error fetching system events: {e}") + return Response({ + 'error': 'Failed to fetch system events' + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) diff --git a/core/migrations/0017_systemevent.py b/core/migrations/0017_systemevent.py new file mode 100644 index 000000000..9b97213c4 --- /dev/null +++ b/core/migrations/0017_systemevent.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-11-20 20:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0016_update_dvr_template_paths'), + ] + + operations = [ + migrations.CreateModel( + name='SystemEvent', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('event_type', models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), 
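The `get_system_events` endpoint above is a straightforward paginated list; a client-side sketch follows. The mount point is an assumption (the diff only adds `system-events/` to `core/api_urls.py`), and the token is a placeholder.

```python
import requests

resp = requests.get(
    "http://localhost:9191/api/core/system-events/",   # prefix assumed, not shown in the diff
    params={"limit": 50, "offset": 0, "event_type": "channel_start"},
    headers={"Authorization": "Bearer <access-token>"},
)
data = resp.json()
print(f"{data['count']} of {data['total']} events")
for event in data["events"]:
    print(event["timestamp"], event["event_type_display"], event["channel_name"] or "-")
```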
('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded')], db_index=True, max_length=50)), + ('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)), + ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)), + ('channel_name', models.CharField(blank=True, max_length=255, null=True)), + ('details', models.JSONField(blank=True, default=dict)), + ], + options={ + 'ordering': ['-timestamp'], + 'indexes': [models.Index(fields=['-timestamp'], name='core_system_timesta_c6c3d1_idx'), models.Index(fields=['event_type', '-timestamp'], name='core_system_event_t_4267d9_idx')], + }, + ), + ] diff --git a/core/migrations/0018_alter_systemevent_event_type.py b/core/migrations/0018_alter_systemevent_event_type.py new file mode 100644 index 000000000..3fe4eecd8 --- /dev/null +++ b/core/migrations/0018_alter_systemevent_event_type.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-11-21 15:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0017_systemevent'), + ] + + operations = [ + migrations.AlterField( + model_name='systemevent', + name='event_type', + field=models.CharField(choices=[('channel_start', 'Channel Started'), ('channel_stop', 'Channel Stopped'), ('channel_buffering', 'Channel Buffering'), ('channel_failover', 'Channel Failover'), ('channel_reconnect', 'Channel Reconnected'), ('channel_error', 'Channel Error'), ('client_connect', 'Client Connected'), ('client_disconnect', 'Client Disconnected'), ('recording_start', 'Recording Started'), ('recording_end', 'Recording Ended'), ('stream_switch', 'Stream Switched'), ('m3u_refresh', 'M3U Refreshed'), ('m3u_download', 'M3U Downloaded'), ('epg_refresh', 'EPG Refreshed'), ('epg_download', 'EPG Downloaded'), ('login_success', 'Login Successful'), ('login_failed', 'Login Failed'), ('logout', 'User Logged Out'), ('m3u_blocked', 'M3U Download Blocked'), ('epg_blocked', 'EPG Download Blocked')], db_index=True, max_length=50), + ), + ] diff --git a/core/models.py b/core/models.py index ba0406664..b9166f661 100644 --- a/core/models.py +++ b/core/models.py @@ -1,4 +1,5 @@ # core/models.py +from django.conf import settings from django.db import models from django.utils.text import slugify from django.core.exceptions import ValidationError @@ -158,8 +159,10 @@ def _replace_in_part(self, part, replacements): DVR_TV_FALLBACK_TEMPLATE_KEY = slugify("DVR TV Fallback Template") DVR_MOVIE_FALLBACK_TEMPLATE_KEY = slugify("DVR Movie Fallback Template") DVR_COMSKIP_ENABLED_KEY = slugify("DVR Comskip Enabled") +DVR_COMSKIP_CUSTOM_PATH_KEY = slugify("DVR Comskip Custom Path") DVR_PRE_OFFSET_MINUTES_KEY = slugify("DVR Pre-Offset Minutes") DVR_POST_OFFSET_MINUTES_KEY = slugify("DVR Post-Offset Minutes") +SYSTEM_TIME_ZONE_KEY = slugify("System Time Zone") class CoreSettings(models.Model): @@ -274,6 +277,27 @@ def get_dvr_comskip_enabled(cls): except cls.DoesNotExist: return False + @classmethod + def get_dvr_comskip_custom_path(cls): + """Return configured comskip.ini path or empty string if unset.""" + try: + return cls.objects.get(key=DVR_COMSKIP_CUSTOM_PATH_KEY).value + except cls.DoesNotExist: + return "" + + @classmethod + def set_dvr_comskip_custom_path(cls, path: str | None): + """Persist the comskip.ini path setting, normalizing nulls to empty string.""" + value = 
(path or "").strip() + obj, _ = cls.objects.get_or_create( + key=DVR_COMSKIP_CUSTOM_PATH_KEY, + defaults={"name": "DVR Comskip Custom Path", "value": value}, + ) + if obj.value != value: + obj.value = value + obj.save(update_fields=["value"]) + return value + @classmethod def get_dvr_pre_offset_minutes(cls): """Minutes to start recording before scheduled start (default 0).""" @@ -302,6 +326,30 @@ def get_dvr_post_offset_minutes(cls): except Exception: return 0 + @classmethod + def get_system_time_zone(cls): + """Return configured system time zone or fall back to Django settings.""" + try: + value = cls.objects.get(key=SYSTEM_TIME_ZONE_KEY).value + if value: + return value + except cls.DoesNotExist: + pass + return getattr(settings, "TIME_ZONE", "UTC") or "UTC" + + @classmethod + def set_system_time_zone(cls, tz_name: str | None): + """Persist the desired system time zone identifier.""" + value = (tz_name or "").strip() or getattr(settings, "TIME_ZONE", "UTC") or "UTC" + obj, _ = cls.objects.get_or_create( + key=SYSTEM_TIME_ZONE_KEY, + defaults={"name": "System Time Zone", "value": value}, + ) + if obj.value != value: + obj.value = value + obj.save(update_fields=["value"]) + return value + @classmethod def get_dvr_series_rules(cls): """Return list of series recording rules. Each: {tvg_id, title, mode: 'all'|'new'}""" @@ -327,3 +375,48 @@ def set_dvr_series_rules(cls, rules): return rules except Exception: return rules + + +class SystemEvent(models.Model): + """ + Tracks system events like channel start/stop, buffering, failover, client connections. + Maintains a rolling history based on max_system_events setting. + """ + EVENT_TYPES = [ + ('channel_start', 'Channel Started'), + ('channel_stop', 'Channel Stopped'), + ('channel_buffering', 'Channel Buffering'), + ('channel_failover', 'Channel Failover'), + ('channel_reconnect', 'Channel Reconnected'), + ('channel_error', 'Channel Error'), + ('client_connect', 'Client Connected'), + ('client_disconnect', 'Client Disconnected'), + ('recording_start', 'Recording Started'), + ('recording_end', 'Recording Ended'), + ('stream_switch', 'Stream Switched'), + ('m3u_refresh', 'M3U Refreshed'), + ('m3u_download', 'M3U Downloaded'), + ('epg_refresh', 'EPG Refreshed'), + ('epg_download', 'EPG Downloaded'), + ('login_success', 'Login Successful'), + ('login_failed', 'Login Failed'), + ('logout', 'User Logged Out'), + ('m3u_blocked', 'M3U Download Blocked'), + ('epg_blocked', 'EPG Download Blocked'), + ] + + event_type = models.CharField(max_length=50, choices=EVENT_TYPES, db_index=True) + timestamp = models.DateTimeField(auto_now_add=True, db_index=True) + channel_id = models.UUIDField(null=True, blank=True, db_index=True) + channel_name = models.CharField(max_length=255, null=True, blank=True) + details = models.JSONField(default=dict, blank=True) + + class Meta: + ordering = ['-timestamp'] + indexes = [ + models.Index(fields=['-timestamp']), + models.Index(fields=['event_type', '-timestamp']), + ] + + def __str__(self): + return f"{self.event_type} - {self.channel_name or 'N/A'} @ {self.timestamp}" diff --git a/core/utils.py b/core/utils.py index 36ac5fef4..7b6dd9b04 100644 --- a/core/utils.py +++ b/core/utils.py @@ -377,12 +377,59 @@ def validate_flexible_url(value): import re # More flexible pattern for non-FQDN hostnames with paths - # Matches: http://hostname, http://hostname/, http://hostname:port/path/to/file.xml - non_fqdn_pattern = r'^https?://[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\:[0-9]+)?(/[^\s]*)?$' + # Matches: http://hostname, 
https://hostname/, http://hostname:port/path/to/file.xml, rtp://192.168.2.1, rtsp://192.168.178.1, udp://239.0.0.1:1234 + # Also matches FQDNs for rtsp/rtp/udp protocols: rtsp://FQDN/path?query=value + # Also supports authentication: rtsp://user:pass@hostname/path + non_fqdn_pattern = r'^(rts?p|https?|udp)://([a-zA-Z0-9_\-\.]+:[^\s@]+@)?([a-zA-Z0-9]([a-zA-Z0-9\-\.]{0,61}[a-zA-Z0-9])?|[0-9.]+)?(\:[0-9]+)?(/[^\s]*)?$' non_fqdn_match = re.match(non_fqdn_pattern, value) if non_fqdn_match: - return # Accept non-FQDN hostnames + return # Accept non-FQDN hostnames and rtsp/rtp/udp URLs with optional authentication # If it doesn't match our flexible patterns, raise the original error raise ValidationError("Enter a valid URL.") + + +def log_system_event(event_type, channel_id=None, channel_name=None, **details): + """ + Log a system event and maintain the configured max history. + + Args: + event_type: Type of event (e.g., 'channel_start', 'client_connect') + channel_id: Optional UUID of the channel + channel_name: Optional name of the channel + **details: Additional details to store in the event (stored as JSON) + + Example: + log_system_event('channel_start', channel_id=uuid, channel_name='CNN', + stream_url='http://...', user='admin') + """ + from core.models import SystemEvent, CoreSettings + + try: + # Create the event + SystemEvent.objects.create( + event_type=event_type, + channel_id=channel_id, + channel_name=channel_name, + details=details + ) + + # Get max events from settings (default 100) + try: + max_events_setting = CoreSettings.objects.filter(key='max-system-events').first() + max_events = int(max_events_setting.value) if max_events_setting else 100 + except Exception: + max_events = 100 + + # Delete old events beyond the limit (keep it efficient with a single query) + total_count = SystemEvent.objects.count() + if total_count > max_events: + # Get the ID of the event at the cutoff point + cutoff_event = SystemEvent.objects.values_list('id', flat=True)[max_events] + # Delete all events with ID less than cutoff (older events) + SystemEvent.objects.filter(id__lt=cutoff_event).delete() + + except Exception as e: + # Don't let event logging break the main application + logger.error(f"Failed to log system event {event_type}: {e}") diff --git a/debian_install.sh b/debian_install.sh index 7f35075b9..bda506b1a 100755 --- a/debian_install.sh +++ b/debian_install.sh @@ -68,15 +68,16 @@ install_packages() { echo ">>> Installing system packages..." apt-get update declare -a packages=( - git curl wget build-essential gcc libpcre3-dev libpq-dev + git curl wget build-essential gcc libpq-dev python3-dev python3-venv python3-pip nginx redis-server postgresql postgresql-contrib ffmpeg procps streamlink + sudo ) apt-get install -y --no-install-recommends "${packages[@]}" if ! command -v node >/dev/null 2>&1; then echo ">>> Installing Node.js..." 
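Stepping back to the `log_system_event` helper added in `core/utils.py` above: callers pass an event type from `SystemEvent.EVENT_TYPES` plus arbitrary keyword arguments that land in the JSON `details` field. A usage sketch with illustrative values:

```python
import uuid
from core.utils import log_system_event

# All values below are illustrative; extra keyword arguments are stored in the
# event's JSON `details` field, and trimming to the max-system-events limit
# happens inside the helper itself.
log_system_event(
    "client_connect",
    channel_id=uuid.uuid4(),          # normally the real channel UUID
    channel_name="Example Channel",
    client_ip="203.0.113.7",
    user_agent="VLC/3.0.20",
)
```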
- curl -sL https://deb.nodesource.com/setup_23.x | bash - + curl -sL https://deb.nodesource.com/setup_24.x | bash - apt-get install -y nodejs fi @@ -186,7 +187,32 @@ EOSU } ############################################################################## -# 8) Django Migrations & Static +# 8) Create Directories +############################################################################## + +create_directories() { + mkdir -p /data/logos + mkdir -p /data/recordings + mkdir -p /data/uploads/m3us + mkdir -p /data/uploads/epgs + mkdir -p /data/m3us + mkdir -p /data/epgs + mkdir -p /data/plugins + mkdir -p /data/db + + # Needs to own ALL of /data except db + chown -R $DISPATCH_USER:$DISPATCH_GROUP /data + chown -R postgres:postgres /data/db + chmod +x /data + + mkdir -p "$APP_DIR"/logo_cache + mkdir -p "$APP_DIR"/media + chown -R $DISPATCH_USER:$DISPATCH_GROUP "$APP_DIR"/logo_cache + chown -R $DISPATCH_USER:$DISPATCH_GROUP "$APP_DIR"/media +} + +############################################################################## +# 9) Django Migrations & Static ############################################################################## django_migrate_collectstatic() { @@ -204,7 +230,7 @@ EOSU } ############################################################################## -# 9) Configure Services & Nginx +# 10) Configure Services & Nginx ############################################################################## configure_services() { @@ -360,7 +386,7 @@ EOF } ############################################################################## -# 10) Start Services +# 11) Start Services ############################################################################## start_services() { @@ -371,7 +397,7 @@ start_services() { } ############################################################################## -# 11) Summary +# 12) Summary ############################################################################## show_summary() { @@ -408,10 +434,11 @@ main() { clone_dispatcharr_repo setup_python_env build_frontend + create_directories django_migrate_collectstatic configure_services start_services show_summary } -main "$@" +main "$@" \ No newline at end of file diff --git a/dispatcharr/celery.py b/dispatcharr/celery.py index 98c6210b9..c845dafe9 100644 --- a/dispatcharr/celery.py +++ b/dispatcharr/celery.py @@ -50,13 +50,21 @@ def get_effective_log_level(): ) # Add memory cleanup after task completion -#@task_postrun.connect # Use the imported signal +@task_postrun.connect # Use the imported signal def cleanup_task_memory(**kwargs): - """Clean up memory after each task completes""" + """Clean up memory and database connections after each task completes""" + from django.db import connection + # Get task name from kwargs task_name = kwargs.get('task').name if kwargs.get('task') else '' - # Only run cleanup for memory-intensive tasks + # Close database connection for this Celery worker process + try: + connection.close() + except Exception: + pass + + # Only run memory cleanup for memory-intensive tasks memory_intensive_tasks = [ 'apps.m3u.tasks.refresh_single_m3u_account', 'apps.m3u.tasks.refresh_m3u_accounts', diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index 289c67942..d6c29dd9c 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -51,6 +51,11 @@ EPG_MEMORY_LIMIT = 512 # Memory limit in MB before forcing garbage collection EPG_ENABLE_MEMORY_MONITORING = True # Whether to monitor memory usage during processing +# XtreamCodes Rate Limiting Settings +# Delay between profile 
authentications when refreshing multiple profiles +# This prevents providers from temporarily banning users with many profiles +XC_PROFILE_REFRESH_DELAY = float(os.environ.get('XC_PROFILE_REFRESH_DELAY', '2.5')) # seconds between profile refreshes + # Database optimization settings DATABASE_STATEMENT_TIMEOUT = 300 # Seconds before timing out long-running queries DATABASE_CONN_MAX_AGE = ( @@ -134,6 +139,7 @@ "PASSWORD": os.environ.get("POSTGRES_PASSWORD", "secret"), "HOST": os.environ.get("POSTGRES_HOST", "localhost"), "PORT": int(os.environ.get("POSTGRES_PORT", 5432)), + "CONN_MAX_AGE": DATABASE_CONN_MAX_AGE, } } @@ -211,6 +217,10 @@ "task": "core.tasks.scan_and_process_files", # Direct task call "schedule": 20.0, # Every 20 seconds }, + "maintain-recurring-recordings": { + "task": "apps.channels.tasks.maintain_recurring_recordings", + "schedule": 3600.0, # Once an hour ensure recurring schedules stay ahead + }, } MEDIA_ROOT = BASE_DIR / "media" diff --git a/docker/docker-compose.aio.yml b/docker/docker-compose.aio.yml index 90cd8654e..fe5e1507d 100644 --- a/docker/docker-compose.aio.yml +++ b/docker/docker-compose.aio.yml @@ -14,6 +14,15 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE # Optional for hardware acceleration #devices: # - /dev/dri:/dev/dri # For Intel/AMD GPU acceleration (VA-API) diff --git a/docker/docker-compose.debug.yml b/docker/docker-compose.debug.yml index 163ebf6a4..d9dbef0e6 100644 --- a/docker/docker-compose.debug.yml +++ b/docker/docker-compose.debug.yml @@ -18,3 +18,12 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=trace + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 00394d55b..d1bb36808 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -17,6 +17,15 @@ services: - REDIS_HOST=localhost - CELERY_BROKER_URL=redis://localhost:6379/0 - DISPATCHARR_LOG_LEVEL=debug + # Process Priority Configuration (Optional) + # Lower values = higher priority. 
Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE pgadmin: image: dpage/pgadmin4 diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index dd989c81d..aaa63990d 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -17,6 +17,15 @@ services: - REDIS_HOST=redis - CELERY_BROKER_URL=redis://redis:6379/0 - DISPATCHARR_LOG_LEVEL=info + # Process Priority Configuration (Optional) + # Lower values = higher priority. Range: -20 (highest) to 19 (lowest) + # Negative values require cap_add: SYS_NICE (uncomment below) + #- UWSGI_NICE_LEVEL=-5 # uWSGI/FFmpeg/Streaming (default: 0, recommended: -5 for high priority) + #- CELERY_NICE_LEVEL=5 # Celery/EPG/Background tasks (default: 5, low priority) + # + # Uncomment to enable high priority for streaming (required if UWSGI_NICE_LEVEL < 0) + #cap_add: + # - SYS_NICE # Optional for hardware acceleration #group_add: # - video diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index fd0a883dd..fa0eea01f 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -40,6 +40,18 @@ export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' + +# Process priority configuration +# UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) +# CELERY_NICE_LEVEL: Absolute nice value for Celery/background tasks (default: 5 = low priority) +# Note: The script will automatically calculate the relative offset for Celery since it's spawned by uWSGI +export UWSGI_NICE_LEVEL=${UWSGI_NICE_LEVEL:-0} +CELERY_NICE_ABSOLUTE=${CELERY_NICE_LEVEL:-5} + +# Calculate relative nice value for Celery (since nice is relative to parent process) +# Celery is spawned by uWSGI, so we need to add the offset to reach the desired absolute value +export CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL)) + # Set LIBVA_DRIVER_NAME if user has specified it if [ -v LIBVA_DRIVER_NAME ]; then export LIBVA_DRIVER_NAME @@ -78,6 +90,7 @@ if [[ ! -f /etc/profile.d/dispatcharr.sh ]]; then DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL ) # Process each variable for both profile.d and environment @@ -96,7 +109,16 @@ fi chmod +x /etc/profile.d/dispatcharr.sh -pip install django-filter +# Ensure root's .bashrc sources the profile.d scripts for interactive non-login shells +if ! grep -q "profile.d/dispatcharr.sh" /root/.bashrc 2>/dev/null; then + cat >> /root/.bashrc << 'EOF' + +# Source Dispatcharr environment variables +if [ -f /etc/profile.d/dispatcharr.sh ]; then + . /etc/profile.d/dispatcharr.sh +fi +EOF +fi # Run init scripts echo "Starting user setup..." 
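# Illustrative sketch of the relative-nice arithmetic above (example values are
# assumed, not defaults from this patch): `nice -n N` adjusts priority relative
# to the parent process, and Celery is attached to uWSGI, so the exported value
# must be the offset that bridges uWSGI's absolute nice and the desired absolute
# nice for Celery.
#   UWSGI_NICE_LEVEL=-5                                             # assumed
#   CELERY_NICE_ABSOLUTE=5                                          # assumed
#   CELERY_NICE_LEVEL=$((CELERY_NICE_ABSOLUTE - UWSGI_NICE_LEVEL))  # 5 - (-5) = 10
#   # uWSGI runs at -5; `nice -n 10 celery ...` then lands Celery at -5 + 10 = +5.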
@@ -161,10 +183,12 @@ if [ "$DISPATCHARR_DEBUG" != "true" ]; then uwsgi_args+=" --disable-logging" fi -# Launch uwsgi -p passes environment variables to the process -su -p - $POSTGRES_USER -c "cd /app && uwsgi $uwsgi_args &" -uwsgi_pid=$(pgrep uwsgi | sort | head -n1) -echo "✅ uwsgi started with PID $uwsgi_pid" +# Launch uwsgi with configurable nice level (default: 0 for normal priority) +# Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose +# Start with nice as root, then use setpriv to drop privileges to dispatch user +# This preserves both the nice value and environment variables +nice -n $UWSGI_NICE_LEVEL su -p - "$POSTGRES_USER" -c "cd /app && exec uwsgi $uwsgi_args" & uwsgi_pid=$! +echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") # sed -i 's/protected-mode yes/protected-mode no/g' /etc/redis/redis.conf @@ -209,7 +233,7 @@ echo "🔍 Running hardware acceleration check..." # Wait for at least one process to exit and log the process that exited first if [ ${#pids[@]} -gt 0 ]; then - echo "⏳ Waiting for processes to exit..." + echo "⏳ Dispatcharr is running. Monitoring processes..." while kill -0 "${pids[@]}" 2>/dev/null; do sleep 1 # Wait for a second before checking again done diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index 629c5a51d..5fbef23d1 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -1,25 +1,67 @@ #!/bin/bash -mkdir -p /data/logos -mkdir -p /data/recordings -mkdir -p /data/uploads/m3us -mkdir -p /data/uploads/epgs -mkdir -p /data/m3us -mkdir -p /data/epgs -mkdir -p /data/plugins -mkdir -p /app/logo_cache -mkdir -p /app/media +# Define directories that need to exist and be owned by PUID:PGID +DATA_DIRS=( + "/data/logos" + "/data/recordings" + "/data/uploads/m3us" + "/data/uploads/epgs" + "/data/m3us" + "/data/epgs" + "/data/plugins" + "/data/models" +) + +APP_DIRS=( + "/app/logo_cache" + "/app/media" +) + +# Create all directories +for dir in "${DATA_DIRS[@]}" "${APP_DIRS[@]}"; do + mkdir -p "$dir" +done + +# Ensure /app itself is owned by PUID:PGID (needed for uwsgi socket creation) +if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then + if [ "$(stat -c '%u:%g' /app)" != "$PUID:$PGID" ]; then + echo "Fixing ownership for /app (non-recursive)" + chown $PUID:$PGID /app + fi +fi sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then - # Needs to own ALL of /data except db, we handle that below - chown -R $PUID:$PGID /data - chown -R $PUID:$PGID /app + # Fix data directories (non-recursive to avoid touching user files) + for dir in "${DATA_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir" + chown $PUID:$PGID "$dir" + fi + done + + # Fix app directories (recursive since they're managed by the app) + for dir in "${APP_DIRS[@]}"; do + if [ -d "$dir" ] && [ "$(stat -c '%u:%g' "$dir")" != "$PUID:$PGID" ]; then + echo "Fixing ownership for $dir (recursive)" + chown -R $PUID:$PGID "$dir" + fi + done + + # Database permissions + if [ -d /data/db ] && [ "$(stat -c '%u' /data/db)" != "$(id -u postgres)" ]; then + echo "Fixing ownership for /data/db" + chown -R postgres:postgres /data/db + fi + + # Fix /data directory ownership (non-recursive) + if [ -d "/data" ] && [ "$(stat -c '%u:%g' /data)" != "$PUID:$PGID" ]; then + echo 
"Fixing ownership for /data (non-recursive)" + chown $PUID:$PGID /data + fi - # Permissions - chown -R postgres:postgres /data/db chmod +x /data -fi +fi \ No newline at end of file diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index fa94df929..3de890a53 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -7,9 +7,10 @@ exec-before = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = nice -n 5 celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index 6eca871df..e476e216c 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = nice -n 5 celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application attach-daemon = cd /app/frontend && npm run dev diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index f763c3bc4..f8fe8ab7c 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -9,9 +9,10 @@ exec-pre = python /app/scripts/wait_for_redis.py ; Start Redis first attach-daemon = redis-server -; Then start other services -attach-daemon = nice -n 5 celery -A dispatcharr worker --autoscale=6,1 -attach-daemon = nice -n 5 celery -A dispatcharr beat +; Then start other services with configurable nice level (default: 5 for low priority) +; Users can override via CELERY_NICE_LEVEL environment variable in docker-compose +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr worker --autoscale=6,1 +attach-daemon = nice -n $(CELERY_NICE_LEVEL) celery -A dispatcharr beat attach-daemon = daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application # Core settings diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 4b701533b..3c7c38778 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -112,15 +112,21 @@ const App = () => { height: 0, }} navbar={{ - width: open ? drawerWidth : miniDrawerWidth, + width: isAuthenticated + ? open + ? 
drawerWidth + : miniDrawerWidth + : 0, }} > - + {isAuthenticated && ( + + )} { const updateEPG = useEPGsStore((s) => s.updateEPG); const updateEPGProgress = useEPGsStore((s) => s.updateEPGProgress); - const playlists = usePlaylistsStore((s) => s.playlists); const updatePlaylist = usePlaylistsStore((s) => s.updatePlaylist); // Calculate reconnection delay with exponential backoff @@ -247,10 +246,14 @@ export const WebsocketProvider = ({ children }) => { // Update the playlist status whenever we receive a status update // Not just when progress is 100% or status is pending_setup if (parsedEvent.data.status && parsedEvent.data.account) { - // Check if playlists is an object with IDs as keys or an array - const playlist = Array.isArray(playlists) - ? playlists.find((p) => p.id === parsedEvent.data.account) - : playlists[parsedEvent.data.account]; + // Get fresh playlists from store to avoid stale state from React render cycle + const currentPlaylists = usePlaylistsStore.getState().playlists; + const isArray = Array.isArray(currentPlaylists); + const playlist = isArray + ? currentPlaylists.find( + (p) => p.id === parsedEvent.data.account + ) + : currentPlaylists[parsedEvent.data.account]; if (playlist) { // When we receive a "success" status with 100% progress, this is a completed refresh @@ -273,19 +276,19 @@ export const WebsocketProvider = ({ children }) => { 'M3U refresh completed successfully:', updateData ); + fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date + fetchChannelProfiles(); // Ensure channel profiles are updated } updatePlaylist(updateData); - fetchPlaylists(); // Refresh playlists to ensure UI is up-to-date - fetchChannelProfiles(); // Ensure channel profiles are updated } else { - // Log when playlist can't be found for debugging purposes - console.warn( - `Received update for unknown playlist ID: ${parsedEvent.data.account}`, - Array.isArray(playlists) - ? 'playlists is array' - : 'playlists is object', - Object.keys(playlists).length + // Playlist not in store yet - this happens when backend sends websocket + // updates immediately after creating the playlist, before the API response + // returns. The frontend will receive a 'playlist_created' event shortly + // which will trigger a fetchPlaylists() to sync the store. + console.log( + `Received update for playlist ID ${parsedEvent.data.account} not yet in store. 
` + + `Waiting for playlist_created event to sync...` ); } } @@ -566,14 +569,22 @@ export const WebsocketProvider = ({ children }) => { break; case 'epg_refresh': - // Update the store with progress information - updateEPGProgress(parsedEvent.data); - - // If we have source_id/account info, update the EPG source status - if (parsedEvent.data.source_id || parsedEvent.data.account) { + // If we have source/account info, check if EPG exists before processing + if (parsedEvent.data.source || parsedEvent.data.account) { const sourceId = - parsedEvent.data.source_id || parsedEvent.data.account; + parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; + + // Only update progress if the EPG still exists in the store + // This prevents crashes when receiving updates for deleted EPGs + if (epg) { + // Update the store with progress information + updateEPGProgress(parsedEvent.data); + } else { + // EPG was deleted, ignore this update + console.debug(`Ignoring EPG refresh update for deleted EPG ${sourceId}`); + break; + } if (epg) { // Check for any indication of an error (either via status or error field) @@ -639,6 +650,16 @@ export const WebsocketProvider = ({ children }) => { } break; + case 'epg_data_created': + // A new EPG data entry was created (e.g., for a dummy EPG) + // Fetch EPG data so the channel form can immediately assign it + try { + await fetchEPGData(); + } catch (e) { + console.warn('Failed to refresh EPG data after creation:', e); + } + break; + case 'stream_rehash': // Handle stream rehash progress updates if (parsedEvent.data.action === 'starting') { @@ -739,6 +760,14 @@ export const WebsocketProvider = ({ children }) => { break; + case 'playlist_created': + // Backend signals that a new playlist has been created and we should refresh + console.log( + 'Playlist created event received, refreshing playlists...' 
+ ); + fetchPlaylists(); + break; + case 'bulk_channel_creation_progress': { // Handle progress updates with persistent notifications like stream rehash const data = parsedEvent.data; diff --git a/frontend/src/api.js b/frontend/src/api.js index 01186bf6f..7eda6a3f1 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -170,7 +170,7 @@ export default class API { static async logout() { return await request(`${host}/api/accounts/auth/logout/`, { - auth: false, + auth: true, // Send JWT token so backend can identify the user method: 'POST', }); } @@ -462,7 +462,16 @@ export default class API { } ); - // Don't automatically update the store here - let the caller handle it + // Show success notification + if (response.message) { + notifications.show({ + title: 'Channels Updated', + message: response.message, + color: 'green', + autoClose: 4000, + }); + } + return response; } catch (e) { errorNotification('Failed to update channels', e); @@ -562,6 +571,29 @@ export default class API { } } + static async setChannelTvgIdsFromEpg(channelIds) { + try { + const response = await request( + `${host}/api/channels/channels/set-tvg-ids-from-epg/`, + { + method: 'POST', + body: { channel_ids: channelIds }, + } + ); + + notifications.show({ + title: 'Task Started', + message: response.message, + color: 'blue', + }); + + return response; + } catch (e) { + errorNotification('Failed to start EPG TVG-ID setting task', e); + throw e; + } + } + static async assignChannelNumbers(channelIds, startingNum = 1) { try { const response = await request(`${host}/api/channels/channels/assign/`, { @@ -1021,8 +1053,20 @@ export default class API { } static async updateEPG(values, isToggle = false) { + // Validate that values is an object + if (!values || typeof values !== 'object') { + console.error('updateEPG called with invalid values:', values); + return; + } + const { id, ...payload } = values; + // Validate that we have an ID and payload is an object + if (!id || typeof payload !== 'object') { + console.error('updateEPG: invalid id or payload', { id, payload }); + return; + } + try { // If this is just toggling the active state, make a simpler request if ( @@ -1095,6 +1139,21 @@ export default class API { } } + static async getTimezones() { + try { + const response = await request(`${host}/api/core/timezones/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve timezones', e); + // Return fallback data instead of throwing + return { + timezones: ['UTC', 'US/Eastern', 'US/Central', 'US/Mountain', 'US/Pacific'], + grouped: {}, + count: 5 + }; + } + } + static async getStreamProfiles() { try { const response = await request(`${host}/api/core/streamprofiles/`); @@ -1750,6 +1809,77 @@ export default class API { } } + // VOD Logo Methods + static async getVODLogos(params = {}) { + try { + // Transform usage filter to match backend expectations + const apiParams = { ...params }; + if (apiParams.usage === 'used') { + apiParams.used = 'true'; + delete apiParams.usage; + } else if (apiParams.usage === 'unused') { + apiParams.used = 'false'; + delete apiParams.usage; + } else if (apiParams.usage === 'movies') { + apiParams.used = 'movies'; + delete apiParams.usage; + } else if (apiParams.usage === 'series') { + apiParams.used = 'series'; + delete apiParams.usage; + } + + const queryParams = new URLSearchParams(apiParams); + const response = await request( + `${host}/api/vod/vodlogos/?${queryParams.toString()}` + ); + + return response; + } catch (e) { + errorNotification('Failed to retrieve VOD 
logos', e); + throw e; + } + } + + static async deleteVODLogo(id) { + try { + await request(`${host}/api/vod/vodlogos/${id}/`, { + method: 'DELETE', + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logo', e); + throw e; + } + } + + static async deleteVODLogos(ids) { + try { + await request(`${host}/api/vod/vodlogos/bulk-delete/`, { + method: 'DELETE', + body: { logo_ids: ids }, + }); + + return true; + } catch (e) { + errorNotification('Failed to delete VOD logos', e); + throw e; + } + } + + static async cleanupUnusedVODLogos() { + try { + const response = await request(`${host}/api/vod/vodlogos/cleanup/`, { + method: 'POST', + }); + + return response; + } catch (e) { + errorNotification('Failed to cleanup unused VOD logos', e); + throw e; + } + } + static async getChannelProfiles() { try { const response = await request(`${host}/api/channels/profiles/`); @@ -1873,6 +2003,83 @@ export default class API { } } + static async updateRecording(id, values) { + try { + const response = await request(`${host}/api/channels/recordings/${id}/`, { + method: 'PATCH', + body: values, + }); + useChannelsStore.getState().fetchRecordings(); + return response; + } catch (e) { + errorNotification(`Failed to update recording ${id}`, e); + } + } + + static async getComskipConfig() { + try { + return await request(`${host}/api/channels/dvr/comskip-config/`); + } catch (e) { + errorNotification('Failed to retrieve comskip configuration', e); + } + } + + static async uploadComskipIni(file) { + try { + const formData = new FormData(); + formData.append('file', file); + return await request(`${host}/api/channels/dvr/comskip-config/`, { + method: 'POST', + body: formData, + }); + } catch (e) { + errorNotification('Failed to upload comskip.ini', e); + } + } + + static async listRecurringRules() { + try { + const response = await request(`${host}/api/channels/recurring-rules/`); + return response; + } catch (e) { + errorNotification('Failed to retrieve recurring DVR rules', e); + } + } + + static async createRecurringRule(payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/`, { + method: 'POST', + body: payload, + }); + return response; + } catch (e) { + errorNotification('Failed to create recurring DVR rule', e); + } + } + + static async updateRecurringRule(ruleId, payload) { + try { + const response = await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { + method: 'PATCH', + body: payload, + }); + return response; + } catch (e) { + errorNotification(`Failed to update recurring rule ${ruleId}`, e); + } + } + + static async deleteRecurringRule(ruleId) { + try { + await request(`${host}/api/channels/recurring-rules/${ruleId}/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification(`Failed to delete recurring rule ${ruleId}`, e); + } + } + static async deleteRecording(id) { try { await request(`${host}/api/channels/recordings/${id}/`, { method: 'DELETE' }); @@ -2017,9 +2224,15 @@ export default class API { // If successful, requery channels to update UI if (response.success) { + // Build message based on whether EPG sources need refreshing + let message = `Updated ${response.channels_updated} channel${response.channels_updated !== 1 ? 's' : ''}`; + if (response.programs_refreshed > 0) { + message += `, refreshing ${response.programs_refreshed} EPG source${response.programs_refreshed !== 1 ? 
's' : ''}`; + } + notifications.show({ title: 'EPG Association', - message: `Updated ${response.channels_updated} channels, refreshing ${response.programs_refreshed} EPG sources.`, + message: message, color: 'blue', }); @@ -2280,4 +2493,21 @@ export default class API { errorNotification('Failed to update playback position', e); } } + + static async getSystemEvents(limit = 100, offset = 0, eventType = null) { + try { + const params = new URLSearchParams(); + params.append('limit', limit); + params.append('offset', offset); + if (eventType) { + params.append('event_type', eventType); + } + const response = await request( + `${host}/api/core/system-events/?${params.toString()}` + ); + return response; + } catch (e) { + errorNotification('Failed to retrieve system events', e); + } + } } diff --git a/frontend/src/components/SeriesModal.jsx b/frontend/src/components/SeriesModal.jsx index dcfebf86a..050237121 100644 --- a/frontend/src/components/SeriesModal.jsx +++ b/frontend/src/components/SeriesModal.jsx @@ -17,7 +17,9 @@ import { Table, Divider, } from '@mantine/core'; -import { Play } from 'lucide-react'; +import { Play, Copy } from 'lucide-react'; +import { notifications } from '@mantine/notifications'; +import { copyToClipboard } from '../utils'; import useVODStore from '../store/useVODStore'; import useVideoStore from '../store/useVideoStore'; import useSettingsStore from '../store/settings'; @@ -262,6 +264,39 @@ const SeriesModal = ({ series, opened, onClose }) => { showVideo(streamUrl, 'vod', episode); }; + const getEpisodeStreamUrl = (episode) => { + let streamUrl = `/proxy/vod/episode/${episode.uuid}`; + + // Add selected provider as query parameter if available + if (selectedProvider) { + // Use stream_id for most specific selection, fallback to account_id + if (selectedProvider.stream_id) { + streamUrl += `?stream_id=${encodeURIComponent(selectedProvider.stream_id)}`; + } else { + streamUrl += `?m3u_account_id=${selectedProvider.m3u_account.id}`; + } + } + + if (env_mode === 'dev') { + streamUrl = `${window.location.protocol}//${window.location.hostname}:5656${streamUrl}`; + } else { + streamUrl = `${window.location.origin}${streamUrl}`; + } + return streamUrl; + }; + + const handleCopyEpisodeLink = async (episode) => { + const streamUrl = getEpisodeStreamUrl(episode); + const success = await copyToClipboard(streamUrl); + notifications.show({ + title: success ? 'Link Copied!' : 'Copy Failed', + message: success + ? 'Episode link copied to clipboard' + : 'Failed to copy link to clipboard', + color: success ? 'green' : 'red', + }); + }; + const handleEpisodeRowClick = (episode) => { setExpandedEpisode(expandedEpisode === episode.id ? 
null : episode.id); }; @@ -611,20 +646,34 @@ const SeriesModal = ({ series, opened, onClose }) => { - 0 && !selectedProvider - } - onClick={(e) => { - e.stopPropagation(); - handlePlayEpisode(episode); - }} - > - - + + 0 && + !selectedProvider + } + onClick={(e) => { + e.stopPropagation(); + handlePlayEpisode(episode); + }} + > + + + { + e.stopPropagation(); + handleCopyEpisodeLink(episode); + }} + > + + + {expandedEpisode === episode.id && ( @@ -879,7 +928,8 @@ const SeriesModal = ({ series, opened, onClose }) => { src={trailerUrl} title="YouTube Trailer" frameBorder="0" - allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" + allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" + referrerPolicy="strict-origin-when-cross-origin" allowFullScreen style={{ position: 'absolute', diff --git a/frontend/src/components/Sidebar.jsx b/frontend/src/components/Sidebar.jsx index 143d01ab8..d8c3fae84 100644 --- a/frontend/src/components/Sidebar.jsx +++ b/frontend/src/components/Sidebar.jsx @@ -188,8 +188,8 @@ const Sidebar = ({ collapsed, toggleDrawer, drawerWidth, miniDrawerWidth }) => { } }; - const onLogout = () => { - logout(); + const onLogout = async () => { + await logout(); window.location.reload(); }; diff --git a/frontend/src/components/SystemEvents.jsx b/frontend/src/components/SystemEvents.jsx new file mode 100644 index 000000000..855603c0d --- /dev/null +++ b/frontend/src/components/SystemEvents.jsx @@ -0,0 +1,333 @@ +import React, { useState, useEffect, useCallback } from 'react'; +import { + ActionIcon, + Box, + Button, + Card, + Group, + NumberInput, + Pagination, + Select, + Stack, + Text, + Title, +} from '@mantine/core'; +import { useElementSize } from '@mantine/hooks'; +import { + ChevronDown, + CirclePlay, + Download, + Gauge, + HardDriveDownload, + List, + LogIn, + LogOut, + RefreshCw, + Shield, + ShieldAlert, + SquareX, + Timer, + Users, + Video, + XCircle, +} from 'lucide-react'; +import dayjs from 'dayjs'; +import API from '../api'; +import useLocalStorage from '../hooks/useLocalStorage'; + +const SystemEvents = () => { + const [events, setEvents] = useState([]); + const [totalEvents, setTotalEvents] = useState(0); + const [isExpanded, setIsExpanded] = useState(false); + const { ref: cardRef, width: cardWidth } = useElementSize(); + const isNarrow = cardWidth < 650; + const [isLoading, setIsLoading] = useState(false); + const [dateFormatSetting] = useLocalStorage('date-format', 'mdy'); + const dateFormat = dateFormatSetting === 'mdy' ? 
'MM/DD' : 'DD/MM'; + const [eventsRefreshInterval, setEventsRefreshInterval] = useLocalStorage( + 'events-refresh-interval', + 0 + ); + const [eventsLimit, setEventsLimit] = useLocalStorage('events-limit', 100); + const [currentPage, setCurrentPage] = useState(1); + + // Calculate offset based on current page and limit + const offset = (currentPage - 1) * eventsLimit; + const totalPages = Math.ceil(totalEvents / eventsLimit); + + const fetchEvents = useCallback(async () => { + try { + setIsLoading(true); + const response = await API.getSystemEvents(eventsLimit, offset); + if (response && response.events) { + setEvents(response.events); + setTotalEvents(response.total || 0); + } + } catch (error) { + console.error('Error fetching system events:', error); + } finally { + setIsLoading(false); + } + }, [eventsLimit, offset]); + + // Fetch events on mount and when eventsRefreshInterval changes + useEffect(() => { + fetchEvents(); + + // Set up polling if interval is set and events section is expanded + if (eventsRefreshInterval > 0 && isExpanded) { + const interval = setInterval(fetchEvents, eventsRefreshInterval * 1000); + return () => clearInterval(interval); + } + }, [fetchEvents, eventsRefreshInterval, isExpanded]); + + // Reset to first page when limit changes + useEffect(() => { + setCurrentPage(1); + }, [eventsLimit]); + + const getEventIcon = (eventType) => { + switch (eventType) { + case 'channel_start': + return ; + case 'channel_stop': + return ; + case 'channel_reconnect': + return ; + case 'channel_buffering': + return ; + case 'channel_failover': + return ; + case 'client_connect': + return ; + case 'client_disconnect': + return ; + case 'recording_start': + return