Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,3 @@ YOUTUBE_API_KEY=
# Gemini API key (optional - for AI companion and exams)
# Get from: https://aistudio.google.com/app/apikey
GEMINI_API_KEY=

10 changes: 10 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,16 @@ jobs:
with:
fetch-depth: 0

- name: Free Disk Space (Linux)
if: matrix.platform == 'linux'
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
shell: bash

- name: Configure Cargo git CLI
run: |
mkdir -p .cargo
Expand Down
15 changes: 11 additions & 4 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -87,11 +87,18 @@ keyring = { version = "3", features = ["windows-native"] }
[target.'cfg(target_os = "linux")'.dependencies]
keyring = { version = "3", features = ["linux-native"] }

[profile.dev]
debug = 1 # Reduce debug info to save disk space and prevent Bus Error on CI

[profile.test]
debug = 1 # Reduce debug info for tests as well

[profile.release]
opt-level = "z" # Optimize for size
lto = true # Enable Link Time Optimization
codegen-units = 1 # Allow for maximum cross-module optimization
panic = "abort" # Remove unwinding code for smaller binaries
opt-level = "z" # Optimize for size
lto = "thin" # Use Thin LTO to reduce memory/disk pressure during linking
codegen-units = 16 # Increase units to parallelize and reduce resource usage
panic = "abort" # Remove unwinding code for smaller binaries
strip = true # Strip symbols to further reduce binary size

[patch.crates-io]
dioxus = { git = "https://github.com/k5602/dioxus", branch = "navigation_policy_hook", package = "dioxus" }
Expand Down
8 changes: 8 additions & 0 deletions diesel.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,14 @@
[print_schema]
file = "src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
filter = { except_tables = [
"search_index",
"search_index_config",
"search_index_content",
"search_index_data",
"search_index_docsize",
"search_index_idx",
] }

[migrations_directory]
dir = "migrations"
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
-- Revert videos.youtube_id to NOT NULL by rebuilding the table (SQLite)
-- SQLite cannot change a column's NOT NULL constraint in place, so this uses
-- the documented table-rebuild recipe: create a replacement table with the
-- desired schema, copy the rows across, drop the original, rename the
-- replacement, and recreate the indexes.
-- NOTE(review): PRAGMA foreign_keys is a no-op while a transaction is open;
-- if the migration runner wraps this file in a transaction (diesel does by
-- default), these pragmas have no effect -- confirm the runner's behavior.
PRAGMA foreign_keys=off;

-- Replacement table: identical shape to videos except youtube_id is
-- NOT NULL again.
CREATE TABLE videos_old (
id TEXT PRIMARY KEY NOT NULL,
module_id TEXT NOT NULL REFERENCES modules(id) ON DELETE CASCADE,
youtube_id TEXT NOT NULL,
title TEXT NOT NULL,
duration_secs INTEGER NOT NULL,
is_completed BOOLEAN NOT NULL DEFAULT FALSE,
sort_order INTEGER NOT NULL,
description TEXT,
transcript TEXT,
summary TEXT,
source_type TEXT NOT NULL DEFAULT 'youtube',
source_ref TEXT NOT NULL DEFAULT ''
);

-- Copy every row; both column lists are spelled out explicitly so the copy
-- does not depend on physical column order.
INSERT INTO videos_old (
id,
module_id,
youtube_id,
title,
duration_secs,
is_completed,
sort_order,
description,
transcript,
summary,
source_type,
source_ref
)
SELECT
id,
module_id,
-- Rows that gained a NULL youtube_id under the up migration are coerced to
-- an empty string so the restored NOT NULL constraint is satisfied.
COALESCE(youtube_id, ''),
title,
duration_secs,
is_completed,
sort_order,
description,
transcript,
summary,
source_type,
source_ref
FROM videos;

-- Swap the rebuilt table into place under the original name.
DROP TABLE videos;
ALTER TABLE videos_old RENAME TO videos;

-- Indexes are dropped together with the old table; recreate them on the
-- rebuilt one.
CREATE INDEX idx_videos_module_id ON videos(module_id);
CREATE INDEX idx_videos_youtube_id ON videos(youtube_id);

PRAGMA foreign_keys=on;
54 changes: 54 additions & 0 deletions migrations/2026-02-02-000000-0000_make_youtube_id_nullable/up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
-- Make videos.youtube_id nullable by rebuilding the table
-- SQLite cannot drop a column's NOT NULL constraint in place, so this uses
-- the documented table-rebuild recipe: create a replacement table with the
-- desired schema, copy the rows across, drop the original, rename the
-- replacement, and recreate the indexes.
-- NOTE(review): PRAGMA foreign_keys is a no-op while a transaction is open;
-- if the migration runner wraps this file in a transaction (diesel does by
-- default), these pragmas have no effect -- confirm the runner's behavior.
PRAGMA foreign_keys=off;

-- Replacement table: identical shape to videos except youtube_id allows
-- NULL (needed for non-YouTube sources such as local media).
CREATE TABLE videos_new (
id TEXT PRIMARY KEY NOT NULL,
module_id TEXT NOT NULL REFERENCES modules(id) ON DELETE CASCADE,
youtube_id TEXT,
title TEXT NOT NULL,
duration_secs INTEGER NOT NULL,
is_completed BOOLEAN NOT NULL DEFAULT FALSE,
sort_order INTEGER NOT NULL,
description TEXT,
transcript TEXT,
summary TEXT,
source_type TEXT NOT NULL DEFAULT 'youtube',
source_ref TEXT NOT NULL DEFAULT ''
);

-- Copy every row unchanged; both column lists are spelled out explicitly so
-- the copy does not depend on physical column order. Existing youtube_id
-- values are preserved as-is (widening NOT NULL -> nullable is lossless).
INSERT INTO videos_new (
id,
module_id,
youtube_id,
title,
duration_secs,
is_completed,
sort_order,
description,
transcript,
summary,
source_type,
source_ref
)
SELECT
id,
module_id,
youtube_id,
title,
duration_secs,
is_completed,
sort_order,
description,
transcript,
summary,
source_type,
source_ref
FROM videos;

-- Swap the rebuilt table into place under the original name.
DROP TABLE videos;
ALTER TABLE videos_new RENAME TO videos;

-- Indexes are dropped together with the old table; recreate them on the
-- rebuilt one.
CREATE INDEX idx_videos_module_id ON videos(module_id);
CREATE INDEX idx_videos_youtube_id ON videos(youtube_id);

PRAGMA foreign_keys=on;
19 changes: 17 additions & 2 deletions src/application/use_cases/ingest_local.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ use crate::domain::{
CourseRepository, LocalMediaScanner, ModuleRepository, RawLocalMediaMetadata,
SearchRepository, VideoRepository,
},
services::{BoundaryDetector, SubtitleCleaner, TitleSanitizer},
services::{BoundaryDetector, SubtitleCleaner, TitleSanitizer, title_number_sequence},
value_objects::{CourseId, ModuleId, PlaylistUrl, VideoId, VideoSource},
};

Expand Down Expand Up @@ -217,6 +217,20 @@ fn group_by_folder(
grouped.entry(folder).or_default().push(item.clone());
}

for entries in grouped.values_mut() {
entries.sort_by(|a, b| {
let a_key = title_number_sequence(&a.title);
let b_key = title_number_sequence(&b.title);

match (a_key, b_key) {
(Some(a_seq), Some(b_seq)) => a_seq.cmp(&b_seq).then_with(|| a.title.cmp(&b.title)),
(Some(_), None) => std::cmp::Ordering::Less,
(None, Some(_)) => std::cmp::Ordering::Greater,
(None, None) => a.title.cmp(&b.title),
}
});
}

grouped
}

Expand Down Expand Up @@ -245,7 +259,8 @@ fn split_root_group_if_needed(
}

let detector = BoundaryDetector::new();
let groups = detector.group_into_modules(items.len());
let raw_titles: Vec<&str> = items.iter().map(|item| item.title.as_str()).collect();
let groups = detector.group_by_titles(&raw_titles);
if groups.len() <= 1 {
return grouped.clone();
}
Expand Down
5 changes: 3 additions & 2 deletions src/application/use_cases/ingest_playlist.rs
Original file line number Diff line number Diff line change
Expand Up @@ -105,9 +105,10 @@ where
// 3. Sanitize titles
let sanitized_titles: Vec<String> =
raw_videos.iter().map(|v| self.sanitizer.sanitize(&v.title)).collect();
let raw_titles: Vec<&str> = raw_videos.iter().map(|v| v.title.as_str()).collect();

// 4. Group videos into modules (simple batch grouping)
let module_groups = self.boundary_detector.group_into_modules(raw_videos.len());
// 4. Group videos into modules (title-aware with batch fallback)
let module_groups = self.boundary_detector.group_by_titles(&raw_titles);

// 5. Create course
let course_name = input
Expand Down
Loading
Loading