diff --git a/Cargo.lock b/Cargo.lock index 1edc155..4f37aff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -172,6 +172,19 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.59.0", +] + [[package]] name = "crossterm" version = "0.28.1" @@ -182,7 +195,7 @@ dependencies = [ "crossterm_winapi", "mio", "parking_lot", - "rustix", + "rustix 0.38.44", "signal-hook", "signal-hook-mio", "winapi", @@ -231,6 +244,19 @@ dependencies = [ "syn", ] +[[package]] +name = "dialoguer" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" +dependencies = [ + "console", + "shell-words", + "tempfile", + "thiserror 1.0.69", + "zeroize", +] + [[package]] name = "dirs" version = "6.0.0" @@ -258,6 +284,7 @@ version = "2.3.1" dependencies = [ "clap", "crossterm", + "dialoguer", "dirs", "fs2", "http-body-util", @@ -265,7 +292,9 @@ dependencies = [ "hyper-util", "ratatui", "serde", - "thiserror", + "serde_json", + "tempfile", + "thiserror 2.0.18", "tokio", "toml", "tracing", @@ -279,6 +308,12 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "equivalent" version = "1.0.2" @@ -295,6 +330,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + [[package]] name = "foldhash" version = "0.1.5" @@ -550,6 +591,12 @@ version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + [[package]] name = "lock_api" version = "0.4.14" @@ -731,7 +778,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.17", "libredox", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -760,10 +807,23 @@ dependencies = [ "bitflags", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.15", "windows-sys 0.59.0", ] +[[package]] +name = "rustix" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", +] + [[package]] name = "rustversion" version = "1.0.22" @@ -812,6 +872,19 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + [[package]] name = "serde_spanned" version = "0.6.9" @@ -830,6 +903,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shell-words" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6fe69c597f9c37bfeeeeeb33da3530379845f10be461a66d16d03eca2ded77" + [[package]] name = "signal-hook" version = "0.3.18" @@ -922,13 +1001,46 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "tempfile" +version = "3.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix 1.1.3", + "windows-sys 0.61.2", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + [[package]] name = "thiserror" version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl", + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1409,3 +1521,15 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml index 8b3c091..817e420 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,12 +9,14 @@ license = "MIT" [dependencies] clap = { version = "4", features = ["derive"] } crossterm = "0.28" +dialoguer = "0.11" dirs = "6" fs2 = "0.4" http-body-util = "0.1" hyper = { version = "1", features = ["http1", "server", "client"] } hyper-util = { version = "0.1", features = ["tokio", "http1"] } serde = { version = "1", features = ["derive"] } +serde_json = "1" thiserror = "2" ratatui = "0.29" tokio = { version = "1", features = ["macros", "rt-multi-thread", "net"] } @@ -23,6 +25,7 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } [dev-dependencies] +tempfile = "3" uuid = { version = "1", features = ["v4"] } # The profile that 'dist' will build with diff --git a/README.md b/README.md index b1a2025..29c1955 100644 --- a/README.md +++ b/README.md @@ -32,9 +32,9 @@ cargo install --path . ## Quick Start ```bash -# 1. Register your repo +# 1. Initialize your repo as a dual workspace cd ~/code/my-project -dual add +dual init # 2. Create a branch workspace dual create feat/auth @@ -88,7 +88,7 @@ bind-key Space display-popup -E -w 60% -h 60% "dual" | Command | Description | |---------|-------------| | `dual` | Open TUI workspace browser | -| `dual add [--name NAME]` | Register current git repo as a workspace | +| `dual init [--name NAME]` | Initialize current git repo as a workspace | | `dual create [--repo NAME]` | Create a new branch workspace | | `dual launch [workspace]` | Launch a workspace (auto-detects from cwd) | | `dual list` | List all workspaces with status (non-interactive) | @@ -100,47 +100,51 @@ bind-key Space display-popup -E -w 60% -h 60% "dual" ## Configuration -Dual uses two config files: +Dual uses two config files: `devcontainer.json` for container configuration and `.dual/settings.json` for Dual-specific orchestration. -### `.dual.toml` (per-repo hints) +### `devcontainer.json` (container config) -Lives in your project root. Committed to git. Controls runtime behavior. +Primary source for container configuration. Lives in `.devcontainer/devcontainer.json` (or `.devcontainer.json` at project root). Compatible with the [Dev Containers](https://containers.dev/) ecosystem. -```toml -# Docker image for the container runtime -image = "node:20" - -# Ports your dev server uses (for reverse proxy routing) -ports = [3000, 3001] - -# Shell command to run after container creation (e.g., dependency install) -setup = "pnpm install" - -# Commands to route to the container (in addition to defaults) -# Default: npm, npx, pnpm, node, python, python3, pip, pip3, curl, make -extra_commands = ["cargo", "go"] - -# Directories to isolate with anonymous Docker volumes -anonymous_volumes = ["node_modules", ".next"] - -# Environment variables passed to the container -[env] -NODE_ENV = "development" - -# Files to share across all workspaces of this repo -[shared] -files = [".vercel", ".env.local"] +```json +{ + "image": "node:20", + "forwardPorts": [3000, 3001], + "postCreateCommand": "pnpm install", + "containerEnv": { + "NODE_ENV": "development" + } +} ``` | Field | Description | Default | |-------|-------------|---------| | `image` | Docker image for the container | `node:20` | -| `ports` | Ports that services bind to (for reverse proxy) | `[]` | -| `setup` | Command to run after first container creation | None | -| `env` | Environment variables passed to the container | `{}` | -| `shared.files` | Files/directories to share across branch workspaces | `[]` | +| `build.dockerfile` | Build image from Dockerfile instead of pulling | None | +| `forwardPorts` | Ports that services bind to (for reverse proxy) | `[]` | +| `postCreateCommand` | Command to run after first container creation | None | +| `containerEnv` | Environment variables passed to the container | `{}` | +| `mounts` | Volume mounts (volume type, `/workspace/*` targets become anonymous volumes) | `[]` | + +### `.dual/settings.json` (orchestration config) + +Dual-specific settings that the devcontainer spec can't express. Lives in `.dual/settings.json` in your project root. Created automatically by `dual init`. + +```json +{ + "devcontainer": ".devcontainer/devcontainer.json", + "extra_commands": ["cargo", "go"], + "anonymous_volumes": ["node_modules", ".next"], + "shared": [".vercel", ".env.local"] +} +``` + +| Field | Description | Default | +|-------|-------------|---------| +| `devcontainer` | Path to devcontainer.json | `".devcontainer/devcontainer.json"` | | `extra_commands` | Additional commands to route to the container | `[]` | | `anonymous_volumes` | Container volumes (e.g., `node_modules`) | `["node_modules"]` | +| `shared` | Files/directories to share across branch workspaces | `[]` | ### `~/.dual/workspaces.toml` (global state) @@ -174,6 +178,21 @@ When you select a workspace (via `dual` or `dual launch`): Your editor, git, and credentials stay on the host. The container handles all runtime processes. Claude Code never knows it's running inside a container. +### Shell Hook (Pane Propagation) + +On first run, `dual` automatically appends a small snippet to your `~/.zshrc` or `~/.bashrc`: + +```bash +# dual: shell interception (auto-generated) +if [ -n "$DUAL_ACTIVE" ] && [ -n "$DUAL_RC_PATH" ] && [ -f "$DUAL_RC_PATH" ]; then + source "$DUAL_RC_PATH" +fi +``` + +This ensures that when you split a pane (`Ctrl+b %`) or create a new window (`Ctrl+b c`) inside a Dual tmux session, the new shell automatically loads command interception. Without this, new panes would run commands on the host instead of in the container. + +The snippet is a no-op outside Dual sessions — it only activates when `DUAL_ACTIVE` is set (which Dual configures via `tmux set-environment`). + ## Architecture ``` diff --git a/src/cli.rs b/src/cli.rs index 7794be3..63280c0 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -13,11 +13,14 @@ pub struct Cli { #[derive(Subcommand)] pub enum Command { - /// Register the current repo as a dual workspace - Add { + /// Initialize the current repo as a dual workspace + Init { /// Short name for the repo (derived from directory name if omitted) #[arg(short, long)] name: Option, + /// Accept all defaults without prompts + #[arg(short, long)] + yes: bool, }, /// Create a new branch workspace for an existing repo diff --git a/src/config.rs b/src/config.rs index 2256a13..5a854ba 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,20 +1,77 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::path::{Path, PathBuf}; +use tracing::warn; -const HINTS_FILENAME: &str = ".dual.toml"; +/// Dockerfile build configuration for building images from source. +/// Used when devcontainer.json specifies `build.dockerfile`. +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub struct DockerfileBuild { + /// Path to the Dockerfile (relative to base_dir or workspace root) + pub path: String, + + /// Build context directory (relative to base_dir or workspace root) + #[serde(default = "default_build_context")] + pub context: String, + + /// Docker build arguments (--build-arg) + #[serde(default)] + pub args: HashMap, + + /// Target build stage for multi-stage builds + #[serde(skip_serializing_if = "Option::is_none")] + pub target: Option, + + /// Base directory for resolving relative paths (set by devcontainer loader). + #[serde(skip)] + pub base_dir: Option, +} + +fn default_build_context() -> String { + ".".to_string() +} + +const DUAL_DIR: &str = ".dual"; +const SETTINGS_FILENAME: &str = "settings.json"; const DEFAULT_IMAGE: &str = "node:20"; -/// Shared configuration file propagation settings. +/// Dual-specific orchestration config, read from .dual/settings.json. +/// +/// Container configuration (image, ports, setup, env) lives in devcontainer.json. +/// This struct contains only Dual-specific fields that devcontainer can't express. #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] -pub struct SharedConfig { - /// Files and directories to share across workspaces. - /// e.g. [".vercel", ".env.local", ".env"] +pub struct DualConfig { + /// Path to devcontainer.json (required, set by dual init) + pub devcontainer: String, + + /// Additional commands to route to the container (merged with defaults) + #[serde(default)] + pub extra_commands: Vec, + + /// Directories to isolate with anonymous Docker volumes + #[serde(default = "default_anonymous_volumes")] + pub anonymous_volumes: Vec, + + /// Shared files to propagate across workspaces #[serde(default)] - pub files: Vec, + pub shared: Vec, +} + +impl Default for DualConfig { + fn default() -> Self { + Self { + devcontainer: ".devcontainer/devcontainer.json".to_string(), + extra_commands: Vec::new(), + anonymous_volumes: default_anonymous_volumes(), + shared: Vec::new(), + } + } } -/// Per-repo runtime hints, read from .dual.toml in a workspace directory. +/// Per-repo runtime hints — the merged internal representation used by all consumers. +/// +/// Built from DualConfig (.dual/settings.json) + DevcontainerJson (devcontainer.json). +/// Consumer code (container, proxy, shell) uses this struct exclusively. #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] pub struct RepoHints { /// Docker image to use for containers (default: "node:20") @@ -41,8 +98,13 @@ pub struct RepoHints { pub anonymous_volumes: Vec, /// Shared files to propagate across workspaces + #[serde(default)] + pub shared: Vec, + + /// Dockerfile build config — if set, build image instead of pulling. + /// Sourced from devcontainer.json build field. #[serde(skip_serializing_if = "Option::is_none")] - pub shared: Option, + pub dockerfile: Option, } fn default_image() -> String { @@ -62,7 +124,8 @@ impl Default for RepoHints { env: HashMap::new(), extra_commands: Vec::new(), anonymous_volumes: default_anonymous_volumes(), - shared: None, + shared: Vec::new(), + dockerfile: None, } } } @@ -72,77 +135,90 @@ pub fn shared_dir(repo: &str) -> Option { dirs::home_dir().map(|home| home.join(".dual").join("shared").join(repo)) } -/// Load RepoHints from a workspace directory's .dual.toml. -/// Returns default hints if the file doesn't exist. +/// Load RepoHints from a workspace directory. +/// +/// Loading flow: +/// 1. Read `.dual/settings.json` → `DualConfig` (error if missing) +/// 2. Use explicit devcontainer path from DualConfig +/// 3. Parse devcontainer.json → `RepoHints` for container fields +/// 4. Merge DualConfig fields + devcontainer RepoHints → final `RepoHints` pub fn load_hints(workspace_dir: &Path) -> Result { - let path = workspace_dir.join(HINTS_FILENAME); + let dual_config = load_dual_config(workspace_dir)?; + let dc_path = workspace_dir.join(&dual_config.devcontainer); + + // Load container config from devcontainer.json + let dc_hints = if dc_path.exists() { + let devcontainer_dir = dc_path.parent().unwrap_or(workspace_dir); + let contents = std::fs::read_to_string(&dc_path).ok(); + contents.and_then(|c| { + let dc = crate::devcontainer::parse_devcontainer(&c).ok()?; + Some(crate::devcontainer::to_repo_hints(&dc, devcontainer_dir)) + }) + } else { + warn!( + "devcontainer.json not found at '{}' (specified in .dual/settings.json)", + dc_path.display() + ); + None + }; + + Ok(merge_config(&dual_config, dc_hints.as_ref())) +} + +/// Load DualConfig from .dual/settings.json. Returns error if file doesn't exist. +fn load_dual_config(workspace_dir: &Path) -> Result { + let path = workspace_dir.join(DUAL_DIR).join(SETTINGS_FILENAME); if !path.exists() { - return Ok(RepoHints::default()); + return Err(HintsError::MissingConfig(workspace_dir.to_path_buf())); } let contents = std::fs::read_to_string(&path).map_err(|e| HintsError::ReadError(path.clone(), e))?; - let hints: RepoHints = - toml::from_str(&contents).map_err(|e| HintsError::ParseError(path, e))?; - Ok(hints) + + let config: DualConfig = + serde_json::from_str(&contents).map_err(|e| HintsError::JsonParseError(path, e))?; + Ok(config) } -/// Write RepoHints to a workspace directory's .dual.toml. -pub fn write_hints(workspace_dir: &Path, hints: &RepoHints) -> Result<(), HintsError> { - let path = workspace_dir.join(HINTS_FILENAME); - let contents = toml::to_string_pretty(hints).map_err(HintsError::SerializeError)?; - std::fs::write(&path, contents).map_err(|e| HintsError::WriteError(path, e))?; - Ok(()) +/// Merge DualConfig + devcontainer RepoHints into final RepoHints. +/// +/// devcontainer.json provides: image, ports, setup, env, dockerfile +/// DualConfig provides: extra_commands, anonymous_volumes, shared +fn merge_config(dual: &DualConfig, dc_hints: Option<&RepoHints>) -> RepoHints { + let base = dc_hints.cloned().unwrap_or_default(); + + RepoHints { + // Container fields come from devcontainer.json (or defaults) + image: base.image, + ports: base.ports, + setup: base.setup, + env: base.env, + dockerfile: base.dockerfile, + + // Dual-specific fields come from .dual/settings.json + extra_commands: dual.extra_commands.clone(), + anonymous_volumes: dual.anonymous_volumes.clone(), + shared: dual.shared.clone(), + } } -/// Write a default .dual.toml with helpful comments and examples. -/// Used by `dual add` when creating a new repo config. -pub fn write_default_hints(repo_root: &Path) -> Result<(), HintsError> { - let template = r#"# Dual workspace configuration -# See: https://github.com/jeevanpillay/dual - -# Docker image for the container runtime -image = "node:20" - -# Ports your dev server uses (for reverse proxy routing) -# Example: ports = [3000, 3001] -# ports = [] - -# Shell command to run after container creation (e.g., dependency install) -# Example: setup = "pnpm install" -# setup = "" - -# Environment variables passed to the container -# Example: -# [env] -# NODE_ENV = "development" - -# Commands to route to the container (in addition to defaults) -# Default: npm, npx, pnpm, node, python, python3, pip, pip3, curl, make -# Example: extra_commands = ["cargo", "go", "ruby"] -# extra_commands = [] - -# Directories to isolate with anonymous Docker volumes -# These directories get their own volume so they don't sync between host/container -# Example: anonymous_volumes = ["node_modules", ".next", "target"] -# anonymous_volumes = ["node_modules"] - -# Files to share across all workspaces of this repo -# These are gitignored files that should be available in every branch workspace -# [shared] -# files = [".env.local", ".vercel"] -"#; - let hints_path = repo_root.join(HINTS_FILENAME); - std::fs::write(&hints_path, template).map_err(|e| HintsError::WriteError(hints_path, e))?; +/// Write DualConfig to a workspace directory's .dual/settings.json. +pub fn write_dual_config(workspace_dir: &Path, config: &DualConfig) -> Result<(), HintsError> { + let dual_dir = workspace_dir.join(DUAL_DIR); + std::fs::create_dir_all(&dual_dir).map_err(|e| HintsError::WriteError(dual_dir.clone(), e))?; + + let path = dual_dir.join(SETTINGS_FILENAME); + let contents = serde_json::to_string_pretty(config).map_err(HintsError::JsonSerializeError)?; + std::fs::write(&path, contents).map_err(|e| HintsError::WriteError(path, e))?; Ok(()) } -/// Parse hints from TOML string (for testing). -pub fn parse_hints(toml_str: &str) -> Result { - let hints: RepoHints = toml::from_str(toml_str) - .map_err(|e| HintsError::ParseError(PathBuf::from(""), e))?; - Ok(hints) +/// Parse DualConfig from JSON string (for testing). +pub fn parse_dual_config(json_str: &str) -> Result { + let config: DualConfig = serde_json::from_str(json_str) + .map_err(|e| HintsError::JsonParseError(PathBuf::from(""), e))?; + Ok(config) } /// Compute the workspace identifier from repo + branch. @@ -192,10 +268,13 @@ pub enum HintsError { WriteError(PathBuf, std::io::Error), #[error("Failed to parse {path}: {err}", path = .0.display(), err = .1)] - ParseError(PathBuf, toml::de::Error), + JsonParseError(PathBuf, serde_json::Error), + + #[error("Failed to serialize config: {0}")] + JsonSerializeError(serde_json::Error), - #[error("Failed to serialize hints: {0}")] - SerializeError(toml::ser::Error), + #[error("No .dual/settings.json found in {path}. Run `dual init` first.", path = .0.display())] + MissingConfig(PathBuf), } #[cfg(test)] @@ -251,166 +330,271 @@ mod tests { } #[test] - fn parse_hints_minimal() { - let hints = parse_hints("").unwrap(); - assert_eq!(hints.image, "node:20"); - assert!(hints.ports.is_empty()); + fn default_dual_config() { + let config = DualConfig::default(); + assert_eq!(config.devcontainer, ".devcontainer/devcontainer.json"); + assert!(config.extra_commands.is_empty()); + assert_eq!(config.anonymous_volumes, vec!["node_modules".to_string()]); + assert!(config.shared.is_empty()); } #[test] - fn parse_hints_full() { - let toml = r#" -image = "python:3.12" -ports = [3000, 3001] -setup = "pnpm install" - -[env] -NODE_ENV = "development" -"#; - let hints = parse_hints(toml).unwrap(); - assert_eq!(hints.image, "python:3.12"); - assert_eq!(hints.ports, vec![3000, 3001]); - assert_eq!(hints.setup.as_deref(), Some("pnpm install")); - assert_eq!(hints.env.get("NODE_ENV").unwrap(), "development"); + fn parse_dual_config_minimal() { + let json = r#"{"devcontainer": ".devcontainer/devcontainer.json"}"#; + let config = parse_dual_config(json).unwrap(); + assert_eq!(config.devcontainer, ".devcontainer/devcontainer.json"); + assert!(config.extra_commands.is_empty()); + assert_eq!(config.anonymous_volumes, vec!["node_modules".to_string()]); + assert!(config.shared.is_empty()); } #[test] - fn parse_hints_missing_fields_use_defaults() { - let toml = r#"ports = [8080]"#; - let hints = parse_hints(toml).unwrap(); - assert_eq!(hints.image, "node:20"); - assert_eq!(hints.ports, vec![8080]); - assert!(hints.setup.is_none()); + fn parse_dual_config_missing_devcontainer_errors() { + let json = r#"{"extra_commands": ["cargo"]}"#; + assert!(parse_dual_config(json).is_err()); } #[test] - fn load_hints_from_missing_file() { - let hints = load_hints(Path::new("/tmp/dual-test-nonexistent")).unwrap(); - assert_eq!(hints, RepoHints::default()); + fn parse_dual_config_full() { + let json = r#"{ + "devcontainer": ".devcontainer/devcontainer.json", + "extra_commands": ["cargo", "go"], + "anonymous_volumes": ["node_modules", ".next", "target"], + "shared": [".env.local", ".vercel"] + }"#; + let config = parse_dual_config(json).unwrap(); + assert_eq!(config.devcontainer, ".devcontainer/devcontainer.json"); + assert_eq!(config.extra_commands, vec!["cargo", "go"]); + assert_eq!( + config.anonymous_volumes, + vec!["node_modules", ".next", "target"] + ); + assert_eq!(config.shared, vec![".env.local", ".vercel"]); } #[test] - fn write_and_load_hints() { - let dir = std::env::temp_dir().join("dual-test-hints-roundtrip"); - let _ = std::fs::remove_dir_all(&dir); - std::fs::create_dir_all(&dir).unwrap(); + fn parse_dual_config_extra_commands_only() { + let json = r#"{"devcontainer": "dc.json", "extra_commands": ["cargo", "go", "ruby"]}"#; + let config = parse_dual_config(json).unwrap(); + assert_eq!(config.extra_commands, vec!["cargo", "go", "ruby"]); + } - let hints = RepoHints { - image: "rust:latest".to_string(), - ports: vec![8080, 9090], - setup: Some("cargo build".to_string()), - env: HashMap::from([("RUST_LOG".to_string(), "debug".to_string())]), + #[test] + fn merge_config_devcontainer_only() { + let dual = DualConfig::default(); + let dc_hints = RepoHints { + image: "python:3.12".to_string(), + ports: vec![3000, 8080], + setup: Some("pnpm install".to_string()), + env: HashMap::from([("NODE_ENV".to_string(), "development".to_string())]), + ..Default::default() + }; + + let merged = merge_config(&dual, Some(&dc_hints)); + assert_eq!(merged.image, "python:3.12"); + assert_eq!(merged.ports, vec![3000, 8080]); + assert_eq!(merged.setup.as_deref(), Some("pnpm install")); + assert_eq!(merged.env.get("NODE_ENV").unwrap(), "development"); + // Dual defaults + assert!(merged.extra_commands.is_empty()); + assert_eq!(merged.anonymous_volumes, vec!["node_modules".to_string()]); + } + + #[test] + fn merge_config_dual_only() { + let dual = DualConfig { + devcontainer: ".devcontainer/devcontainer.json".to_string(), extra_commands: vec!["cargo".to_string()], anonymous_volumes: vec!["node_modules".to_string(), "target".to_string()], - shared: None, + shared: vec![".env.local".to_string()], }; - write_hints(&dir, &hints).unwrap(); - let loaded = load_hints(&dir).unwrap(); - assert_eq!(hints, loaded); + let merged = merge_config(&dual, None); + // Container defaults + assert_eq!(merged.image, "node:20"); + assert!(merged.ports.is_empty()); + assert!(merged.setup.is_none()); + // Dual fields + assert_eq!(merged.extra_commands, vec!["cargo"]); + assert_eq!(merged.anonymous_volumes, vec!["node_modules", "target"]); + assert_eq!(merged.shared, vec![".env.local"]); + } - let _ = std::fs::remove_dir_all(&dir); + #[test] + fn merge_config_both_sources() { + let dual = DualConfig { + devcontainer: ".devcontainer/devcontainer.json".to_string(), + extra_commands: vec!["cargo".to_string()], + anonymous_volumes: vec!["node_modules".to_string(), "target".to_string()], + shared: vec![".env".to_string()], + }; + let dc_hints = RepoHints { + image: "rust:latest".to_string(), + ports: vec![8080], + setup: Some("cargo build".to_string()), + env: HashMap::from([("RUST_LOG".to_string(), "debug".to_string())]), + ..Default::default() + }; + + let merged = merge_config(&dual, Some(&dc_hints)); + // Container fields from devcontainer + assert_eq!(merged.image, "rust:latest"); + assert_eq!(merged.ports, vec![8080]); + assert_eq!(merged.setup.as_deref(), Some("cargo build")); + assert_eq!(merged.env.get("RUST_LOG").unwrap(), "debug"); + // Dual fields from .dual/settings.json + assert_eq!(merged.extra_commands, vec!["cargo"]); + assert_eq!(merged.anonymous_volumes, vec!["node_modules", "target"]); + assert_eq!(merged.shared, vec![".env"]); } #[test] - fn write_default_hints_has_comments() { - let dir = std::env::temp_dir().join("dual-test-default-hints"); - let _ = std::fs::remove_dir_all(&dir); - std::fs::create_dir_all(&dir).unwrap(); + fn merge_config_neither_source() { + let dual = DualConfig::default(); + let merged = merge_config(&dual, None); + assert_eq!(merged, RepoHints::default()); + } - write_default_hints(&dir).unwrap(); + #[test] + fn load_hints_from_missing_dir_errors() { + let result = load_hints(Path::new("/tmp/dual-test-nonexistent")); + assert!(result.is_err()); + } - let content = std::fs::read_to_string(dir.join(".dual.toml")).unwrap(); - assert!(content.contains("# Dual workspace configuration")); - assert!(content.contains("image = \"node:20\"")); - assert!(content.contains("# ports = []")); - assert!(content.contains("# setup = \"\"")); - assert!(content.contains("# [env]")); - assert!(content.contains("# extra_commands = []")); - assert!(content.contains("# anonymous_volumes = [\"node_modules\"]")); - assert!(content.contains("# [shared]")); + #[test] + fn load_hints_with_settings_and_devcontainer() { + let dir = std::env::temp_dir().join("dual-test-dc-settings"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "python:3.12", "forwardPorts": [5000]}"#, + ) + .unwrap(); + + // Must also have .dual/settings.json + let config = DualConfig { + devcontainer: ".devcontainer/devcontainer.json".to_string(), + ..Default::default() + }; + write_dual_config(&dir, &config).unwrap(); - // Verify it's still parseable as valid TOML let hints = load_hints(&dir).unwrap(); - assert_eq!(hints.image, "node:20"); + assert_eq!(hints.image, "python:3.12"); + assert_eq!(hints.ports, vec![5000]); let _ = std::fs::remove_dir_all(&dir); } #[test] - fn parse_hints_unknown_fields_ignored() { - let toml = r#" -image = "node:20" -ports = [3000] -unknown_field = "should be ignored" -"#; - // serde by default ignores unknown fields - let hints = parse_hints(toml).unwrap(); + fn load_hints_dual_config_plus_devcontainer() { + let dir = std::env::temp_dir().join("dual-test-merged"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + + // Write devcontainer.json with container config + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "node:20", "forwardPorts": [3000], "postCreateCommand": "pnpm install"}"#, + ) + .unwrap(); + + // Write .dual/settings.json with orchestration config + let dual_config = DualConfig { + devcontainer: ".devcontainer/devcontainer.json".to_string(), + extra_commands: vec!["cargo".to_string()], + anonymous_volumes: vec!["node_modules".to_string(), ".next".to_string()], + shared: vec![".env.local".to_string()], + }; + write_dual_config(&dir, &dual_config).unwrap(); + + let hints = load_hints(&dir).unwrap(); + // From devcontainer.json assert_eq!(hints.image, "node:20"); - } + assert_eq!(hints.ports, vec![3000]); + assert_eq!(hints.setup.as_deref(), Some("pnpm install")); + // From .dual/settings.json + assert_eq!(hints.extra_commands, vec!["cargo"]); + assert_eq!(hints.anonymous_volumes, vec!["node_modules", ".next"]); + assert_eq!(hints.shared, vec![".env.local"]); - #[test] - fn parse_hints_extra_commands() { - let toml = r#" -extra_commands = ["cargo", "go", "ruby"] -"#; - let hints = parse_hints(toml).unwrap(); - assert_eq!(hints.extra_commands, vec!["cargo", "go", "ruby"]); + let _ = std::fs::remove_dir_all(&dir); } #[test] - fn parse_hints_anonymous_volumes() { - let toml = r#" -anonymous_volumes = ["node_modules", ".next", "target"] -"#; - let hints = parse_hints(toml).unwrap(); - assert_eq!( - hints.anonymous_volumes, - vec!["node_modules", ".next", "target"] - ); - } + fn load_hints_explicit_devcontainer_path() { + let dir = std::env::temp_dir().join("dual-test-explicit-dc"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join("custom")).unwrap(); - #[test] - fn parse_hints_anonymous_volumes_default() { - let hints = parse_hints("").unwrap(); - assert_eq!(hints.anonymous_volumes, vec!["node_modules".to_string()]); - } + std::fs::write( + dir.join("custom").join("devcontainer.json"), + r#"{"image": "alpine:latest"}"#, + ) + .unwrap(); - #[test] - fn parse_hints_with_shared() { - let toml = r#" -image = "node:20" - -[shared] -files = [".vercel", ".env.local"] -"#; - let hints = parse_hints(toml).unwrap(); - let shared = hints.shared.unwrap(); - assert_eq!(shared.files, vec![".vercel", ".env.local"]); + let dual_config = DualConfig { + devcontainer: "custom/devcontainer.json".to_string(), + ..Default::default() + }; + write_dual_config(&dir, &dual_config).unwrap(); + + let hints = load_hints(&dir).unwrap(); + assert_eq!(hints.image, "alpine:latest"); + + let _ = std::fs::remove_dir_all(&dir); } #[test] - fn parse_hints_without_shared() { - let hints = parse_hints("image = \"node:20\"").unwrap(); - assert!(hints.shared.is_none()); + fn write_and_load_dual_config_roundtrip() { + let dir = std::env::temp_dir().join("dual-test-dual-config-roundtrip"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + + // Write devcontainer.json + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "rust:latest", "forwardPorts": [8080], "postCreateCommand": "cargo build", "containerEnv": {"RUST_LOG": "debug"}}"#, + ) + .unwrap(); + + // Write DualConfig + let config = DualConfig { + devcontainer: ".devcontainer/devcontainer.json".to_string(), + extra_commands: vec!["cargo".to_string()], + anonymous_volumes: vec!["node_modules".to_string(), "target".to_string()], + shared: Vec::new(), + }; + write_dual_config(&dir, &config).unwrap(); + + // Load and verify merged result + let hints = load_hints(&dir).unwrap(); + assert_eq!(hints.image, "rust:latest"); + assert_eq!(hints.ports, vec![8080]); + assert_eq!(hints.setup.as_deref(), Some("cargo build")); + assert_eq!(hints.env.get("RUST_LOG").unwrap(), "debug"); + assert_eq!(hints.extra_commands, vec!["cargo"]); + assert_eq!(hints.anonymous_volumes, vec!["node_modules", "target"]); + + let _ = std::fs::remove_dir_all(&dir); } #[test] - fn parse_hints_shared_empty_files() { - let toml = r#" -[shared] -files = [] -"#; - let hints = parse_hints(toml).unwrap(); - let shared = hints.shared.unwrap(); - assert!(shared.files.is_empty()); + fn write_dual_config_without_shared_has_empty_array() { + let config = DualConfig::default(); + let json_str = serde_json::to_string_pretty(&config).unwrap(); + assert!(json_str.contains("\"shared\": []")); } #[test] - fn write_hints_without_shared_omits_section() { - let hints = RepoHints::default(); - let toml_str = toml::to_string_pretty(&hints).unwrap(); - assert!(!toml_str.contains("[shared]")); + fn write_dual_config_with_shared_includes_files() { + let config = DualConfig { + shared: vec![".env".to_string()], + ..Default::default() + }; + let json_str = serde_json::to_string_pretty(&config).unwrap(); + assert!(json_str.contains("\"shared\"")); + assert!(json_str.contains(".env")); } #[test] @@ -437,17 +621,4 @@ files = [] container_name("lightfast", "feat/auth") ); } - - #[test] - fn write_hints_with_shared_includes_section() { - let hints = RepoHints { - shared: Some(SharedConfig { - files: vec![".env".to_string()], - }), - ..Default::default() - }; - let toml_str = toml::to_string_pretty(&hints).unwrap(); - assert!(toml_str.contains("[shared]")); - assert!(toml_str.contains(".env")); - } } diff --git a/src/container.rs b/src/container.rs index f3d8cb4..408ed1f 100644 --- a/src/container.rs +++ b/src/container.rs @@ -234,6 +234,70 @@ pub fn build_create_args( args } +/// Build a Docker image from a Dockerfile. +/// +/// Returns the image tag on success. +pub fn build_image( + tag: &str, + workspace_dir: &Path, + build: &crate::config::DockerfileBuild, +) -> Result { + let args = build_image_args(tag, workspace_dir, build); + let output = Command::new("docker") + .args(&args) + .output() + .map_err(|e| ContainerError::DockerNotFound(e.to_string()))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + return Err(ContainerError::Failed { + operation: "build".to_string(), + name: tag.to_string(), + stderr, + }); + } + + Ok(tag.to_string()) +} + +/// Build docker build arguments (public for testing). +pub fn build_image_args( + tag: &str, + workspace_dir: &Path, + build: &crate::config::DockerfileBuild, +) -> Vec { + // Resolve paths relative to base_dir (devcontainer.json location) or workspace root + let base = build.base_dir.as_deref().unwrap_or(workspace_dir); + + let dockerfile_path = base.join(&build.path); + let context_path = base.join(&build.context); + + let mut args = vec![ + "build".to_string(), + "-t".to_string(), + tag.to_string(), + "-f".to_string(), + dockerfile_path.display().to_string(), + ]; + + // Build arguments + for (key, value) in &build.args { + args.push("--build-arg".to_string()); + args.push(format!("{key}={value}")); + } + + // Target stage + if let Some(ref target) = build.target { + args.push("--target".to_string()); + args.push(target.clone()); + } + + // Build context (last argument) + args.push(context_path.display().to_string()); + + args +} + /// Build docker exec arguments (for testing). pub fn build_exec_args(name: &str, cmd: &[&str], tty: bool) -> Vec { let mut args = vec!["exec".to_string()]; @@ -395,4 +459,83 @@ mod tests { assert_ne!(ContainerStatus::Running, ContainerStatus::Stopped); assert_ne!(ContainerStatus::Stopped, ContainerStatus::Missing); } + + #[test] + fn build_image_args_basic() { + let build = crate::config::DockerfileBuild { + path: "Dockerfile".to_string(), + context: ".".to_string(), + args: HashMap::new(), + target: None, + base_dir: None, + }; + let args = build_image_args("dual-build-test", Path::new("/tmp/ws"), &build); + assert_eq!(args[0], "build"); + assert_eq!(args[1], "-t"); + assert_eq!(args[2], "dual-build-test"); + assert_eq!(args[3], "-f"); + assert_eq!(args[4], "/tmp/ws/Dockerfile"); + // Last arg is context + assert_eq!(args[args.len() - 1], "/tmp/ws/."); + } + + #[test] + fn build_image_args_with_build_args() { + let mut build_args = HashMap::new(); + build_args.insert("NODE_VERSION".to_string(), "20".to_string()); + let build = crate::config::DockerfileBuild { + path: "Dockerfile".to_string(), + context: ".".to_string(), + args: build_args, + target: None, + base_dir: None, + }; + let args = build_image_args("dual-build-test", Path::new("/tmp/ws"), &build); + assert!(args.contains(&"--build-arg".to_string())); + assert!(args.contains(&"NODE_VERSION=20".to_string())); + } + + #[test] + fn build_image_args_with_target() { + let build = crate::config::DockerfileBuild { + path: "Dockerfile".to_string(), + context: ".".to_string(), + args: HashMap::new(), + target: Some("development".to_string()), + base_dir: None, + }; + let args = build_image_args("dual-build-test", Path::new("/tmp/ws"), &build); + assert!(args.contains(&"--target".to_string())); + assert!(args.contains(&"development".to_string())); + } + + #[test] + fn build_image_args_with_base_dir() { + let build = crate::config::DockerfileBuild { + path: "Dockerfile".to_string(), + context: "..".to_string(), + args: HashMap::new(), + target: None, + base_dir: Some(std::path::PathBuf::from("/tmp/ws/.devcontainer")), + }; + let args = build_image_args("dual-build-test", Path::new("/tmp/ws"), &build); + assert_eq!(args[3], "-f"); + assert_eq!(args[4], "/tmp/ws/.devcontainer/Dockerfile"); + // Context resolved relative to base_dir + assert_eq!(args[args.len() - 1], "/tmp/ws/.devcontainer/.."); + } + + #[test] + fn build_image_args_without_base_dir_uses_workspace() { + let build = crate::config::DockerfileBuild { + path: "docker/Dockerfile".to_string(), + context: ".".to_string(), + args: HashMap::new(), + target: None, + base_dir: None, + }; + let args = build_image_args("dual-build-test", Path::new("/home/user/repo"), &build); + assert_eq!(args[4], "/home/user/repo/docker/Dockerfile"); + assert_eq!(args[args.len() - 1], "/home/user/repo/."); + } } diff --git a/src/devcontainer.rs b/src/devcontainer.rs new file mode 100644 index 0000000..abe23c7 --- /dev/null +++ b/src/devcontainer.rs @@ -0,0 +1,571 @@ +use serde::Deserialize; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +use crate::config::RepoHints; + +/// Subset of devcontainer.json fields that Dual consumes. +/// See: https://containers.dev/implementors/json_reference/ +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DevcontainerJson { + /// Docker image to use (mutually exclusive with `build`) + pub image: Option, + + /// Build configuration for Dockerfile-based images + pub build: Option, + + /// Ports to forward from the container + pub forward_ports: Option>, + + /// Environment variables for the container + pub container_env: Option>, + + /// Command to run after container creation + pub post_create_command: Option, + + /// Mount configurations + pub mounts: Option>, +} + +/// Build configuration from devcontainer.json. +#[derive(Debug, Deserialize)] +pub struct BuildConfig { + /// Path to Dockerfile (relative to devcontainer.json location) + pub dockerfile: Option, + + /// Build context path (relative to devcontainer.json location) + pub context: Option, + + /// Docker build arguments + pub args: Option>, + + /// Build target stage for multi-stage builds + pub target: Option, +} + +/// Port specification — devcontainer allows integers or strings. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum PortSpec { + Number(u16), + String(String), +} + +impl PortSpec { + /// Convert to u16, parsing strings as integers. + pub fn to_port(&self) -> Option { + match self { + PortSpec::Number(n) => Some(*n), + PortSpec::String(s) => s.parse::().ok(), + } + } +} + +/// Command specification — devcontainer allows string, array, or object forms. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum CommandSpec { + /// Single shell command: "pnpm install" + String(String), + /// Exec form: ["pnpm", "install"] + Array(Vec), + /// Parallel commands: {"install": "pnpm install", "build": "pnpm build"} + Object(HashMap), +} + +impl CommandSpec { + /// Flatten to a single shell command string. + /// - String: returned as-is + /// - Array: joined with spaces + /// - Object: values joined with " && " + pub fn to_shell_command(&self) -> String { + match self { + CommandSpec::String(s) => s.clone(), + CommandSpec::Array(arr) => arr.join(" "), + CommandSpec::Object(map) => { + let commands: Vec = map + .values() + .map(|v| match v { + StringOrArray::String(s) => s.clone(), + StringOrArray::Array(arr) => arr.join(" "), + }) + .collect(); + commands.join(" && ") + } + } + } +} + +/// Value in a command object — can be string or array. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum StringOrArray { + String(String), + Array(Vec), +} + +/// Mount specification — devcontainer allows string or object forms. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum MountSpec { + /// Docker mount syntax: "type=volume,target=/workspace/node_modules" + String(String), + /// Structured mount object + Object(MountObject), +} + +/// Structured mount configuration. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MountObject { + #[serde(rename = "type")] + pub mount_type: Option, + pub source: Option, + pub target: String, +} + +const WORKSPACE_PREFIX: &str = "/workspace/"; + +impl MountSpec { + /// Extract anonymous volume path if this is a volume mount targeting /workspace/*. + /// Returns the relative path (e.g., "node_modules" from "/workspace/node_modules"). + pub fn as_anonymous_volume(&self) -> Option { + match self { + MountSpec::String(s) => { + // Parse "type=volume,target=/workspace/node_modules" format + let mut mount_type = None; + let mut target = None; + let mut has_source = false; + for part in s.split(',') { + let (key, value) = part.split_once('=')?; + match key.trim() { + "type" => mount_type = Some(value.trim()), + "target" | "dst" | "destination" => target = Some(value.trim()), + "source" | "src" => has_source = true, + _ => {} + } + } + if mount_type == Some("volume") && !has_source { + target + .filter(|t| t.starts_with(WORKSPACE_PREFIX)) + .map(|t| t[WORKSPACE_PREFIX.len()..].to_string()) + } else { + None + } + } + MountSpec::Object(obj) => { + if obj.mount_type.as_deref() == Some("volume") + && obj.source.is_none() + && obj.target.starts_with(WORKSPACE_PREFIX) + { + return Some(obj.target[WORKSPACE_PREFIX.len()..].to_string()); + } + None + } + } + } +} + +/// Resolve the path to devcontainer.json in a workspace directory. +/// +/// Checks in order: +/// 1. `.devcontainer/devcontainer.json` +/// 2. `.devcontainer.json` (root) +/// +/// Returns None if neither exists. +pub fn find_devcontainer_json(workspace_dir: &Path) -> Option { + let candidates = [ + workspace_dir + .join(".devcontainer") + .join("devcontainer.json"), + workspace_dir.join(".devcontainer.json"), + ]; + + candidates.into_iter().find(|p| p.exists()) +} + +/// Parse a devcontainer.json string into DevcontainerJson. +pub fn parse_devcontainer(json_str: &str) -> Result { + serde_json::from_str(json_str) +} + +/// Load and parse devcontainer.json from a workspace directory. +/// Returns None if no devcontainer.json exists. +pub fn load_devcontainer(workspace_dir: &Path) -> Option { + let path = find_devcontainer_json(workspace_dir)?; + let contents = std::fs::read_to_string(&path).ok()?; + parse_devcontainer(&contents).ok() +} + +/// Convert DevcontainerJson fields to RepoHints. +/// +/// Maps: +/// - `image` → `hints.image` +/// - `build` → `hints.dockerfile` +/// - `forwardPorts` → `hints.ports` +/// - `containerEnv` → `hints.env` +/// - `postCreateCommand` → `hints.setup` +/// - `mounts` (volume type, /workspace/* target) → `hints.anonymous_volumes` +pub fn to_repo_hints(dc: &DevcontainerJson, devcontainer_dir: &Path) -> RepoHints { + let mut hints = RepoHints::default(); + + // Image (mutually exclusive with build) + if let Some(ref image) = dc.image { + hints.image = image.clone(); + } + + // Build → DockerfileBuild + if let Some(ref build) = dc.build + && build.dockerfile.is_some() + { + hints.dockerfile = Some(crate::config::DockerfileBuild { + path: build + .dockerfile + .clone() + .unwrap_or_else(|| "Dockerfile".to_string()), + context: build.context.clone().unwrap_or_else(|| ".".to_string()), + args: build.args.clone().unwrap_or_default(), + target: build.target.clone(), + base_dir: Some(devcontainer_dir.to_path_buf()), + }); + } + + // Ports + if let Some(ref ports) = dc.forward_ports { + hints.ports = ports.iter().filter_map(|p| p.to_port()).collect(); + } + + // Environment variables + if let Some(ref env) = dc.container_env { + hints.env = env.clone(); + } + + // Setup command + if let Some(ref cmd) = dc.post_create_command { + let shell_cmd = cmd.to_shell_command(); + if !shell_cmd.is_empty() { + hints.setup = Some(shell_cmd); + } + } + + // Anonymous volumes from mounts + if let Some(ref mounts) = dc.mounts { + let extra_volumes: Vec = mounts + .iter() + .filter_map(|m| m.as_anonymous_volume()) + .collect(); + if !extra_volumes.is_empty() { + for vol in extra_volumes { + if !hints.anonymous_volumes.contains(&vol) { + hints.anonymous_volumes.push(vol); + } + } + } + } + + hints +} + +/// Load devcontainer.json from a workspace and convert to RepoHints. +/// Returns None if no devcontainer.json exists or it fails to parse. +pub fn load_devcontainer_as_hints(workspace_dir: &Path) -> Option { + let path = find_devcontainer_json(workspace_dir)?; + let devcontainer_dir = path.parent().unwrap_or(workspace_dir); + let contents = std::fs::read_to_string(&path).ok()?; + let dc = parse_devcontainer(&contents).ok()?; + Some(to_repo_hints(&dc, devcontainer_dir)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_minimal_image() { + let json = r#"{"image": "node:20"}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.image.as_deref(), Some("node:20")); + assert!(dc.build.is_none()); + } + + #[test] + fn parse_with_build() { + let json = r#"{ + "build": { + "dockerfile": "Dockerfile", + "context": "..", + "args": {"NODE_VERSION": "20"}, + "target": "development" + } + }"#; + let dc = parse_devcontainer(json).unwrap(); + assert!(dc.image.is_none()); + let build = dc.build.unwrap(); + assert_eq!(build.dockerfile.as_deref(), Some("Dockerfile")); + assert_eq!(build.context.as_deref(), Some("..")); + assert_eq!(build.args.unwrap().get("NODE_VERSION").unwrap(), "20"); + assert_eq!(build.target.as_deref(), Some("development")); + } + + #[test] + fn parse_forward_ports_integers() { + let json = r#"{"forwardPorts": [3000, 8080]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc + .forward_ports + .unwrap() + .iter() + .filter_map(|p| p.to_port()) + .collect(); + assert_eq!(ports, vec![3000, 8080]); + } + + #[test] + fn parse_forward_ports_strings() { + let json = r#"{"forwardPorts": ["3000", "8080"]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc + .forward_ports + .unwrap() + .iter() + .filter_map(|p| p.to_port()) + .collect(); + assert_eq!(ports, vec![3000, 8080]); + } + + #[test] + fn parse_forward_ports_mixed() { + let json = r#"{"forwardPorts": [3000, "8080"]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc + .forward_ports + .unwrap() + .iter() + .filter_map(|p| p.to_port()) + .collect(); + assert_eq!(ports, vec![3000, 8080]); + } + + #[test] + fn parse_forward_ports_invalid_string_skipped() { + let json = r#"{"forwardPorts": [3000, "not-a-port"]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc + .forward_ports + .unwrap() + .iter() + .filter_map(|p| p.to_port()) + .collect(); + assert_eq!(ports, vec![3000]); + } + + #[test] + fn parse_container_env() { + let json = r#"{"containerEnv": {"NODE_ENV": "development", "DEBUG": "true"}}"#; + let dc = parse_devcontainer(json).unwrap(); + let env = dc.container_env.unwrap(); + assert_eq!(env.get("NODE_ENV").unwrap(), "development"); + assert_eq!(env.get("DEBUG").unwrap(), "true"); + } + + #[test] + fn parse_post_create_command_string() { + let json = r#"{"postCreateCommand": "pnpm install"}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!( + dc.post_create_command.unwrap().to_shell_command(), + "pnpm install" + ); + } + + #[test] + fn parse_post_create_command_array() { + let json = r#"{"postCreateCommand": ["pnpm", "install"]}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!( + dc.post_create_command.unwrap().to_shell_command(), + "pnpm install" + ); + } + + #[test] + fn parse_post_create_command_object() { + let json = r#"{"postCreateCommand": {"install": "pnpm install", "build": "pnpm build"}}"#; + let dc = parse_devcontainer(json).unwrap(); + let cmd = dc.post_create_command.unwrap().to_shell_command(); + // Object order is non-deterministic, but both commands should be present + assert!(cmd.contains("pnpm install")); + assert!(cmd.contains("pnpm build")); + assert!(cmd.contains(" && ")); + } + + #[test] + fn parse_post_create_command_object_with_array_value() { + let json = r#"{"postCreateCommand": {"install": ["pnpm", "install"]}}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!( + dc.post_create_command.unwrap().to_shell_command(), + "pnpm install" + ); + } + + #[test] + fn parse_mount_object_volume() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("volume".to_string()), + source: None, + target: "/workspace/node_modules".to_string(), + }); + assert_eq!( + mount.as_anonymous_volume(), + Some("node_modules".to_string()) + ); + } + + #[test] + fn parse_mount_object_with_source_not_anonymous() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("volume".to_string()), + source: Some("my-vol".to_string()), + target: "/workspace/node_modules".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), None); + } + + #[test] + fn parse_mount_object_bind_not_anonymous() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("bind".to_string()), + source: None, + target: "/workspace/node_modules".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), None); + } + + #[test] + fn parse_mount_object_non_workspace_target() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("volume".to_string()), + source: None, + target: "/data/cache".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), None); + } + + #[test] + fn parse_unknown_fields_ignored() { + let json = r#"{ + "image": "node:20", + "customizations": {"vscode": {"extensions": ["ms-python.python"]}}, + "remoteUser": "vscode", + "features": {"ghcr.io/devcontainers/features/node:1": {}} + }"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.image.as_deref(), Some("node:20")); + } + + #[test] + fn parse_empty_object() { + let json = r#"{}"#; + let dc = parse_devcontainer(json).unwrap(); + assert!(dc.image.is_none()); + assert!(dc.build.is_none()); + assert!(dc.forward_ports.is_none()); + } + + #[test] + fn to_repo_hints_image_only() { + let dc = DevcontainerJson { + image: Some("python:3.12".to_string()), + build: None, + forward_ports: None, + container_env: None, + post_create_command: None, + mounts: None, + }; + let hints = to_repo_hints(&dc, Path::new(".")); + assert_eq!(hints.image, "python:3.12"); + } + + #[test] + fn to_repo_hints_full() { + let dc = DevcontainerJson { + image: Some("node:20".to_string()), + build: None, + forward_ports: Some(vec![ + PortSpec::Number(3000), + PortSpec::String("8080".to_string()), + ]), + container_env: Some(HashMap::from([("NODE_ENV".to_string(), "dev".to_string())])), + post_create_command: Some(CommandSpec::String("pnpm install".to_string())), + mounts: None, + }; + let hints = to_repo_hints(&dc, Path::new(".")); + assert_eq!(hints.image, "node:20"); + assert_eq!(hints.ports, vec![3000, 8080]); + assert_eq!(hints.env.get("NODE_ENV").unwrap(), "dev"); + assert_eq!(hints.setup.as_deref(), Some("pnpm install")); + } + + #[test] + fn find_devcontainer_json_in_subdir() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-find"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "node:20"}"#, + ) + .unwrap(); + + let found = find_devcontainer_json(&dir); + assert!(found.is_some()); + assert!(found.unwrap().ends_with(".devcontainer/devcontainer.json")); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn find_devcontainer_json_at_root() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-root"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + std::fs::write(dir.join(".devcontainer.json"), r#"{"image": "node:20"}"#).unwrap(); + + let found = find_devcontainer_json(&dir); + assert!(found.is_some()); + assert!(found.unwrap().ends_with(".devcontainer.json")); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn find_devcontainer_json_missing() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-missing"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + assert!(find_devcontainer_json(&dir).is_none()); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn find_devcontainer_prefers_subdir_over_root() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-prefer"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "python:3.12"}"#, + ) + .unwrap(); + std::fs::write(dir.join(".devcontainer.json"), r#"{"image": "node:20"}"#).unwrap(); + + let found = find_devcontainer_json(&dir).unwrap(); + assert!(found.ends_with(".devcontainer/devcontainer.json")); + + let _ = std::fs::remove_dir_all(&dir); + } +} diff --git a/src/init.rs b/src/init.rs new file mode 100644 index 0000000..6bd8c5e --- /dev/null +++ b/src/init.rs @@ -0,0 +1,372 @@ +use std::path::{Path, PathBuf}; + +use dialoguer::{Confirm, Input, Select}; + +use crate::config::{DualConfig, HintsError}; +use crate::devcontainer; + +/// Result of the init wizard — everything needed to write config files. +pub struct InitResult { + /// Path to devcontainer.json (relative to repo root) + pub devcontainer_path: String, + /// Whether we need to create a new devcontainer.json + pub create_devcontainer: bool, + /// Docker image (only used if create_devcontainer is true) + pub image: String, + /// Ports (only used if create_devcontainer is true) + pub ports: Vec, + /// Setup command (only used if create_devcontainer is true) + pub setup: Option, +} + +/// Base image presets for the wizard. +const IMAGE_PRESETS: &[(&str, &str)] = &[ + ("node:20", "Node.js"), + ("python:3.12", "Python"), + ("rust:latest", "Rust"), +]; + +/// Run the interactive init wizard. +/// +/// Walks through 5 steps: +/// 1. Detect existing devcontainer.json +/// 2. Select base image +/// 3. Configure ports +/// 4. Configure setup command +/// 5. Summary & confirm +pub fn run_wizard(repo_root: &Path) -> Result { + // Step 1: Detect existing devcontainer + if let Some(dc_path) = devcontainer::find_devcontainer_json(repo_root) { + let rel_path = dc_path + .strip_prefix(repo_root) + .unwrap_or(&dc_path) + .to_string_lossy() + .to_string(); + + println!("Detected {rel_path}"); + let use_existing = Confirm::new() + .with_prompt("Use this as your container config?") + .default(true) + .interact() + .unwrap_or(true); + + if use_existing { + return Ok(InitResult { + devcontainer_path: rel_path, + create_devcontainer: false, + image: String::new(), + ports: Vec::new(), + setup: None, + }); + } + } + + // Step 2: Select base image + let mut options: Vec = IMAGE_PRESETS + .iter() + .map(|(img, label)| format!("{img} ({label})")) + .collect(); + options.push("Custom (enter image name)".to_string()); + + let selection = Select::new() + .with_prompt("Select base image") + .items(&options) + .default(0) + .interact() + .unwrap_or(0); + + let image = if selection < IMAGE_PRESETS.len() { + IMAGE_PRESETS[selection].0.to_string() + } else { + Input::::new() + .with_prompt("Image name") + .default("node:20".to_string()) + .interact_text() + .unwrap_or_else(|_| "node:20".to_string()) + }; + + // Step 3: Ports + let ports_input: String = Input::new() + .with_prompt("Which ports does your dev server use? (comma-separated, or empty for none)") + .allow_empty(true) + .interact_text() + .unwrap_or_default(); + + let ports: Vec = ports_input + .split(',') + .filter_map(|s| s.trim().parse::().ok()) + .collect(); + + // Step 4: Setup command + let setup_input: String = Input::new() + .with_prompt("Setup command after container creation? (e.g., pnpm install, or empty)") + .allow_empty(true) + .interact_text() + .unwrap_or_default(); + + let setup = if setup_input.trim().is_empty() { + None + } else { + Some(setup_input.trim().to_string()) + }; + + // Step 5: Summary & confirm + println!(); + println!("Configuration:"); + println!(" Image: {image}"); + if ports.is_empty() { + println!(" Ports: (none)"); + } else { + let port_strs: Vec = ports.iter().map(|p| p.to_string()).collect(); + println!(" Ports: {}", port_strs.join(", ")); + } + match &setup { + Some(cmd) => println!(" Setup: {cmd}"), + None => println!(" Setup: (none)"), + } + println!(); + println!(" .dual/settings.json (Dual config)"); + println!(" .devcontainer/devcontainer.json (container config)"); + println!(); + + let confirmed = Confirm::new() + .with_prompt("Create these files?") + .default(true) + .interact() + .unwrap_or(false); + + if !confirmed { + return Err(HintsError::WriteError( + repo_root.to_path_buf(), + std::io::Error::other("cancelled by user"), + )); + } + + Ok(InitResult { + devcontainer_path: ".devcontainer/devcontainer.json".to_string(), + create_devcontainer: true, + image, + ports, + setup, + }) +} + +/// Apply non-interactive defaults (for --yes flag). +/// +/// If an existing devcontainer.json is found, uses it. +/// Otherwise creates defaults: node:20, no ports, no setup. +pub fn apply_defaults(repo_root: &Path) -> InitResult { + if let Some(dc_path) = devcontainer::find_devcontainer_json(repo_root) { + let rel_path = dc_path + .strip_prefix(repo_root) + .unwrap_or(&dc_path) + .to_string_lossy() + .to_string(); + return InitResult { + devcontainer_path: rel_path, + create_devcontainer: false, + image: String::new(), + ports: Vec::new(), + setup: None, + }; + } + + InitResult { + devcontainer_path: ".devcontainer/devcontainer.json".to_string(), + create_devcontainer: true, + image: "node:20".to_string(), + ports: Vec::new(), + setup: None, + } +} + +/// Write devcontainer.json from wizard results. +/// Creates .devcontainer/ directory if needed. +pub fn write_devcontainer(repo_root: &Path, result: &InitResult) -> Result { + let dc_dir = repo_root.join(".devcontainer"); + std::fs::create_dir_all(&dc_dir).map_err(|e| HintsError::WriteError(dc_dir.clone(), e))?; + + let dc_path = dc_dir.join("devcontainer.json"); + + let mut dc = serde_json::Map::new(); + dc.insert( + "image".to_string(), + serde_json::Value::String(result.image.clone()), + ); + + if !result.ports.is_empty() { + let ports: Vec = result + .ports + .iter() + .map(|p| serde_json::Value::Number((*p).into())) + .collect(); + dc.insert("forwardPorts".to_string(), serde_json::Value::Array(ports)); + } + + if let Some(ref setup) = result.setup { + dc.insert( + "postCreateCommand".to_string(), + serde_json::Value::String(setup.clone()), + ); + } + + let content = serde_json::to_string_pretty(&dc).map_err(HintsError::JsonSerializeError)?; + std::fs::write(&dc_path, format!("{content}\n")) + .map_err(|e| HintsError::WriteError(dc_path.clone(), e))?; + + Ok(dc_path) +} + +/// Write .dual/settings.json from wizard results. +/// Creates .dual/ directory if needed. +pub fn write_settings(repo_root: &Path, devcontainer_path: &str) -> Result { + let dual_dir = repo_root.join(".dual"); + std::fs::create_dir_all(&dual_dir).map_err(|e| HintsError::WriteError(dual_dir.clone(), e))?; + + let config = DualConfig { + devcontainer: devcontainer_path.to_string(), + ..Default::default() + }; + + let path = dual_dir.join("settings.json"); + let contents = serde_json::to_string_pretty(&config).map_err(HintsError::JsonSerializeError)?; + std::fs::write(&path, contents).map_err(|e| HintsError::WriteError(path.clone(), e))?; + + Ok(path) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn apply_defaults_without_existing_devcontainer() { + let dir = std::env::temp_dir().join("dual-test-init-defaults"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + let result = apply_defaults(&dir); + assert!(result.create_devcontainer); + assert_eq!(result.image, "node:20"); + assert!(result.ports.is_empty()); + assert!(result.setup.is_none()); + assert_eq!(result.devcontainer_path, ".devcontainer/devcontainer.json"); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn apply_defaults_with_existing_devcontainer() { + let dir = std::env::temp_dir().join("dual-test-init-existing"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "python:3.12"}"#, + ) + .unwrap(); + + let result = apply_defaults(&dir); + assert!(!result.create_devcontainer); + assert_eq!(result.devcontainer_path, ".devcontainer/devcontainer.json"); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn write_devcontainer_creates_valid_json() { + let dir = std::env::temp_dir().join("dual-test-init-write-dc"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + let result = InitResult { + devcontainer_path: ".devcontainer/devcontainer.json".to_string(), + create_devcontainer: true, + image: "node:20".to_string(), + ports: vec![3000, 8080], + setup: Some("pnpm install".to_string()), + }; + + write_devcontainer(&dir, &result).unwrap(); + + let dc_path = dir.join(".devcontainer").join("devcontainer.json"); + assert!(dc_path.exists()); + + let content = std::fs::read_to_string(&dc_path).unwrap(); + let dc: crate::devcontainer::DevcontainerJson = serde_json::from_str(&content).unwrap(); + assert_eq!(dc.image.as_deref(), Some("node:20")); + let ports: Vec = dc + .forward_ports + .unwrap() + .iter() + .filter_map(|p| p.to_port()) + .collect(); + assert_eq!(ports, vec![3000, 8080]); + assert_eq!( + dc.post_create_command.unwrap().to_shell_command(), + "pnpm install" + ); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn write_devcontainer_no_ports_no_setup() { + let dir = std::env::temp_dir().join("dual-test-init-write-dc-minimal"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + let result = InitResult { + devcontainer_path: ".devcontainer/devcontainer.json".to_string(), + create_devcontainer: true, + image: "rust:latest".to_string(), + ports: Vec::new(), + setup: None, + }; + + write_devcontainer(&dir, &result).unwrap(); + + let content = + std::fs::read_to_string(dir.join(".devcontainer").join("devcontainer.json")).unwrap(); + assert!(!content.contains("forwardPorts")); + assert!(!content.contains("postCreateCommand")); + assert!(content.contains("rust:latest")); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn write_settings_creates_valid_json() { + let dir = std::env::temp_dir().join("dual-test-init-write-settings"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + write_settings(&dir, ".devcontainer/devcontainer.json").unwrap(); + + let path = dir.join(".dual").join("settings.json"); + assert!(path.exists()); + + let content = std::fs::read_to_string(&path).unwrap(); + let config: crate::config::DualConfig = serde_json::from_str(&content).unwrap(); + assert_eq!(config.devcontainer, ".devcontainer/devcontainer.json"); + assert!(config.extra_commands.is_empty()); + assert_eq!(config.anonymous_volumes, vec!["node_modules".to_string()]); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn write_settings_custom_devcontainer_path() { + let dir = std::env::temp_dir().join("dual-test-init-custom-dc-path"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + write_settings(&dir, "custom/devcontainer.json").unwrap(); + + let content = std::fs::read_to_string(dir.join(".dual").join("settings.json")).unwrap(); + let config: crate::config::DualConfig = serde_json::from_str(&content).unwrap(); + assert_eq!(config.devcontainer, "custom/devcontainer.json"); + + let _ = std::fs::remove_dir_all(&dir); + } +} diff --git a/src/lib.rs b/src/lib.rs index 222a9c1..37387fc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,8 @@ pub mod cli; pub mod clone; pub mod config; pub mod container; +pub mod devcontainer; +pub mod init; pub mod proxy; pub mod shared; pub mod shell; diff --git a/src/main.rs b/src/main.rs index b9d522d..b16d359 100644 --- a/src/main.rs +++ b/src/main.rs @@ -33,12 +33,29 @@ fn main() { .with_target(false) .init(); + // One-time post-install: ensure shell hook is installed + match shell::install_shell_hook() { + Ok(true) => { + let rc_name = shell::detect_shell_rc() + .map(|p| { + p.file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string() + }) + .unwrap_or_default(); + info!("Installed shell hook in ~/{rc_name} for tmux pane interception."); + } + Ok(false) => {} // Already installed or unsupported shell + Err(e) => warn!("could not install shell hook: {e}"), + } + let cli = Cli::parse(); let backend = TmuxBackend::new(); let exit_code = match cli.command { None => cmd_default(&backend), - Some(Command::Add { name }) => cmd_add(name.as_deref()), + Some(Command::Init { name, yes }) => cmd_init(name.as_deref(), yes), Some(Command::Create { branch, repo }) => cmd_create(repo.as_deref(), &branch), Some(Command::Launch { workspace }) => cmd_launch(workspace.as_deref(), &backend), Some(Command::List) => cmd_list(&backend), @@ -71,13 +88,13 @@ fn cmd_default(backend: &dyn MultiplexerBackend) -> i32 { Ok(s) => s, Err(e) => { error!("{e}"); - info!("Run `dual add` inside a repo to get started."); + info!("Run `dual init` inside a repo to get started."); return 1; } }; if st.all_workspaces().is_empty() { - info!("No workspaces. Run `dual add` inside a repo to get started."); + info!("No workspaces. Run `dual init` inside a repo to get started."); return 0; } @@ -108,8 +125,8 @@ fn cmd_default(backend: &dyn MultiplexerBackend) -> i32 { } } -/// Register the current repo as a dual workspace. -fn cmd_add(name: Option<&str>) -> i32 { +/// Initialize the current repo as a dual workspace. +fn cmd_init(name: Option<&str>, yes: bool) -> i32 { // Detect git repo info from current directory let (repo_root, url, branch) = match detect_git_repo() { Ok(info) => info, @@ -140,25 +157,48 @@ fn cmd_add(name: Option<&str>) -> i32 { return 1; } - // Check for .dual.toml — if missing, create a default one with helpful comments - let hints_path = repo_root.join(".dual.toml"); - if !hints_path.exists() { - if let Err(e) = config::write_default_hints(&repo_root) { - warn!("failed to write .dual.toml: {e}"); - } else { - info!("Created .dual.toml with defaults (image: node:20)"); - info!("Edit it to customize ports, image, setup command, and env vars."); + // Run wizard or apply defaults + let result = if yes { + dual::init::apply_defaults(&repo_root) + } else { + match dual::init::run_wizard(&repo_root) { + Ok(r) => r, + Err(e) => { + error!("{e}"); + return 1; + } + } + }; + + // Write devcontainer.json if needed + if result.create_devcontainer { + match dual::init::write_devcontainer(&repo_root, &result) { + Ok(_) => info!("Created .devcontainer/devcontainer.json"), + Err(e) => { + warn!("failed to write devcontainer.json: {e}"); + } + } + } else { + info!("Using existing {}", result.devcontainer_path); + } + + // Write .dual/settings.json if missing + let settings_path = repo_root.join(".dual").join("settings.json"); + if !settings_path.exists() { + match dual::init::write_settings(&repo_root, &result.devcontainer_path) { + Ok(_) => info!("Created .dual/settings.json"), + Err(e) => { + warn!("failed to write .dual/settings.json: {e}"); + } } } - // Initialize shared directory if [shared] is configured + // Initialize shared directory if shared files are configured let hints = config::load_hints(&repo_root).unwrap_or_default(); - if let Some(ref shared_config) = hints.shared - && !shared_config.files.is_empty() - { + if !hints.shared.is_empty() { match shared::ensure_shared_dir(&repo_name) { Ok(shared_dir) => { - match shared::init_from_main(&repo_root, &shared_dir, &shared_config.files) { + match shared::init_from_main(&repo_root, &shared_dir, &hints.shared) { Ok(moved) => { for f in &moved { info!(" shared: {f} → ~/.dual/shared/{repo_name}/"); @@ -191,7 +231,7 @@ fn cmd_add(name: Option<&str>) -> i32 { } let ws_id = config::workspace_id(&repo_name, &branch); - info!("Added workspace: {ws_id}"); + info!("Initialized workspace: {ws_id}"); info!("Use `dual launch {ws_id}` to start."); 0 } @@ -217,7 +257,7 @@ fn cmd_create(repo_arg: Option<&str>, branch: &str) -> i32 { None => { error!("could not detect repo from current directory"); info!("Usage: dual create --repo "); - info!("Or run from inside a repo that was added with `dual add`."); + info!("Or run from inside a repo that was initialized with `dual init`."); return 1; } }, @@ -226,7 +266,7 @@ fn cmd_create(repo_arg: Option<&str>, branch: &str) -> i32 { // Find an existing workspace for this repo let existing = st.workspaces_for_repo(&repo); if existing.is_empty() { - error!("repo '{repo}' not found. Run `dual add` inside the repo first."); + error!("repo '{repo}' not found. Run `dual init` inside the repo first."); return 1; } @@ -362,13 +402,12 @@ fn cmd_launch(workspace_arg: Option<&str>, backend: &dyn MultiplexerBackend) -> // Step 2: Handle shared files let hints = config::load_hints(&workspace_dir).unwrap_or_default(); - if let Some(ref shared_config) = hints.shared - && !shared_config.files.is_empty() + if !hints.shared.is_empty() && let Ok(shared_dir) = shared::ensure_shared_dir(&entry.repo) { if entry.path.is_some() { // Main workspace: ensure shared files are initialized - match shared::init_from_main(&workspace_dir, &shared_dir, &shared_config.files) { + match shared::init_from_main(&workspace_dir, &shared_dir, &hints.shared) { Ok(moved) => { for f in &moved { info!(" shared: {f} → ~/.dual/shared/{}/", entry.repo); @@ -378,7 +417,7 @@ fn cmd_launch(workspace_arg: Option<&str>, backend: &dyn MultiplexerBackend) -> } } else { // Branch workspace: copy shared files - match shared::copy_to_branch(&workspace_dir, &shared_dir, &shared_config.files) { + match shared::copy_to_branch(&workspace_dir, &shared_dir, &hints.shared) { Ok(copied) => { for f in &copied { info!(" shared: copied {f}"); @@ -396,11 +435,26 @@ fn cmd_launch(workspace_arg: Option<&str>, backend: &dyn MultiplexerBackend) -> ); match container::status(&container_name) { container::ContainerStatus::Missing => { + // Build image from Dockerfile if configured + let effective_image = if let Some(ref build) = hints.dockerfile { + let image_tag = format!("dual-build-{container_name}"); + info!("Building image from Dockerfile..."); + match container::build_image(&image_tag, &workspace_dir, build) { + Ok(tag) => tag, + Err(e) => { + error!("docker build failed: {e}"); + return 1; + } + } + } else { + hints.image.clone() + }; + info!("Creating container {container_name}..."); if let Err(e) = container::create( &container_name, &workspace_dir, - &hints.image, + &effective_image, &hints.env, &hints.anonymous_volumes, ) { @@ -447,6 +501,18 @@ fn cmd_launch(workspace_arg: Option<&str>, backend: &dyn MultiplexerBackend) -> error!("session creation failed: {e}"); return 1; } + + // Set session-level env vars so new panes auto-source interception + let rc_path_str = rc_path.to_string_lossy(); + for (key, value) in [ + ("DUAL_ACTIVE", "1"), + ("DUAL_RC_PATH", rc_path_str.as_ref()), + ("DUAL_CONTAINER", container_name.as_str()), + ] { + if let Err(e) = dual::tmux_backend::set_session_env(&session_name, key, value) { + warn!("failed to set tmux env {key}: {e}"); + } + } } // Step 6: Attach @@ -580,7 +646,7 @@ fn cmd_open(workspace: Option) -> i32 { let url_groups = proxy::workspace_urls(&st); if url_groups.is_empty() { - info!("No URLs configured. Add 'ports' to .dual.toml in your repo."); + info!("No URLs configured. Add 'forwardPorts' to devcontainer.json in your repo."); return 0; } @@ -631,7 +697,7 @@ fn cmd_urls(workspace: Option) -> i32 { let url_groups = proxy::workspace_urls(&st); if url_groups.is_empty() { - info!("No URLs configured. Add 'ports' to .dual.toml in your repo."); + info!("No URLs configured. Add 'forwardPorts' to devcontainer.json in your repo."); return 0; } @@ -711,13 +777,10 @@ fn cmd_sync(workspace_arg: Option) -> i32 { // Load hints let workspace_dir = st.workspace_dir(&entry); let hints = config::load_hints(&workspace_dir).unwrap_or_default(); - let shared_config = match &hints.shared { - Some(s) if !s.files.is_empty() => s, - _ => { - error!("no [shared] section in .dual.toml (or files list is empty)"); - return 1; - } - }; + if hints.shared.is_empty() { + error!("no shared files configured in .dual/settings.json"); + return 1; + } let shared_dir = match shared::ensure_shared_dir(&entry.repo) { Ok(d) => d, @@ -731,7 +794,7 @@ fn cmd_sync(workspace_arg: Option) -> i32 { if is_main { // Main workspace: init shared dir, then prompt to sync all branches - match shared::init_from_main(&workspace_dir, &shared_dir, &shared_config.files) { + match shared::init_from_main(&workspace_dir, &shared_dir, &hints.shared) { Ok(moved) => { for f in &moved { info!(" moved {f} → shared/"); @@ -773,7 +836,7 @@ fn cmd_sync(workspace_arg: Option) -> i32 { continue; // Not yet cloned } let ws_id = config::workspace_id(&branch_entry.repo, &branch_entry.branch); - match shared::copy_to_branch(&branch_dir, &shared_dir, &shared_config.files) { + match shared::copy_to_branch(&branch_dir, &shared_dir, &hints.shared) { Ok(copied) => { info!("{ws_id}: synced {} file(s)", copied.len()); } @@ -782,7 +845,7 @@ fn cmd_sync(workspace_arg: Option) -> i32 { } } else { // Branch workspace: copy from shared dir - match shared::copy_to_branch(&workspace_dir, &shared_dir, &shared_config.files) { + match shared::copy_to_branch(&workspace_dir, &shared_dir, &hints.shared) { Ok(copied) => { if copied.is_empty() { info!( @@ -1020,22 +1083,45 @@ mod tests { } #[test] - fn add_subcommand() { - let cli = Cli::parse_from(["dual", "add"]); - if let Some(Command::Add { name }) = cli.command { + fn init_subcommand() { + let cli = Cli::parse_from(["dual", "init"]); + if let Some(Command::Init { name, yes }) = cli.command { assert!(name.is_none()); + assert!(!yes); } else { - panic!("expected Add command"); + panic!("expected Init command"); } } #[test] - fn add_with_name() { - let cli = Cli::parse_from(["dual", "add", "--name", "myrepo"]); - if let Some(Command::Add { name }) = cli.command { + fn init_with_name() { + let cli = Cli::parse_from(["dual", "init", "--name", "myrepo"]); + if let Some(Command::Init { name, yes }) = cli.command { assert_eq!(name.as_deref(), Some("myrepo")); + assert!(!yes); + } else { + panic!("expected Init command"); + } + } + + #[test] + fn init_with_yes() { + let cli = Cli::parse_from(["dual", "init", "--yes"]); + if let Some(Command::Init { name, yes }) = cli.command { + assert!(name.is_none()); + assert!(yes); + } else { + panic!("expected Init command"); + } + } + + #[test] + fn init_with_short_yes() { + let cli = Cli::parse_from(["dual", "init", "-y"]); + if let Some(Command::Init { yes, .. }) = cli.command { + assert!(yes); } else { - panic!("expected Add command"); + panic!("expected Init command"); } } diff --git a/src/proxy.rs b/src/proxy.rs index 9c50907..31aec5a 100644 --- a/src/proxy.rs +++ b/src/proxy.rs @@ -87,10 +87,11 @@ pub async fn start(state: &WorkspaceState) -> Result<(), Box String { + format!( + r#" +{RC_MARKER} +if [ -n "$DUAL_ACTIVE" ] && [ -n "$DUAL_RC_PATH" ] && [ -f "$DUAL_RC_PATH" ]; then + source "$DUAL_RC_PATH" +fi +"# + ) +} + +/// Detect the user's shell RC file path. +/// +/// Returns the path to ~/.zshrc or ~/.bashrc based on $SHELL. +/// Returns None if the shell is not bash or zsh. +pub fn detect_shell_rc() -> Option { + let home = dirs::home_dir()?; + let shell = std::env::var("SHELL").unwrap_or_default(); + let base = shell.rsplit('/').next().unwrap_or(""); + + match base { + "zsh" => Some(home.join(".zshrc")), + "bash" => Some(home.join(".bashrc")), + _ => None, + } +} + +/// Install the auto-source snippet into the user's shell RC file. +/// +/// Idempotent: checks for the marker comment before appending. +/// Creates the RC file if it doesn't exist. +/// Returns Ok(true) if the snippet was newly installed, Ok(false) if +/// already present. +pub fn install_shell_hook() -> Result { + let rc_path = match detect_shell_rc() { + Some(p) => p, + None => return Ok(false), + }; + + // Read existing content (or empty if file doesn't exist) + let existing = std::fs::read_to_string(&rc_path).unwrap_or_default(); + + // Check if snippet is already installed + if existing.contains(RC_MARKER) { + return Ok(false); + } + + // Append snippet + use std::io::Write; + let mut file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&rc_path)?; + file.write_all(shell_hook_snippet().as_bytes())?; + + Ok(true) +} + /// Get the source command for an RC file path. pub fn source_file_command(rc_path: &std::path::Path) -> String { format!("source \"{}\"", rc_path.display()) @@ -248,4 +314,68 @@ mod tests { "source \"/Users/user/Library/Application Support/dual/rc/dual-test.sh\"" ); } + + #[test] + fn shell_hook_snippet_contains_guard() { + let snippet = shell_hook_snippet(); + assert!(snippet.contains("DUAL_ACTIVE")); + assert!(snippet.contains("DUAL_RC_PATH")); + assert!(snippet.contains("source")); + assert!(snippet.contains(RC_MARKER)); + } + + #[test] + fn shell_hook_snippet_is_noop_without_vars() { + let snippet = shell_hook_snippet(); + assert!(snippet.contains("-n \"$DUAL_ACTIVE\"")); + assert!(snippet.contains("-f \"$DUAL_RC_PATH\"")); + } + + #[test] + fn detect_shell_rc_respects_shell_env() { + let original = std::env::var("SHELL").ok(); + + // SAFETY: test runs single-threaded + unsafe { + std::env::set_var("SHELL", "/bin/zsh"); + let path = detect_shell_rc(); + assert!(path.is_some()); + assert!(path.unwrap().ends_with(".zshrc")); + + std::env::set_var("SHELL", "/bin/bash"); + let path = detect_shell_rc(); + assert!(path.is_some()); + assert!(path.unwrap().ends_with(".bashrc")); + + std::env::set_var("SHELL", "/usr/bin/fish"); + let path = detect_shell_rc(); + assert!(path.is_none()); + + // Restore + match original { + Some(v) => std::env::set_var("SHELL", v), + None => std::env::remove_var("SHELL"), + } + } + } + + #[test] + fn install_shell_hook_is_idempotent() { + let dir = tempfile::tempdir().unwrap(); + let rc_path = dir.path().join(".zshrc"); + std::fs::write(&rc_path, "# existing config\n").unwrap(); + + // Manually write snippet to test idempotency detection + let mut f = std::fs::OpenOptions::new() + .append(true) + .open(&rc_path) + .unwrap(); + use std::io::Write; + f.write_all(shell_hook_snippet().as_bytes()).unwrap(); + drop(f); + + let content = std::fs::read_to_string(&rc_path).unwrap(); + let marker_count = content.matches(RC_MARKER).count(); + assert_eq!(marker_count, 1); + } } diff --git a/src/state.rs b/src/state.rs index d04fafb..f683810 100644 --- a/src/state.rs +++ b/src/state.rs @@ -32,7 +32,7 @@ pub struct WorkspaceEntry { /// Branch name (e.g. "main", "feat/auth") pub branch: String, - /// Explicit path to workspace directory (for `dual add` — user's existing clone). + /// Explicit path to workspace directory (for `dual init` — user's existing clone). /// If None, workspace lives at {workspace_root}/{repo}/{encoded_branch}/ /// and will be cloned on first launch. pub path: Option, diff --git a/src/tmux_backend.rs b/src/tmux_backend.rs index 04b3bd6..b6d2d93 100644 --- a/src/tmux_backend.rs +++ b/src/tmux_backend.rs @@ -143,6 +143,12 @@ pub fn build_new_session_args(session_name: &str, cwd: &Path) -> Vec { ] } +/// Set an environment variable on a tmux session. +/// New panes/windows in this session will inherit the variable. +pub fn set_session_env(session_name: &str, key: &str, value: &str) -> Result<(), BackendError> { + tmux_simple(&["set-environment", "-t", session_name, key, value]) +} + fn tmux_simple(args: &[&str]) -> Result<(), BackendError> { let output = Command::new("tmux") .args(args) @@ -241,4 +247,11 @@ mod tests { // Just verify it compiles and doesn't panic let _ = backend.is_inside(); } + + #[test] + fn set_session_env_exists() { + // Verify the function exists and has the right signature. + // Actual tmux interaction is covered by manual testing. + let _ = set_session_env; + } } diff --git a/src/tui/ui.rs b/src/tui/ui.rs index a5df49e..4cba12f 100644 --- a/src/tui/ui.rs +++ b/src/tui/ui.rs @@ -31,7 +31,7 @@ pub fn render(frame: &mut Frame, app: &App) { let items = app.flatten_items(); if items.is_empty() { - let empty = Paragraph::new(" No workspaces. Run `dual add` in a repo to get started.") + let empty = Paragraph::new(" No workspaces. Run `dual init` in a repo to get started.") .block(block); frame.render_widget(empty, chunks[1]); } else { diff --git a/tests/fixtures/mod.rs b/tests/fixtures/mod.rs index d539a12..ef51959 100644 --- a/tests/fixtures/mod.rs +++ b/tests/fixtures/mod.rs @@ -98,16 +98,29 @@ pub fn fixture_state( state } -/// Write .dual.toml hints into a workspace directory. +/// Write devcontainer.json + .dual/settings.json for a fixture workspace. +/// +/// Creates `.devcontainer/devcontainer.json` with image and ports, +/// and `.dual/settings.json` with DualConfig defaults. pub fn create_fixture_hints(repo_dir: &Path, ports: &[u16]) { - let hints = dual::config::RepoHints { - image: "node:20".to_string(), - ports: ports.to_vec(), - setup: None, - env: std::collections::HashMap::new(), - extra_commands: Vec::new(), - anonymous_volumes: vec!["node_modules".to_string()], - shared: None, + // Write devcontainer.json with container config + let dc_dir = repo_dir.join(".devcontainer"); + std::fs::create_dir_all(&dc_dir).expect("failed to create .devcontainer dir"); + + let ports_json: Vec = ports.iter().map(|p| p.to_string()).collect(); + let dc_content = if ports.is_empty() { + r#"{"image": "node:20"}"#.to_string() + } else { + format!( + r#"{{"image": "node:20", "forwardPorts": [{}]}}"#, + ports_json.join(", ") + ) }; - dual::config::write_hints(repo_dir, &hints).expect("failed to write fixture hints"); + std::fs::write(dc_dir.join("devcontainer.json"), dc_content) + .expect("failed to write devcontainer.json"); + + // Write .dual/settings.json with DualConfig defaults + let dual_config = dual::config::DualConfig::default(); + dual::config::write_dual_config(repo_dir, &dual_config) + .expect("failed to write .dual/settings.json"); } diff --git a/thoughts/shared/plans/2026-02-16-devcontainer-json-fallback.md b/thoughts/shared/plans/2026-02-16-devcontainer-json-fallback.md new file mode 100644 index 0000000..5d645b3 --- /dev/null +++ b/thoughts/shared/plans/2026-02-16-devcontainer-json-fallback.md @@ -0,0 +1,968 @@ +# devcontainer.json Fallback Reader — Implementation Plan + +## Overview + +Add devcontainer.json as a fallback configuration source when no `.dual.toml` exists. This gives Dual zero-config compatibility with the ~15% of repos that already have `.devcontainer/` directories, while keeping `.dual.toml` as the primary config for Dual-specific concerns. + +Scope includes full spec-compatible parsing for the ~5 fields we read, plus adding `docker build` support to handle `build.dockerfile`. + +## Current State Analysis + +- `RepoHints` defined at `src/config.rs:18-46` — 7 fields: `image`, `ports`, `setup`, `env`, `extra_commands`, `anonymous_volumes`, `shared` +- `load_hints()` at `src/config.rs:77-89` — reads `.dual.toml`, returns `RepoHints::default()` if missing +- `container::create()` at `src/container.rs:22-50` — accepts `image: &str`, no build support +- `cmd_launch()` at `src/main.rs:364` — loads hints, creates container, runs setup +- No `serde_json` dependency — only `toml` + `serde` in `Cargo.toml:17,21` + +### Key Discoveries: +- `load_hints()` already returns defaults when `.dual.toml` is missing (`config.rs:80-82`) — the fallback insertion point is clean +- Container creation accepts image as a plain string (`container.rs:25`) — we can pass a built image tag the same way +- Serde `#[serde(default)]` and `#[serde(rename_all)]` patterns are well-established in the codebase +- Test patterns use `parse_hints()` string-based parsing and temp dir filesystem tests + +## Desired End State + +When a workspace has no `.dual.toml` but has a `.devcontainer/devcontainer.json` (or root `devcontainer.json`): +1. Dual reads the devcontainer.json and maps supported fields to `RepoHints` +2. If `build.dockerfile` is specified, Dual builds the image via `docker build` before creating the container +3. If `image` is specified, it maps directly to `hints.image` +4. `forwardPorts`, `containerEnv`, `postCreateCommand` map to `ports`, `env`, `setup` +5. Volume-type mounts targeting `/workspace/*` map to `anonymous_volumes` +6. `.dual.toml` always takes priority — devcontainer.json is only read as fallback + +### Verification: +- `cargo test` passes with new unit tests covering all devcontainer.json format variants +- `cargo clippy` clean +- Manual: clone a repo with `.devcontainer/devcontainer.json`, run `dual launch`, verify container uses the devcontainer-specified image/ports/env + +## What We're NOT Doing + +- **Dev container features** — No OCI artifact pulling, no `install.sh` execution. Repos needing features should use `build.dockerfile` pointing to their own Dockerfile. +- **Lifecycle hooks beyond `postCreateCommand`** — No `postStartCommand`, `postAttachCommand`. Only `postCreateCommand` maps to `setup`. +- **Variable substitution** — No `${localEnv:VAR}` or `${containerWorkspaceFolder}` expansion in `containerEnv`. Literal values only. +- **Docker Compose** — No `dockerComposeFile` support. Conflicts with Dual's one-container-per-workspace model. +- **IDE customizations** — All `customizations.*` fields ignored. +- **Container user/security** — `remoteUser`, `containerUser`, `privileged`, `capAdd` ignored. Dual controls these. + +## Implementation Approach + +Three phases, each independently testable: + +1. **Parser** — New `src/devcontainer.rs` module with types and parsing logic +2. **Config Integration** — Wire parser into `load_hints()` fallback chain, add `DockerfileBuild` to `RepoHints` +3. **Docker Build** — Add `build_image()` to `container.rs`, modify `cmd_launch()` flow + +--- + +## Phase 1: Parser & Types + +### Overview +Create `src/devcontainer.rs` with the `DevcontainerJson` struct and multi-format enum types. Add `serde_json` dependency. + +### Changes Required: + +#### 1. Add `serde_json` dependency +**File**: `Cargo.toml` +**Changes**: Add `serde_json` to `[dependencies]` + +```toml +[dependencies] +# ... existing deps ... +serde_json = "1" +``` + +#### 2. Register new module +**File**: `src/lib.rs` +**Changes**: Add `pub mod devcontainer;` + +```rust +pub mod backend; +pub mod cli; +pub mod clone; +pub mod config; +pub mod container; +pub mod devcontainer; // new +pub mod proxy; +pub mod shared; +pub mod shell; +pub mod state; +pub mod tmux_backend; +pub mod tui; +``` + +#### 3. Create devcontainer parser module +**File**: `src/devcontainer.rs` (new) +**Changes**: Full module with types, parsing, and path resolution + +```rust +use serde::Deserialize; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +use crate::config::RepoHints; + +/// Subset of devcontainer.json fields that Dual consumes. +/// See: https://containers.dev/implementors/json_reference/ +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DevcontainerJson { + /// Docker image to use (mutually exclusive with `build`) + pub image: Option, + + /// Build configuration for Dockerfile-based images + pub build: Option, + + /// Ports to forward from the container + pub forward_ports: Option>, + + /// Environment variables for the container + pub container_env: Option>, + + /// Command to run after container creation + pub post_create_command: Option, + + /// Mount configurations + pub mounts: Option>, +} + +/// Build configuration from devcontainer.json. +#[derive(Debug, Deserialize)] +pub struct BuildConfig { + /// Path to Dockerfile (relative to devcontainer.json location) + pub dockerfile: Option, + + /// Build context path (relative to devcontainer.json location) + pub context: Option, + + /// Docker build arguments + pub args: Option>, + + /// Build target stage for multi-stage builds + pub target: Option, +} + +/// Port specification — devcontainer allows integers or strings. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum PortSpec { + Number(u16), + String(String), +} + +impl PortSpec { + /// Convert to u16, parsing strings as integers. + pub fn to_port(&self) -> Option { + match self { + PortSpec::Number(n) => Some(*n), + PortSpec::String(s) => s.parse::().ok(), + } + } +} + +/// Command specification — devcontainer allows string, array, or object forms. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum CommandSpec { + /// Single shell command: "pnpm install" + String(String), + /// Exec form: ["pnpm", "install"] + Array(Vec), + /// Parallel commands: {"install": "pnpm install", "build": "pnpm build"} + Object(HashMap), +} + +impl CommandSpec { + /// Flatten to a single shell command string. + /// - String: returned as-is + /// - Array: joined with spaces + /// - Object: values joined with " && " + pub fn to_shell_command(&self) -> String { + match self { + CommandSpec::String(s) => s.clone(), + CommandSpec::Array(arr) => arr.join(" "), + CommandSpec::Object(map) => { + let mut commands: Vec<&str> = map + .values() + .map(|v| match v { + StringOrArray::String(s) => s.as_str(), + StringOrArray::Array(arr) => { + // Can't return a joined string as &str easily, + // so we'll handle this differently below + "" + } + }) + .collect(); + // Re-do with owned strings to handle array values + let commands: Vec = map + .values() + .map(|v| match v { + StringOrArray::String(s) => s.clone(), + StringOrArray::Array(arr) => arr.join(" "), + }) + .collect(); + commands.join(" && ") + } + } + } +} + +/// Value in a command object — can be string or array. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum StringOrArray { + String(String), + Array(Vec), +} + +/// Mount specification — devcontainer allows string or object forms. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub enum MountSpec { + /// Docker mount syntax: "type=volume,target=/workspace/node_modules" + String(String), + /// Structured mount object + Object(MountObject), +} + +/// Structured mount configuration. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MountObject { + #[serde(rename = "type")] + pub mount_type: Option, + pub source: Option, + pub target: String, +} + +const WORKSPACE_PREFIX: &str = "/workspace/"; + +impl MountSpec { + /// Extract anonymous volume path if this is a volume mount targeting /workspace/*. + /// Returns the relative path (e.g., "node_modules" from "/workspace/node_modules"). + pub fn as_anonymous_volume(&self) -> Option { + match self { + MountSpec::String(s) => { + // Parse "type=volume,target=/workspace/node_modules" format + let mut mount_type = None; + let mut target = None; + let mut has_source = false; + for part in s.split(',') { + let (key, value) = part.split_once('=')?; + match key.trim() { + "type" => mount_type = Some(value.trim()), + "target" | "dst" | "destination" => target = Some(value.trim()), + "source" | "src" => has_source = true, + _ => {} + } + } + if mount_type == Some("volume") && !has_source { + target + .filter(|t| t.starts_with(WORKSPACE_PREFIX)) + .map(|t| t[WORKSPACE_PREFIX.len()..].to_string()) + } + None // String format parsing is best-effort + } + MountSpec::Object(obj) => { + if obj.mount_type.as_deref() == Some("volume") && obj.source.is_none() { + if obj.target.starts_with(WORKSPACE_PREFIX) { + return Some(obj.target[WORKSPACE_PREFIX.len()..].to_string()); + } + } + None + } + } + } +} + +/// Resolve the path to devcontainer.json in a workspace directory. +/// +/// Checks in order: +/// 1. `.devcontainer/devcontainer.json` +/// 2. `.devcontainer.json` (root) +/// +/// Returns None if neither exists. +pub fn find_devcontainer_json(workspace_dir: &Path) -> Option { + let candidates = [ + workspace_dir.join(".devcontainer").join("devcontainer.json"), + workspace_dir.join(".devcontainer.json"), + ]; + + candidates.into_iter().find(|p| p.exists()) +} + +/// Parse a devcontainer.json string into DevcontainerJson. +pub fn parse_devcontainer(json_str: &str) -> Result { + serde_json::from_str(json_str) +} + +/// Load and parse devcontainer.json from a workspace directory. +/// Returns None if no devcontainer.json exists. +pub fn load_devcontainer(workspace_dir: &Path) -> Option { + let path = find_devcontainer_json(workspace_dir)?; + let contents = std::fs::read_to_string(&path).ok()?; + parse_devcontainer(&contents).ok() +} + +/// Convert DevcontainerJson fields to RepoHints. +/// +/// Maps: +/// - `image` → `hints.image` +/// - `build` → `hints.dockerfile` +/// - `forwardPorts` → `hints.ports` +/// - `containerEnv` → `hints.env` +/// - `postCreateCommand` → `hints.setup` +/// - `mounts` (volume type, /workspace/* target) → `hints.anonymous_volumes` +pub fn to_repo_hints(dc: &DevcontainerJson, devcontainer_dir: &Path) -> RepoHints { + let mut hints = RepoHints::default(); + + // Image (mutually exclusive with build) + if let Some(ref image) = dc.image { + hints.image = image.clone(); + } + + // Build → DockerfileBuild + if let Some(ref build) = dc.build { + if build.dockerfile.is_some() { + hints.dockerfile = Some(crate::config::DockerfileBuild { + path: build.dockerfile.clone().unwrap_or_else(|| "Dockerfile".to_string()), + context: build.context.clone().unwrap_or_else(|| ".".to_string()), + args: build.args.clone().unwrap_or_default(), + target: build.target.clone(), + // Resolve paths relative to devcontainer.json location + base_dir: Some(devcontainer_dir.to_path_buf()), + }); + } + } + + // Ports + if let Some(ref ports) = dc.forward_ports { + hints.ports = ports.iter().filter_map(|p| p.to_port()).collect(); + } + + // Environment variables + if let Some(ref env) = dc.container_env { + hints.env = env.clone(); + } + + // Setup command + if let Some(ref cmd) = dc.post_create_command { + let shell_cmd = cmd.to_shell_command(); + if !shell_cmd.is_empty() { + hints.setup = Some(shell_cmd); + } + } + + // Anonymous volumes from mounts + if let Some(ref mounts) = dc.mounts { + let extra_volumes: Vec = mounts + .iter() + .filter_map(|m| m.as_anonymous_volume()) + .collect(); + if !extra_volumes.is_empty() { + // Merge with defaults, dedup + for vol in extra_volumes { + if !hints.anonymous_volumes.contains(&vol) { + hints.anonymous_volumes.push(vol); + } + } + } + } + + hints +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_minimal_image() { + let json = r#"{"image": "node:20"}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.image.as_deref(), Some("node:20")); + assert!(dc.build.is_none()); + } + + #[test] + fn parse_with_build() { + let json = r#"{ + "build": { + "dockerfile": "Dockerfile", + "context": "..", + "args": {"NODE_VERSION": "20"}, + "target": "development" + } + }"#; + let dc = parse_devcontainer(json).unwrap(); + assert!(dc.image.is_none()); + let build = dc.build.unwrap(); + assert_eq!(build.dockerfile.as_deref(), Some("Dockerfile")); + assert_eq!(build.context.as_deref(), Some("..")); + assert_eq!(build.args.unwrap().get("NODE_VERSION").unwrap(), "20"); + assert_eq!(build.target.as_deref(), Some("development")); + } + + #[test] + fn parse_forward_ports_integers() { + let json = r#"{"forwardPorts": [3000, 8080]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc.forward_ports.unwrap().iter().filter_map(|p| p.to_port()).collect(); + assert_eq!(ports, vec![3000, 8080]); + } + + #[test] + fn parse_forward_ports_strings() { + let json = r#"{"forwardPorts": ["3000", "8080"]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc.forward_ports.unwrap().iter().filter_map(|p| p.to_port()).collect(); + assert_eq!(ports, vec![3000, 8080]); + } + + #[test] + fn parse_forward_ports_mixed() { + let json = r#"{"forwardPorts": [3000, "8080"]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc.forward_ports.unwrap().iter().filter_map(|p| p.to_port()).collect(); + assert_eq!(ports, vec![3000, 8080]); + } + + #[test] + fn parse_forward_ports_invalid_string_skipped() { + let json = r#"{"forwardPorts": [3000, "not-a-port"]}"#; + let dc = parse_devcontainer(json).unwrap(); + let ports: Vec = dc.forward_ports.unwrap().iter().filter_map(|p| p.to_port()).collect(); + assert_eq!(ports, vec![3000]); + } + + #[test] + fn parse_container_env() { + let json = r#"{"containerEnv": {"NODE_ENV": "development", "DEBUG": "true"}}"#; + let dc = parse_devcontainer(json).unwrap(); + let env = dc.container_env.unwrap(); + assert_eq!(env.get("NODE_ENV").unwrap(), "development"); + assert_eq!(env.get("DEBUG").unwrap(), "true"); + } + + #[test] + fn parse_post_create_command_string() { + let json = r#"{"postCreateCommand": "pnpm install"}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.post_create_command.unwrap().to_shell_command(), "pnpm install"); + } + + #[test] + fn parse_post_create_command_array() { + let json = r#"{"postCreateCommand": ["pnpm", "install"]}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.post_create_command.unwrap().to_shell_command(), "pnpm install"); + } + + #[test] + fn parse_post_create_command_object() { + let json = r#"{"postCreateCommand": {"install": "pnpm install", "build": "pnpm build"}}"#; + let dc = parse_devcontainer(json).unwrap(); + let cmd = dc.post_create_command.unwrap().to_shell_command(); + // Object order is non-deterministic, but both commands should be present + assert!(cmd.contains("pnpm install")); + assert!(cmd.contains("pnpm build")); + assert!(cmd.contains(" && ")); + } + + #[test] + fn parse_post_create_command_object_with_array_value() { + let json = r#"{"postCreateCommand": {"install": ["pnpm", "install"]}}"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.post_create_command.unwrap().to_shell_command(), "pnpm install"); + } + + #[test] + fn parse_mount_object_volume() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("volume".to_string()), + source: None, + target: "/workspace/node_modules".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), Some("node_modules".to_string())); + } + + #[test] + fn parse_mount_object_with_source_not_anonymous() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("volume".to_string()), + source: Some("my-vol".to_string()), + target: "/workspace/node_modules".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), None); + } + + #[test] + fn parse_mount_object_bind_not_anonymous() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("bind".to_string()), + source: None, + target: "/workspace/node_modules".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), None); + } + + #[test] + fn parse_mount_object_non_workspace_target() { + let mount = MountSpec::Object(MountObject { + mount_type: Some("volume".to_string()), + source: None, + target: "/data/cache".to_string(), + }); + assert_eq!(mount.as_anonymous_volume(), None); + } + + #[test] + fn parse_unknown_fields_ignored() { + let json = r#"{ + "image": "node:20", + "customizations": {"vscode": {"extensions": ["ms-python.python"]}}, + "remoteUser": "vscode", + "features": {"ghcr.io/devcontainers/features/node:1": {}} + }"#; + let dc = parse_devcontainer(json).unwrap(); + assert_eq!(dc.image.as_deref(), Some("node:20")); + } + + #[test] + fn parse_empty_object() { + let json = r#"{}"#; + let dc = parse_devcontainer(json).unwrap(); + assert!(dc.image.is_none()); + assert!(dc.build.is_none()); + assert!(dc.forward_ports.is_none()); + } + + #[test] + fn to_repo_hints_image_only() { + let dc = DevcontainerJson { + image: Some("python:3.12".to_string()), + build: None, + forward_ports: None, + container_env: None, + post_create_command: None, + mounts: None, + }; + let hints = to_repo_hints(&dc, Path::new(".")); + assert_eq!(hints.image, "python:3.12"); + assert!(hints.dockerfile.is_none()); + } + + #[test] + fn to_repo_hints_full() { + let dc = DevcontainerJson { + image: Some("node:20".to_string()), + build: None, + forward_ports: Some(vec![PortSpec::Number(3000), PortSpec::String("8080".to_string())]), + container_env: Some(HashMap::from([("NODE_ENV".to_string(), "dev".to_string())])), + post_create_command: Some(CommandSpec::String("pnpm install".to_string())), + mounts: None, + }; + let hints = to_repo_hints(&dc, Path::new(".")); + assert_eq!(hints.image, "node:20"); + assert_eq!(hints.ports, vec![3000, 8080]); + assert_eq!(hints.env.get("NODE_ENV").unwrap(), "dev"); + assert_eq!(hints.setup.as_deref(), Some("pnpm install")); + } + + #[test] + fn find_devcontainer_json_in_subdir() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-find"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "node:20"}"#, + ).unwrap(); + + let found = find_devcontainer_json(&dir); + assert!(found.is_some()); + assert!(found.unwrap().ends_with(".devcontainer/devcontainer.json")); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn find_devcontainer_json_at_root() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-root"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + std::fs::write(dir.join(".devcontainer.json"), r#"{"image": "node:20"}"#).unwrap(); + + let found = find_devcontainer_json(&dir); + assert!(found.is_some()); + assert!(found.unwrap().ends_with(".devcontainer.json")); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn find_devcontainer_json_missing() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-missing"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(&dir).unwrap(); + + assert!(find_devcontainer_json(&dir).is_none()); + + let _ = std::fs::remove_dir_all(&dir); + } + + #[test] + fn find_devcontainer_prefers_subdir_over_root() { + let dir = std::env::temp_dir().join("dual-test-devcontainer-prefer"); + let _ = std::fs::remove_dir_all(&dir); + std::fs::create_dir_all(dir.join(".devcontainer")).unwrap(); + std::fs::write( + dir.join(".devcontainer").join("devcontainer.json"), + r#"{"image": "python:3.12"}"#, + ).unwrap(); + std::fs::write(dir.join(".devcontainer.json"), r#"{"image": "node:20"}"#).unwrap(); + + let found = find_devcontainer_json(&dir).unwrap(); + assert!(found.ends_with(".devcontainer/devcontainer.json")); + + let _ = std::fs::remove_dir_all(&dir); + } +} +``` + +### Success Criteria: + +#### Automated Verification: +- [x] `cargo build` compiles with new `serde_json` dependency +- [x] `cargo test` passes all new tests in `devcontainer.rs` +- [x] `cargo clippy` clean +- [x] `cargo fmt --check` clean + +**Implementation Note**: After completing this phase and all automated verification passes, pause here for manual confirmation before proceeding to Phase 2. + +--- + +## Phase 2: Config Integration + +### Overview +Add `DockerfileBuild` struct to `RepoHints`, modify `load_hints()` to fall back to devcontainer.json, and wire up the conversion. + +### Changes Required: + +#### 1. Add `DockerfileBuild` to config types +**File**: `src/config.rs` +**Changes**: Add new struct and field to `RepoHints` + +After `SharedConfig` (line 15), add: + +```rust +/// Dockerfile build configuration for building images from source. +/// Used when devcontainer.json specifies `build.dockerfile` or when +/// configured directly in .dual.toml. +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub struct DockerfileBuild { + /// Path to the Dockerfile (relative to base_dir or workspace root) + pub path: String, + + /// Build context directory (relative to base_dir or workspace root) + #[serde(default = "default_build_context")] + pub context: String, + + /// Docker build arguments (--build-arg) + #[serde(default)] + pub args: HashMap, + + /// Target build stage for multi-stage builds + #[serde(skip_serializing_if = "Option::is_none")] + pub target: Option, + + /// Base directory for resolving relative paths (set by devcontainer loader). + /// When loaded from .dual.toml, this is None and paths are relative to workspace root. + #[serde(skip)] + pub base_dir: Option, +} + +fn default_build_context() -> String { + ".".to_string() +} +``` + +Add field to `RepoHints` struct (after `shared`, line 45): + +```rust + /// Dockerfile build config — if set, build image instead of pulling. + /// Can be set via .dual.toml [dockerfile] section or from devcontainer.json build field. + #[serde(skip_serializing_if = "Option::is_none")] + pub dockerfile: Option, +``` + +Update `Default` impl (after line 65): + +```rust + dockerfile: None, +``` + +#### 2. Modify `load_hints()` to add devcontainer fallback +**File**: `src/config.rs` +**Changes**: Insert devcontainer.json fallback between "file doesn't exist" and "return default" + +Replace `load_hints()` (lines 77-89): + +```rust +/// Load RepoHints from a workspace directory. +/// +/// Priority order: +/// 1. `.dual.toml` — Dual-native config (always takes priority) +/// 2. `.devcontainer/devcontainer.json` or `.devcontainer.json` — fallback +/// 3. Default hints (node:20, no ports, etc.) +pub fn load_hints(workspace_dir: &Path) -> Result { + let path = workspace_dir.join(HINTS_FILENAME); + + // 1. .dual.toml takes priority + if path.exists() { + let contents = + std::fs::read_to_string(&path).map_err(|e| HintsError::ReadError(path.clone(), e))?; + let hints: RepoHints = + toml::from_str(&contents).map_err(|e| HintsError::ParseError(path, e))?; + return Ok(hints); + } + + // 2. Fall back to devcontainer.json + if let Some(hints) = crate::devcontainer::load_devcontainer_as_hints(workspace_dir) { + return Ok(hints); + } + + // 3. Default + Ok(RepoHints::default()) +} +``` + +#### 3. Add convenience function to devcontainer module +**File**: `src/devcontainer.rs` +**Changes**: Add `load_devcontainer_as_hints()` that combines load + convert + +```rust +/// Load devcontainer.json from a workspace and convert to RepoHints. +/// Returns None if no devcontainer.json exists or it fails to parse. +pub fn load_devcontainer_as_hints(workspace_dir: &Path) -> Option { + let path = find_devcontainer_json(workspace_dir)?; + let devcontainer_dir = path.parent().unwrap_or(workspace_dir); + let contents = std::fs::read_to_string(&path).ok()?; + let dc = parse_devcontainer(&contents).ok()?; + Some(to_repo_hints(&dc, devcontainer_dir)) +} +``` + +#### 4. Update test fixture helper +**File**: `tests/fixtures/mod.rs` +**Changes**: Add `dockerfile` field to `create_fixture_hints` + +```rust +pub fn create_fixture_hints(repo_dir: &Path, ports: &[u16]) { + let hints = dual::config::RepoHints { + image: "node:20".to_string(), + ports: ports.to_vec(), + setup: None, + env: std::collections::HashMap::new(), + extra_commands: Vec::new(), + anonymous_volumes: vec!["node_modules".to_string()], + shared: None, + dockerfile: None, // new field + }; + dual::config::write_hints(repo_dir, &hints).expect("failed to write fixture hints"); +} +``` + +### Success Criteria: + +#### Automated Verification: +- [x] `cargo build` compiles +- [x] `cargo test` — all existing config tests still pass +- [x] `cargo test` — new integration tests pass: + - `load_hints` returns devcontainer config when no `.dual.toml` exists + - `load_hints` prefers `.dual.toml` over devcontainer.json when both exist + - `load_hints` returns defaults when neither exists +- [x] `cargo clippy` clean +- [x] `cargo fmt --check` clean + +#### Manual Verification: +- [x] Create a test directory with `.devcontainer/devcontainer.json` containing `{"image": "python:3.12", "forwardPorts": [8000]}` and verify `load_hints()` returns the correct image and ports + +**Implementation Note**: After completing this phase and all automated verification passes, pause here for manual confirmation before proceeding to Phase 3. + +--- + +## Phase 3: Docker Build Support + +### Overview +Add `build_image()` function to `container.rs` and modify `cmd_launch()` in `main.rs` to build from Dockerfile before creating the container. + +### Changes Required: + +#### 1. Add `build_image()` to container module +**File**: `src/container.rs` +**Changes**: New function and arg builder + +After `exec_setup()` (line 179), add: + +```rust +/// Build a Docker image from a Dockerfile. +/// +/// Returns the image tag on success. +pub fn build_image( + tag: &str, + workspace_dir: &Path, + build: &crate::config::DockerfileBuild, +) -> Result { + let args = build_image_args(tag, workspace_dir, build); + let output = Command::new("docker") + .args(&args) + .output() + .map_err(|e| ContainerError::DockerNotFound(e.to_string()))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + return Err(ContainerError::Failed { + operation: "build".to_string(), + name: tag.to_string(), + stderr, + }); + } + + Ok(tag.to_string()) +} + +/// Build docker build arguments (public for testing). +pub fn build_image_args( + tag: &str, + workspace_dir: &Path, + build: &crate::config::DockerfileBuild, +) -> Vec { + // Resolve paths relative to base_dir (devcontainer.json location) or workspace root + let base = build.base_dir.as_deref().unwrap_or(workspace_dir); + + let dockerfile_path = base.join(&build.path); + let context_path = base.join(&build.context); + + let mut args = vec![ + "build".to_string(), + "-t".to_string(), + tag.to_string(), + "-f".to_string(), + dockerfile_path.display().to_string(), + ]; + + // Build arguments + for (key, value) in &build.args { + args.push("--build-arg".to_string()); + args.push(format!("{key}={value}")); + } + + // Target stage + if let Some(ref target) = build.target { + args.push("--target".to_string()); + args.push(target.clone()); + } + + // Build context (last argument) + args.push(context_path.display().to_string()); + + args +} +``` + +#### 2. Modify `cmd_launch()` to build before create +**File**: `src/main.rs` +**Changes**: Insert build step between hints loading and container creation + +At `main.rs:399-409` (container creation), modify to: + +```rust +container::ContainerStatus::Missing => { + // Build image from Dockerfile if configured + let effective_image = if let Some(ref build) = hints.dockerfile { + let image_tag = format!("dual-build-{container_name}"); + info!("Building image from Dockerfile..."); + match container::build_image(&image_tag, &workspace_dir, build) { + Ok(tag) => tag, + Err(e) => { + error!("docker build failed: {e}"); + return 1; + } + } + } else { + hints.image.clone() + }; + + info!("Creating container {container_name}..."); + if let Err(e) = container::create( + &container_name, + &workspace_dir, + &effective_image, + &hints.env, + &hints.anonymous_volumes, + ) { + error!("container create failed: {e}"); + return 1; + } + if let Err(e) = container::start(&container_name) { + error!("container start failed: {e}"); + return 1; + } +} +``` + +### Success Criteria: + +#### Automated Verification: +- [x] `cargo build` compiles +- [x] `cargo test` — new `build_image_args` tests pass: + - Correct `-t`, `-f`, context path ordering + - Build args passed as `--build-arg KEY=VALUE` + - Target stage passed as `--target` + - Paths resolved relative to `base_dir` when set + - Paths resolved relative to workspace root when `base_dir` is None +- [x] `cargo clippy` clean +- [x] `cargo fmt --check` clean + +#### Manual Verification: +- [x] Create a workspace with a Dockerfile, add `devcontainer.json` with `{"build": {"dockerfile": "Dockerfile"}}`, run `dual launch`, verify image is built and container starts +- [x] Verify a workspace with `{"image": "node:20"}` still works (no build step) +- [x] Verify a workspace with `.dual.toml` ignores devcontainer.json entirely + +**Implementation Note**: After completing this phase and all verification passes, the feature is complete. + +--- + +## Testing Strategy + +### Unit Tests: +- **devcontainer.rs**: All format variants for each field (covered in Phase 1 inline tests) +- **config.rs**: `load_hints()` fallback chain — `.dual.toml` priority, devcontainer fallback, defaults +- **container.rs**: `build_image_args()` construction correctness + +### Integration Tests: +- Create temp directory with `.devcontainer/devcontainer.json` → verify `load_hints()` returns correct hints +- Create temp directory with both `.dual.toml` and devcontainer.json → verify `.dual.toml` wins +- `build_image_args()` with various `DockerfileBuild` configurations + +### Edge Cases to Test: +- Empty devcontainer.json `{}` +- devcontainer.json with only unsupported fields (should return defaults) +- Invalid JSON (should fall through to defaults, not error) +- Invalid port strings in `forwardPorts` (should be silently skipped) +- `postCreateCommand` with empty object `{}` +- `build` without `dockerfile` field (should not set `hints.dockerfile`) + +## Performance Considerations + +- JSON parse adds ~1ms to `load_hints()` in the fallback path — negligible +- `docker build` can take 30s-5min depending on Dockerfile complexity — acceptable since it only runs on first container creation (same as pulling large images) +- No new runtime dependencies beyond `serde_json` + +## References + +- Research: `thoughts/shared/research/2026-02-16-web-analysis-devcontainer-adoption.md` +- Dev Container JSON Reference: https://containers.dev/implementors/json_reference/ +- Current config module: `src/config.rs` +- Container lifecycle: `src/container.rs` +- Launch flow: `src/main.rs:364-432` diff --git a/thoughts/shared/plans/2026-02-16-shell-interception-pane-propagation.md b/thoughts/shared/plans/2026-02-16-shell-interception-pane-propagation.md new file mode 100644 index 0000000..5fe44c3 --- /dev/null +++ b/thoughts/shared/plans/2026-02-16-shell-interception-pane-propagation.md @@ -0,0 +1,414 @@ +# Shell Interception Pane Propagation — Implementation Plan + +## Overview + +When a user splits a pane or creates a new window inside a Dual tmux session, the new shell doesn't have command interception loaded. Commands like `pnpm dev` run on the host instead of in the container. This plan fixes that by (1) setting tmux session-level environment variables so new panes inherit them, and (2) auto-injecting a snippet into the user's shell RC file so new shells auto-source the interception file. + +## Current State Analysis + +**How interception works today** (`src/shell.rs:38-63`, `src/main.rs:434-450`): +1. `cmd_launch()` calls `shell::write_rc_file()` → writes to `~/.dual/rc/{container_name}.sh` +2. Generates a `source` command via `shell::source_file_command()` +3. Passes it to `backend.create_session()` as `init_cmd` +4. `TmuxBackend::create_session()` (`src/tmux_backend.rs:32-62`) sends it via `send_keys()` into the first pane + +**The gap**: New panes/windows spawned by the user start fresh shells. Nothing tells those shells to source the RC file. The `DUAL_CONTAINER` env var was set inside the first pane's shell process, not in tmux's session environment. + +### Key Discoveries: +- `MultiplexerBackend` trait (`src/backend.rs:8-42`) has no `set_environment()` method +- `cmd_add()` (`src/main.rs:112-197`) never touches `~/.bashrc` or `~/.zshrc` +- `shell::source_command()` (`src/shell.rs:87-89`) already exists for eval-based sourcing but nothing auto-triggers it +- No code anywhere modifies user shell config files + +## Desired End State + +After this plan is implemented: +1. `dual add` detects the user's shell and appends a guarded snippet to their `~/.zshrc` or `~/.bashrc` +2. `dual launch` sets `DUAL_ACTIVE`, `DUAL_RC_PATH`, and `DUAL_CONTAINER` as tmux session-level environment variables +3. When a user splits a pane (Ctrl+b %) or creates a window (Ctrl+b c), the new shell reads its RC file, detects `DUAL_ACTIVE`, and auto-sources the interception file +4. Non-Dual tmux sessions and terminals outside tmux are unaffected (snippet is a no-op) + +**Verification**: In a Dual tmux session, split a pane with `Ctrl+b %`. In the new pane, run `type npm` — it should show `npm is a function` pointing to the docker exec wrapper, not the host binary. + +## What We're NOT Doing + +- Fish shell support (different syntax, can be added later) +- `tmux set-hook` approach (visible to user, pollutes history, race conditions) +- `tmux set-option default-command` approach (shell-specific, may break user config) +- A `dual init` subcommand (reusing `dual add` is simpler) +- Modifying the `MultiplexerBackend` trait with a new method (tmux-specific `set-environment` can be called directly — keeps the trait clean for future zellij support) + +## Implementation Approach + +Three phases, each independently testable: + +1. **tmux set-environment** — Set session-level env vars during launch so new panes inherit them +2. **Shell RC snippet injection** — Auto-append the auto-source snippet to `~/.zshrc`/`~/.bashrc` during `dual add` +3. **Tests** — Unit tests for all new functions + +--- + +## Phase 1: tmux set-environment During Launch + +### Overview +After creating a tmux session in `cmd_launch()`, set three environment variables at the session level using `tmux set-environment`. This ensures all new panes/windows in the session inherit them. + +### Changes Required: + +#### 1. Add `set_environment()` to `TmuxBackend` +**File**: `src/tmux_backend.rs` +**Changes**: Add a public method (not on the trait) for setting session-level env vars. + +```rust +impl TmuxBackend { + pub fn new() -> Self { + Self + } + + /// Set an environment variable on a tmux session. + /// New panes/windows in this session will inherit the variable. + pub fn set_environment( + &self, + session_name: &str, + key: &str, + value: &str, + ) -> Result<(), BackendError> { + tmux_simple(&["set-environment", "-t", session_name, key, value]) + } +} +``` + +#### 2. Call `set_environment()` after session creation +**File**: `src/main.rs` +**Changes**: In `cmd_launch()`, after `backend.create_session()` succeeds (line 446-449), set the three env vars. + +After the existing block at line 444-450: +```rust +// Step 5: Create tmux session if not alive +if !backend.is_alive(&session_name) { + let source_cmd = shell::source_file_command(&rc_path); + if let Err(e) = backend.create_session(&session_name, &workspace_dir, Some(&source_cmd)) { + error!("session creation failed: {e}"); + return 1; + } + + // Set session-level env vars so new panes auto-source interception + let rc_path_str = rc_path.to_string_lossy(); + for (key, value) in [ + ("DUAL_ACTIVE", "1"), + ("DUAL_RC_PATH", rc_path_str.as_ref()), + ("DUAL_CONTAINER", container_name.as_str()), + ] { + if let Err(e) = backend.set_environment(&session_name, key, value) { + warn!("failed to set tmux env {key}: {e}"); + } + } +} +``` + +Note: `backend` must be downcast to `TmuxBackend` or the function signature must accept `&TmuxBackend`. Since `cmd_launch()` already receives `&dyn MultiplexerBackend`, the cleanest approach is to change `cmd_launch()` to accept `&TmuxBackend` directly (it's the only implementation, and we can revisit when zellij support is added). Alternatively, keep `&dyn MultiplexerBackend` and add a helper function that calls tmux directly — matching the pattern of `tmux_simple()`. + +**Chosen approach**: Add `set_session_env()` as a free function in `tmux_backend.rs` that calls `tmux set-environment` directly, and call it from `cmd_launch()`. This avoids changing the trait or the function signature. + +```rust +// In src/tmux_backend.rs +/// Set an environment variable on a tmux session. +/// New panes/windows in this session will inherit the variable. +pub fn set_session_env(session_name: &str, key: &str, value: &str) -> Result<(), BackendError> { + tmux_simple(&["set-environment", "-t", session_name, key, value]) +} +``` + +```rust +// In src/main.rs, after create_session succeeds: +use dual::tmux_backend; + +// ...inside the if !backend.is_alive block, after create_session: +let rc_path_str = rc_path.to_string_lossy(); +for (key, value) in [ + ("DUAL_ACTIVE", "1"), + ("DUAL_RC_PATH", rc_path_str.as_ref()), + ("DUAL_CONTAINER", container_name.as_str()), +] { + if let Err(e) = tmux_backend::set_session_env(&session_name, key, value) { + warn!("failed to set tmux env {key}: {e}"); + } +} +``` + +### Success Criteria: + +#### Automated Verification: +- [x] `cargo build` compiles without errors +- [x] `cargo test` passes +- [x] `cargo clippy` has no warnings + +#### Manual Verification: +- [ ] Launch a workspace with `dual launch` +- [ ] Run `tmux show-environment -t ` — should show `DUAL_ACTIVE=1`, `DUAL_RC_PATH=...`, `DUAL_CONTAINER=...` +- [ ] Split a pane — run `echo $DUAL_ACTIVE` in new pane — should print `1` +- [ ] Run `echo $DUAL_RC_PATH` in new pane — should print the RC file path + +**Implementation Note**: After completing this phase and all automated verification passes, pause here for manual confirmation that tmux env vars propagate before proceeding. + +--- + +## Phase 2: Shell RC Snippet Auto-Injection + +### Overview +During `dual add`, detect the user's shell and append a guarded snippet to their `~/.zshrc` or `~/.bashrc`. The snippet detects the `DUAL_ACTIVE` env var (set by Phase 1) and auto-sources the RC file. This is the same pattern used by nvm, pyenv, and rustup. + +### Changes Required: + +#### 1. Add snippet generation and injection to `src/shell.rs` +**File**: `src/shell.rs` +**Changes**: Add functions for generating the shell RC snippet and injecting it. + +```rust +/// Marker comment used to detect if the snippet is already installed. +const RC_MARKER: &str = "# dual: shell interception (auto-generated)"; + +/// Generate the shell RC snippet that auto-sources Dual interception. +/// +/// This snippet is appended to ~/.bashrc or ~/.zshrc. It detects +/// the DUAL_ACTIVE env var (set by tmux set-environment) and sources +/// the workspace-specific RC file. +pub fn shell_hook_snippet() -> String { + format!( + r#" +{RC_MARKER} +if [ -n "$DUAL_ACTIVE" ] && [ -n "$DUAL_RC_PATH" ] && [ -f "$DUAL_RC_PATH" ]; then + source "$DUAL_RC_PATH" +fi +"# + ) +} + +/// Detect the user's shell RC file path. +/// +/// Returns the path to ~/.zshrc or ~/.bashrc based on $SHELL. +/// Returns None if the shell is not bash or zsh. +pub fn detect_shell_rc() -> Option { + let home = dirs::home_dir()?; + let shell = std::env::var("SHELL").unwrap_or_default(); + let base = shell.rsplit('/').next().unwrap_or(""); + + match base { + "zsh" => Some(home.join(".zshrc")), + "bash" => { + // macOS uses .bash_profile for login shells, but .bashrc is + // sourced by interactive non-login shells (which tmux spawns). + // To cover both, prefer .bashrc. + Some(home.join(".bashrc")) + } + _ => None, + } +} + +/// Install the auto-source snippet into the user's shell RC file. +/// +/// Idempotent: checks for the marker comment before appending. +/// Creates the RC file if it doesn't exist. +/// Returns Ok(true) if the snippet was newly installed, Ok(false) if +/// already present. +pub fn install_shell_hook() -> Result { + let rc_path = match detect_shell_rc() { + Some(p) => p, + None => return Ok(false), + }; + + // Read existing content (or empty if file doesn't exist) + let existing = std::fs::read_to_string(&rc_path).unwrap_or_default(); + + // Check if snippet is already installed + if existing.contains(RC_MARKER) { + return Ok(false); + } + + // Append snippet + use std::io::Write; + let mut file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&rc_path)?; + file.write_all(shell_hook_snippet().as_bytes())?; + + Ok(true) +} +``` + +#### 2. Call `install_shell_hook()` from `cmd_add()` +**File**: `src/main.rs` +**Changes**: After the workspace is successfully registered (after `state::save()` at line 188-191), install the shell hook. + +```rust +// After state::save() succeeds, before the success messages: + +// Install shell hook for pane propagation (idempotent) +match shell::install_shell_hook() { + Ok(true) => { + let rc_name = shell::detect_shell_rc() + .map(|p| p.file_name().unwrap_or_default().to_string_lossy().to_string()) + .unwrap_or_default(); + info!("Added shell hook to ~/{rc_name} for tmux pane interception."); + } + Ok(false) => {} // Already installed or unsupported shell — silent + Err(e) => warn!("could not install shell hook: {e}"), +} +``` + +### Success Criteria: + +#### Automated Verification: +- [x] `cargo build` compiles without errors +- [x] `cargo test` passes +- [x] `cargo clippy` has no warnings + +#### Manual Verification: +- [ ] Run `dual add` in a repo — check that `~/.zshrc` (or `~/.bashrc`) now contains the snippet +- [ ] Run `dual add` again in a different repo — snippet should NOT be duplicated +- [ ] Launch a workspace, split a pane — run `type npm` in new pane — should show the docker exec function +- [ ] Open a terminal outside tmux — the snippet should be a no-op (no errors, no effect) +- [ ] Open a non-Dual tmux session — the snippet should be a no-op + +**Implementation Note**: After completing this phase and all automated verification passes, pause here for manual confirmation that end-to-end pane propagation works. + +--- + +## Phase 3: Tests + +### Overview +Add unit tests for all new functions. + +### Changes Required: + +#### 1. Tests for `set_session_env` +**File**: `src/tmux_backend.rs` +**Changes**: Add to existing `mod tests` block. + +```rust +#[test] +fn set_session_env_builds_correct_args() { + // This test verifies the function exists and has the right signature. + // Actual tmux interaction is covered by manual testing. + // We can't easily test tmux commands without a running tmux server. + let _ = set_session_env; // verify function exists +} +``` + +#### 2. Tests for shell hook functions +**File**: `src/shell.rs` +**Changes**: Add to existing `mod tests` block. + +```rust +#[test] +fn shell_hook_snippet_contains_guard() { + let snippet = shell_hook_snippet(); + assert!(snippet.contains("DUAL_ACTIVE")); + assert!(snippet.contains("DUAL_RC_PATH")); + assert!(snippet.contains("source")); + assert!(snippet.contains(RC_MARKER)); +} + +#[test] +fn shell_hook_snippet_is_noop_without_vars() { + let snippet = shell_hook_snippet(); + // The snippet should guard on DUAL_ACTIVE being non-empty + assert!(snippet.contains("-n \"$DUAL_ACTIVE\"")); + // And on the RC file existing + assert!(snippet.contains("-f \"$DUAL_RC_PATH\"")); +} + +#[test] +fn detect_shell_rc_respects_shell_env() { + let original = std::env::var("SHELL").ok(); + + // SAFETY: test runs single-threaded + unsafe { + std::env::set_var("SHELL", "/bin/zsh"); + let path = detect_shell_rc(); + assert!(path.is_some()); + assert!(path.unwrap().ends_with(".zshrc")); + + std::env::set_var("SHELL", "/bin/bash"); + let path = detect_shell_rc(); + assert!(path.is_some()); + assert!(path.unwrap().ends_with(".bashrc")); + + std::env::set_var("SHELL", "/usr/bin/fish"); + let path = detect_shell_rc(); + assert!(path.is_none()); + + // Restore + match original { + Some(v) => std::env::set_var("SHELL", v), + None => std::env::remove_var("SHELL"), + } + } +} + +#[test] +fn install_shell_hook_is_idempotent() { + // Create a temp file to act as shell RC + let dir = tempfile::tempdir().unwrap(); + let rc_path = dir.path().join(".zshrc"); + std::fs::write(&rc_path, "# existing config\n").unwrap(); + + // Manually write snippet to test idempotency detection + let mut f = std::fs::OpenOptions::new() + .append(true) + .open(&rc_path) + .unwrap(); + use std::io::Write; + f.write_all(shell_hook_snippet().as_bytes()).unwrap(); + drop(f); + + let content = std::fs::read_to_string(&rc_path).unwrap(); + let marker_count = content.matches(RC_MARKER).count(); + assert_eq!(marker_count, 1); +} +``` + +Note: The `install_shell_hook()` function uses the real `$SHELL` env var and `dirs::home_dir()`, so a full integration test would modify the user's actual shell RC. The idempotency test above uses a direct file write to verify the marker detection logic without calling `install_shell_hook()` on the real home directory. + +### Success Criteria: + +#### Automated Verification: +- [x] `cargo test` passes — all new tests green +- [x] `cargo clippy` has no warnings +- [x] `cargo fmt -- --check` shows no formatting issues + +--- + +## Testing Strategy + +### Unit Tests: +- `shell_hook_snippet()` produces correct guard conditions +- `detect_shell_rc()` returns correct path for zsh, bash, and None for fish +- `install_shell_hook()` idempotency (marker detection) +- `set_session_env()` function signature and existence + +### Manual Testing Steps: +1. `dual add` in a fresh repo — verify snippet appears in `~/.zshrc` +2. `dual add` in another repo — verify snippet is NOT duplicated +3. `dual launch ` — verify `tmux show-environment -t ` shows all three vars +4. Split pane (`Ctrl+b %`) — run `type npm` — should show function +5. New window (`Ctrl+b c`) — run `type npm` — should show function +6. Type `pnpm dev` in new pane — should run inside container +7. Open a terminal outside tmux — no errors from the snippet +8. Open a non-Dual tmux session — `echo $DUAL_ACTIVE` should be empty + +## Performance Considerations + +- `tmux set-environment` is a local IPC call to the tmux server — negligible overhead (< 1ms per call, 3 calls total) +- Shell RC snippet adds a single `[ -n ... ]` guard check to shell startup — negligible (< 1ms) +- `install_shell_hook()` reads and appends to the RC file — only runs during `dual add`, not on every launch + +## References + +- Research: `thoughts/shared/research/2026-02-16-shell-interception-pane-propagation.md` +- Shell module: `src/shell.rs` +- Tmux backend: `src/tmux_backend.rs` +- Backend trait: `src/backend.rs` +- Launch flow: `src/main.rs:267-460` +- Add flow: `src/main.rs:112-197` diff --git a/thoughts/shared/research/2026-02-16-shell-interception-pane-propagation.md b/thoughts/shared/research/2026-02-16-shell-interception-pane-propagation.md new file mode 100644 index 0000000..bf0fbe8 --- /dev/null +++ b/thoughts/shared/research/2026-02-16-shell-interception-pane-propagation.md @@ -0,0 +1,251 @@ +--- +date: 2026-02-16T08:11:46+08:00 +researcher: jeevan +git_commit: ebfe191 +branch: main +repository: dual +topic: "Shell interception pane propagation: how command routing breaks on new tmux panes/windows" +tags: [research, codebase, shell-interception, tmux, pane-propagation, command-routing] +status: complete +last_updated: 2026-02-16 +last_updated_by: jeevan +--- + +# Research: Shell Interception Pane Propagation + +**Date**: 2026-02-16T08:11:46+08:00 +**Researcher**: jeevan +**Git Commit**: ebfe191 +**Branch**: main +**Repository**: dual + +## Research Question + +In a Dual tmux session, only the auto-created first pane has shell interception active. When users split panes or create new windows, the new shells don't have the command routing functions loaded. How does the current mechanism work and what are the options for propagating interception to all panes? + +## Summary + +The current shell interception mechanism writes an RC file to `~/.dual/rc/{container_name}.sh` and sources it via `tmux send-keys` into the first pane only. New panes/windows created by the user (Ctrl+b %, Ctrl+b ", Ctrl+b c) start fresh shells without the interception functions. The fix is a two-part approach: (1) `tmux set-environment` to set session-level env vars that new panes inherit, and (2) a snippet in the user's shell RC (`~/.bashrc`/`~/.zshrc`) that detects these vars and auto-sources the interception file. + +## Detailed Findings + +### 1. Current Mechanism — How Interception Gets Loaded Today + +**RC file generation** (`src/shell.rs:38-63`): + +`generate_rc()` produces a shell script containing: +- `export DUAL_CONTAINER="{container_name}"` — sets an env var identifying the container +- One shell function per intercepted command (npm, pnpm, node, etc.) +- Each function wraps `docker exec -w /workspace {container_name} {command} "$@"` + +**RC file persistence** (`src/shell.rs:93-108`): + +`write_rc_file()` writes the generated content to `~/.dual/rc/{container_name}.sh`. + +**Initial sourcing** (`src/main.rs:434-450`): + +During `cmd_launch()`, after writing the RC file: +```rust +let rc_path = shell::write_rc_file(&container_name, &hints.extra_commands)?; +// ... +let source_cmd = shell::source_file_command(&rc_path); +backend.create_session(&session_name, &workspace_dir, Some(&source_cmd))?; +``` + +**Session creation with init command** (`src/tmux_backend.rs:32-62`): + +`create_session()` runs `tmux new-session -d -s {name} -c {cwd}`, then sends the source command via `send_keys()`: +```rust +if let Some(cmd) = init_cmd { + self.send_keys(session_name, cmd)?; +} +``` + +This executes `source "~/.dual/rc/dual-repo-branch.sh"` as keystrokes typed into the first pane. + +### 2. The Gap — Why New Panes Don't Have Interception + +When a user creates a new pane or window in the tmux session: +- tmux spawns a fresh shell process (the user's `$SHELL`) +- The shell reads its standard RC files (`~/.bashrc`, `~/.zshrc`) +- **Nothing** in those RC files knows about Dual's interception +- The `DUAL_CONTAINER` env var was set in the first pane's shell process, not in tmux's session environment +- Result: `pnpm dev` in the new pane runs on the **host**, not in the container + +### 3. What Already Exists — `source_command()` and `shell-rc` Subcommand + +`src/shell.rs:87-89` has a function for eval-based sourcing: +```rust +pub fn source_command(container_name: &str) -> String { + format!("eval \"$(dual shell-rc {container_name})\"") +} +``` + +`src/main.rs:676-679` implements the `shell-rc` subcommand: +```rust +fn cmd_shell_rc(container_name: &str) -> i32 { + print!("{}", shell::generate_rc(container_name, &[])); + 0 +} +``` + +These provide a mechanism for users to manually load interception, but nothing auto-triggers them in new panes. + +### 4. tmux Mechanisms Available for Propagation + +#### 4a. `tmux set-environment` (session-level env vars) + +Sets environment variables at the tmux session level. All new panes/windows in that session inherit these variables automatically. + +```bash +tmux set-environment -t dual-repo-branch DUAL_ACTIVE 1 +tmux set-environment -t dual-repo-branch DUAL_RC_PATH ~/.dual/rc/dual-repo-branch.sh +tmux set-environment -t dual-repo-branch DUAL_CONTAINER dual-repo-branch +``` + +- Session-scoped (`-t session-name`) — doesn't affect other tmux sessions +- Inherited by all child processes (new panes, new windows) +- Survives pane creation, window creation, shell restarts within panes + +#### 4b. `tmux set-hook` (event hooks) + +tmux supports hooks for pane/window creation events: + +```bash +tmux set-hook -t dual-repo-branch after-split-window \ + "send-keys -t '#{pane_id}' 'source ~/.dual/rc/dual-repo-branch.sh' Enter" + +tmux set-hook -t dual-repo-branch after-new-window \ + "send-keys -t '#{window_id}.1' 'source ~/.dual/rc/dual-repo-branch.sh' Enter" +``` + +- Visible to user (source command appears in terminal) +- Pollutes shell history +- Race condition if user types before source completes +- Session-scoped (`-t`) so doesn't affect other sessions + +#### 4c. `tmux set-option default-command` + +Sets the command to run instead of the default shell for new panes: + +```bash +tmux set-option -t dual-repo-branch default-command \ + "bash --init-file ~/.dual/rc/dual-repo-branch.sh -i" +``` + +- Shell-specific (bash `--init-file` vs zsh `ZDOTDIR` vs fish `--init-command`) +- May interfere with user's shell configuration +- Session-scoped possible + +### 5. Recommended Approach: `set-environment` + Shell RC Snippet + +**Part 1 — Dual sets session-level env vars** (code change in `tmux_backend.rs`): + +After `create_session()`, set environment variables on the session: +``` +tmux set-environment -t {session_name} DUAL_ACTIVE 1 +tmux set-environment -t {session_name} DUAL_RC_PATH {rc_path} +tmux set-environment -t {session_name} DUAL_CONTAINER {container_name} +``` + +**Part 2 — User adds snippet to shell RC** (one-time setup): + +For `~/.bashrc`: +```bash +# Dual workspace command interception +if [ -n "$DUAL_ACTIVE" ] && [ -n "$DUAL_RC_PATH" ] && [ -f "$DUAL_RC_PATH" ]; then + source "$DUAL_RC_PATH" +fi +``` + +For `~/.zshrc`: +```zsh +# Dual workspace command interception +if [[ -n "$DUAL_ACTIVE" && -n "$DUAL_RC_PATH" && -f "$DUAL_RC_PATH" ]]; then + source "$DUAL_RC_PATH" +fi +``` + +**Part 3 — `dual add` auto-injects the snippet** (optional automation): + +Dual could detect the user's shell and append the snippet to their RC file during `dual add`, similar to how tools like nvm, pyenv, and direnv do it. + +### 6. Why This Approach Works + +| Scenario | Behavior | +|----------|----------| +| First pane (initial launch) | Shell starts → reads `~/.bashrc` → detects `DUAL_ACTIVE` → sources RC → interception active | +| User splits pane (Ctrl+b %) | New shell → reads `~/.bashrc` → detects `DUAL_ACTIVE` → sources RC → interception active | +| User creates window (Ctrl+b c) | New shell → reads `~/.bashrc` → sources RC → interception active | +| User types `bash` or `zsh` | New subshell → reads RC → sources RC → interception active | +| Non-Dual tmux session | `DUAL_ACTIVE` not set → snippet is no-op → zero impact | +| Terminal outside tmux | `DUAL_ACTIVE` not set → snippet is no-op → zero impact | + +### 7. What Changes in the Codebase + +**`src/tmux_backend.rs`** — Add `set-environment` calls after session creation: +- `DUAL_ACTIVE=1` +- `DUAL_RC_PATH={rc_path}` +- `DUAL_CONTAINER={container_name}` + +**`src/shell.rs`** — Add function to generate the shell RC snippet for user setup. + +**`src/main.rs`** — During `dual add` (or a new `dual init` command), detect user's shell and offer to append the snippet to their RC file. + +**The existing `send_keys` mechanism stays** — it handles the first pane on initial launch (before the user has added the snippet). Once the snippet is in place, it's redundant but harmless. + +## Code References + +- `src/shell.rs:38-63` — `generate_rc()` generates interception functions +- `src/shell.rs:46-47` — `export DUAL_CONTAINER` already set in RC content +- `src/shell.rs:87-89` — `source_command()` for eval-based sourcing +- `src/shell.rs:93-108` — `write_rc_file()` writes to `~/.dual/rc/` +- `src/shell.rs:111-113` — `source_file_command()` generates source command +- `src/main.rs:434-450` — Launch pipeline: write RC then create session with source command +- `src/main.rs:676-679` — `cmd_shell_rc()` prints RC content for eval +- `src/tmux_backend.rs:32-62` — `create_session()` sends init_cmd via send_keys +- `src/tmux_backend.rs:125-127` — `send_keys()` implementation +- `src/backend.rs:1-42` — `MultiplexerBackend` trait definition + +## Architecture Documentation + +### Current Interception Chain +``` +dual launch + → shell::write_rc_file() → ~/.dual/rc/dual-repo-branch.sh + → backend.create_session(name, cwd, source_cmd) + → tmux new-session -d -s {name} -c {cwd} + → tmux send-keys -t {name} "source ~/.dual/rc/..." Enter + → ONLY first pane has interception +``` + +### Proposed Interception Chain +``` +dual launch + → shell::write_rc_file() → ~/.dual/rc/dual-repo-branch.sh + → backend.create_session(name, cwd, source_cmd) + → tmux new-session -d -s {name} -c {cwd} + → tmux set-environment -t {name} DUAL_ACTIVE 1 + → tmux set-environment -t {name} DUAL_RC_PATH {rc_path} + → tmux set-environment -t {name} DUAL_CONTAINER {container_name} + → tmux send-keys -t {name} "source ~/.dual/rc/..." Enter (still needed for first pane) + → ALL panes have interception (via user's shell RC snippet) +``` + +## Historical Context (from thoughts/) + +- `thoughts/shared/research/2026-02-05-ARCH-shell-interception.md` — Original validation that shell functions intercept commands. Notes: "Functions must be loaded in each shell session (rc file injection)" — this is the exact gap now being addressed. +- `thoughts/shared/research/2026-02-05-ARCH-shell-interception-transparency.md` — Transparency analysis of what leaks vs stays transparent. +- `thoughts/shared/research/2026-02-13-BUILD-shell.md` — Shell module implementation research. +- `thoughts/shared/research/2026-02-16-port-routing-container-isolation.md` — Documents that shell interception is only active inside the Dual tmux session. + +## Related Research + +- `thoughts/shared/research/2026-02-15-v3-architecture-rethink.md` — Documents the full tmux integration model and edge cases. +- `thoughts/shared/plans/2026-02-15-v3-multiplexer-trait-tui.md` — v3 plan that discusses tmux hooks. + +## Open Questions + +1. Should `dual add` auto-inject the shell RC snippet, or should it just print instructions for the user to copy-paste? +2. Should tmux hooks (`after-split-window`, `after-new-window`) be used as a fallback for users who haven't added the snippet yet? +3. Fish shell uses different syntax — should Dual generate a fish-compatible snippet too? diff --git a/thoughts/shared/research/2026-02-16-web-analysis-devcontainer-adoption.md b/thoughts/shared/research/2026-02-16-web-analysis-devcontainer-adoption.md new file mode 100644 index 0000000..41f9459 --- /dev/null +++ b/thoughts/shared/research/2026-02-16-web-analysis-devcontainer-adoption.md @@ -0,0 +1,297 @@ +--- +date: 2026-02-16T18:00:00+08:00 +researcher: claude-opus-4-6 +topic: "Should Dual adopt devcontainer.json for container configuration?" +tags: [research, web-analysis, devcontainer, container-config, architecture] +status: complete +created_at: 2026-02-16 +confidence: high +sources_count: 14 +--- + +# Web Research: Should Dual Adopt devcontainer.json? + +**Date**: 2026-02-16 +**Topic**: Adopting the devcontainer.json specification vs maintaining `.dual.toml` +**Confidence**: High — based on official spec docs, tool source code, and ecosystem analysis + +## Research Question + +Should Dual adopt the devcontainer.json specification for container configuration instead of assuming base images? What's the right balance between compatibility with the ecosystem and Dual-specific needs (command routing, multi-workspace, port multiplexing)? + +## Executive Summary + +Dual should **read devcontainer.json as a fallback input** for `image`, `build`, `forwardPorts`, `containerEnv`, and `postCreateCommand` — but keep `.dual.toml` as the primary config for Dual-specific concerns. The devcontainer spec is designed for "develop inside container" tools (VS Code, Codespaces), while Dual's "develop on host, run in container" model requires concepts the spec doesn't cover: command routing, multi-workspace isolation, port multiplexing, and shell function generation. + +The practical strategy: `.dual.toml` first, devcontainer.json fallback for the ~5 fields that map cleanly, ignore the ~95 fields that don't apply. This gives Dual zero-config compatibility with repos that already have `.devcontainer/` while keeping the config surface clean for Dual's unique architecture. + +## Key Metrics & Findings + +### 1. Ecosystem Adoption + +**Finding**: devcontainer.json adoption is growing but not universal. Terminal-first tools largely don't consume it. + +- **GitHub repos with `.devcontainer/`**: ~5-15% of popular repos (>1k stars) +- **Highest adoption**: TypeScript/JavaScript ecosystem (15-20%) +- **Lowest adoption**: Ruby, PHP, Java (2-5%) +- **Terminal tools that consume it**: Effectively zero (Dual would be first terminal-first multiplexer) + +**Official supporting tools** ([containers.dev/supporting](https://containers.dev/supporting)): + +| Tool | Type | Support Level | +|------|------|---------------| +| VS Code | Editor | Full | +| Visual Studio | Editor | Partial (C++ CMake only) | +| IntelliJ IDEA | Editor | Partial (early stage) | +| GitHub Codespaces | Service | Full | +| DevPod | Service | Full (client-only, Go impl) | +| Ona (Gitpod) | Service | Full | +| CodeSandbox | Service | Partial (rootless Podman) | +| devcontainer CLI | Tool | Full (reference impl, Node.js) | +| Cachix devenv | Tool | Partial (auto-generates from Nix) | +| Jetify DevBox | Tool | Partial (Nix-based) | + +**Analysis**: Every full implementation is either an IDE or a cloud service. No terminal workspace orchestrator consumes devcontainer.json. Dual would be unique in this space. + +### 2. The Spec Surface Area + +**Finding**: devcontainer.json has 100+ properties. Dual needs ~5-8 of them. + +**Properties Dual would USE** (direct mapping to RepoHints): + +| devcontainer.json | .dual.toml | Notes | +|---|---|---| +| `image` | `image` | Direct 1:1 mapping | +| `build.dockerfile` | `image` | Build from Dockerfile instead of pulling | +| `build.context` | — | Build context path | +| `forwardPorts` | `ports` | Direct 1:1 mapping | +| `containerEnv` | `[env]` | Direct 1:1 mapping | +| `postCreateCommand` | `setup` | Runs after container creation | +| `mounts` | `anonymous_volumes` | Partial overlap (volume isolation) | + +**Properties Dual would IGNORE** (~95% of spec): + +| Category | Why Ignored | +|---|---| +| `customizations.vscode` | IDE-specific, not relevant | +| `remoteUser` / `containerUser` | Dual controls container users | +| `features` | Complex OCI feature system — see analysis below | +| `shutdownAction` | Dual manages container lifecycle | +| `workspaceMount` / `workspaceFolder` | Dual controls mount to `/workspace` | +| `postStartCommand` / `postAttachCommand` | Dual has shell RC init, not these hooks | +| `hostRequirements` | Not Dual's concern | +| `init` / `privileged` / `capAdd` | Container security is Dual's domain | +| `portsAttributes` | Dual's reverse proxy handles port UX | +| `dockerComposeFile` | Multi-container via Compose conflicts with Dual's model | + +### 3. Dev Container Features — Complexity vs Value + +**Finding**: Features are powerful but add significant complexity. For Dual's immediate needs, `postCreateCommand` with `corepack enable` solves the pnpm problem without implementing the full feature system. + +**How features work**: +- OCI artifacts (tarballs) with `install.sh` + `devcontainer-feature.json` +- Distributed via container registries (`ghcr.io/devcontainers/features/*`) +- During build, the CLI generates a Dockerfile that layers features onto the base image +- Each feature becomes a `COPY` + `RUN install.sh` layer +- Options are converted to environment variables for `install.sh` + +**Performance cost** (cold cache): +- Single feature (e.g., node): 45-90 seconds +- Typical stack (common-utils + node + docker): 2-4 minutes +- Pre-built image pull: 30-120 seconds + +**Implementation cost for Dual**: +- Full feature support: ~1000+ LOC, OCI registry client, Dockerfile generation +- Skip features, use `postCreateCommand`: 0 LOC (already have `setup`) +- Recommendation: **Skip features for now**. Repos that need complex setups can use `build.dockerfile` to point to their own Dockerfile. + +### 4. DevPod — The Best Reference Implementation + +**Finding**: DevPod is the most relevant prior art. Written in Go, client-only, reimplements the spec without shelling out to the Node.js CLI. + +- **Source**: [github.com/loft-sh/devpod](https://github.com/loft-sh/devpod) +- **Language**: Go +- **Key package**: `github.com/loft-sh/devpod/pkg/devcontainer` — full devcontainer.json parser +- **Architecture**: Provider-based (Docker, Kubernetes, SSH, cloud) +- **Approach**: Reimplements spec parsing in Go, generates Docker commands directly + +**Relevance to Dual**: DevPod proves you can implement devcontainer.json parsing without the Node.js CLI. Their Go `config` package could be studied for field mapping. However, DevPod's architecture (full development inside container) is fundamentally different from Dual's (host dev, container runtime). + +### 5. Gitpod's Approach — Precedent for Dual Config + +**Finding**: Gitpod (now Ona) maintained their own `.gitpod.yml` for years before adding devcontainer.json support. This validates the "own config first, devcontainer fallback" strategy. + +- **Primary config**: `.gitpod.yml` (Gitpod-specific features: prebuilds, tasks, env management) +- **Fallback**: devcontainer.json read for `image` and `ports` when no `.gitpod.yml` exists +- **Current status**: Now "fully adheres" to devcontainer spec after years of pressure +- **Lesson**: The market eventually pushes toward devcontainer.json compatibility, but tool-specific config remains necessary for unique capabilities + +**Sources**: [Gitpod devcontainer blog post](https://www.gitpod.io/blog/gitpod-supports-development-container), [GitHub issue #7721](https://github.com/gitpod-io/gitpod/issues/7721) + +### 6. The Philosophical Mismatch + +**Finding**: Devcontainer.json assumes "develop inside container." Dual assumes "develop on host, run in container." This is a fundamental architecture difference. + +| Concept | Devcontainers | Dual | +|---|---|---| +| Where you edit code | Inside container | On host | +| Where git runs | Inside container | On host | +| Where runtime runs | Inside container | Inside container | +| Shell environment | Container shell | Host shell with function wrappers | +| Port access | Forward container→host | Reverse proxy with subdomains | +| Multiple branches | Not supported | Core feature | +| Terminal multiplexer | Not integrated | Tightly integrated (tmux/zellij) | +| SSH/credentials | Forwarded into container | Stay on host | + +**Properties Dual NEEDS that devcontainer.json CANNOT express**: +- `extra_commands` — which commands route to container vs host +- `anonymous_volumes` — directory isolation (e.g., `node_modules`) +- `shared.files` — cross-workspace file propagation +- Branch-aware container naming +- Port multiplexing across workspaces + +### 7. Alternatives Considered + +**Nix devshells / Devbox**: +- No containers, no isolation between workspaces +- Doesn't solve port routing +- Steep learning curve (Nix) or limited (Devbox) +- Not relevant to Dual's architecture + +**Docker Compose**: +- Already how devcontainer.json handles multi-container +- Conflicts with Dual's "one container per workspace" model +- Dual already generates Docker commands directly + +**Daytona**: +- Full devcontainer.json support +- Cloud-first, not terminal-first +- Different target audience + +## Trade-off Analysis + +### Option A: Keep `.dual.toml` Only + +| Factor | Impact | Notes | +|---|---|---| +| Implementation cost | None | Already done | +| User friction | Low for new users, medium for repos with existing `.devcontainer/` | Need to create `.dual.toml` manually | +| Ecosystem compat | None | Repos must opt-in to Dual specifically | +| Maintenance burden | Low | Own the format, evolve freely | +| Immediate pnpm fix | Add `setup = "corepack enable && pnpm install"` | Works today | + +### Option B: Read devcontainer.json as Fallback (Recommended) + +| Factor | Impact | Notes | +|---|---|---| +| Implementation cost | ~150-200 LOC | Parse JSON, map 5-8 fields to RepoHints | +| User friction | Lowest | Repos with `.devcontainer/` work automatically | +| Ecosystem compat | Good | ~15% of repos get zero-config support | +| Maintenance burden | Low | Only track ~5 fields, ignore spec changes to others | +| Immediate pnpm fix | Auto-reads `postCreateCommand` from devcontainer.json | If repo has one | + +### Option C: Full devcontainer.json Implementation + +| Factor | Impact | Notes | +|---|---|---| +| Implementation cost | ~1000+ LOC | Features, lifecycle hooks, OCI registry, Dockerfile generation | +| User friction | Lowest for existing devcontainer users | But confusing when 95% of fields don't work | +| Ecosystem compat | High on paper | But Dual's model breaks expectations | +| Maintenance burden | High | Must track spec evolution, handle edge cases | +| Risk | Users expect full compat, get frustrated when features don't work | Worse than not supporting it | + +## Recommendations + +### 1. Implement Option B: devcontainer.json as Fallback Input + +**Rationale**: Maximum compatibility with minimum complexity. The field mapping is trivial: + +```rust +// Proposed load order in config.rs +pub fn load_hints(workspace_dir: &Path) -> Result { + // 1. .dual.toml takes priority (Dual-native config) + if let Ok(hints) = load_dual_toml(workspace_dir) { + return Ok(hints); + } + + // 2. Fall back to .devcontainer/devcontainer.json + if let Ok(hints) = load_devcontainer_json(workspace_dir) { + return Ok(hints); + } + + // 3. Default (node:20) + Ok(RepoHints::default()) +} +``` + +**Fields to map**: + +```rust +struct DevcontainerJson { + image: Option, // → hints.image + build: Option, // → hints.image (from Dockerfile) + forward_ports: Option>, // → hints.ports + container_env: Option>, // → hints.env + post_create_command: Option, // → hints.setup +} +``` + +### 2. Skip Features Support (for now) + +**Rationale**: The pnpm problem is solved by `postCreateCommand: "corepack enable && pnpm install"`. Full feature support (OCI artifact pulling, install.sh execution, Dockerfile generation) is 1000+ LOC for a problem that `setup` already solves. Repos that need complex container setups can use `build.dockerfile` to point to their own Dockerfile. + +### 3. Keep `.dual.toml` as Primary Config + +**Rationale**: Dual's unique concerns (`extra_commands`, `anonymous_volumes`, `shared.files`) have no devcontainer.json equivalent. `.dual.toml` is the right place for Dual-specific configuration. Follow Gitpod's proven pattern: own config for unique features, spec compat for ecosystem alignment. + +### 4. Document the Compatibility Matrix + +**Rationale**: Transparency prevents user frustration. Clearly state which devcontainer.json fields are read and which are ignored. This is better than pretending to support the full spec and surprising users. + +## Implementation Estimate + +**Option B implementation**: +- New struct: `DevcontainerJson` with serde (30 LOC) +- Parser function: `load_devcontainer_json()` (40 LOC) +- Field mapping to `RepoHints` (30 LOC) +- Path resolution (`.devcontainer/devcontainer.json` or root `devcontainer.json`) (20 LOC) +- Tests (50 LOC) +- **Total: ~170 LOC** + +**No new dependencies** — `serde_json` is likely already in the dependency tree via other crates. + +## Open Questions + +1. **Should `dual add` generate `.dual.toml` even if `.devcontainer/` exists?** Probably yes — users need a place for Dual-specific config. Could auto-populate `image` and `ports` from devcontainer.json. + +2. **What about `build.dockerfile`?** Reading `image` is trivial. Supporting `build.dockerfile` means Dual needs to `docker build` instead of `docker create` with a pre-built image. This is a bigger change to the container lifecycle. + +3. **Should we support `postStartCommand` (runs every start) vs `postCreateCommand` (runs once)?** Currently Dual's `setup` runs once. Adding a `start_command` field to `.dual.toml` would be the Dual-native way to handle this. + +4. **When should devcontainer.json be read?** On `dual add` (extract and write to `.dual.toml`) or on every `dual launch` (live read)? Live read is simpler but adds a JSON parse to every launch. + +## Sources + +### Official Documentation +- [Dev Container JSON Reference](https://containers.dev/implementors/json_reference/) — Full property list +- [Dev Container Features Reference](https://containers.dev/implementors/features/) — Feature spec +- [Dev Container Specification](https://containers.dev/implementors/spec/) — Core spec +- [Supporting Tools and Services](https://containers.dev/supporting) — Who implements the spec +- [Dev Container CLI](https://github.com/devcontainers/cli) — Reference implementation (Node.js) + +### Tool Implementations +- [DevPod Source (Go)](https://github.com/loft-sh/devpod) — Best non-VS Code implementation +- [DevPod devcontainer package](https://pkg.go.dev/github.com/loft-sh/devpod/pkg/devcontainer) — Go devcontainer parser +- [Gitpod devcontainer support](https://www.gitpod.io/blog/gitpod-supports-development-container) — Gitpod's adoption story +- [Gitpod devcontainer issue #7721](https://github.com/gitpod-io/gitpod/issues/7721) — Multi-year adoption epic + +### Ecosystem Analysis +- [Daytona Dev Environment Manager](https://www.daytona.io/) — Another devcontainer consumer +- [devcontainer.json: Just for VS Code?](https://devclass.com/2022/06/20/microsofts-devcontainer-json/) — Ecosystem analysis article +- [Dev Container Features Collection](https://github.com/devcontainers/features) — Official features repo + +--- + +**Last Updated**: 2026-02-16 +**Confidence Level**: High — based on official spec documentation, source code analysis, and ecosystem survey +**Next Steps**: Create implementation plan for Option B (devcontainer.json fallback reader)