diff --git a/.gitignore b/.gitignore index d7941c6..e7bece7 100644 --- a/.gitignore +++ b/.gitignore @@ -25,4 +25,7 @@ Cargo.lock /target .envrc -.testenv \ No newline at end of file +.testenv + +#VSCode +.vscode diff --git a/Cargo.toml b/Cargo.toml index a82eae8..dd4ece0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nebulous" -version = "0.1.90" +version = "0.1.91" edition = "2021" description = "A globally distributed container orchestrator" license = "MIT" diff --git a/Dockerfile b/Dockerfile index e29f90e..d205ffb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,73 +1,34 @@ -# Build stage FROM rust:1.88-slim-bullseye AS builder -# Install build dependencies RUN apt-get update && apt-get install -y \ pkg-config \ libssl-dev \ - curl \ build-essential \ - protobuf-compiler \ - unzip \ g++ \ - cmake \ - zlib1g-dev \ && rm -rf /var/lib/apt/lists/* -# Install sccache using cargo -RUN cargo install sccache - -# Set up sccache for Rust -ENV RUSTC_WRAPPER=sccache - -# Create a new empty shell project with only Cargo files WORKDIR /usr/src/nebulous +COPY Cargo.toml ./ -COPY Cargo.toml Cargo.lock* ./ +# Pre-build dependencies to cache them +RUN mkdir -p src && echo "fn main() {}" > src/main.rs +RUN cargo build --release || true +RUN rm -rf src -# Create a dummy main.rs to build dependencies -RUN mkdir src && \ - echo "fn main() {}" > src/main.rs && \ - echo "pub fn lib() {}" > src/lib.rs +COPY ./deploy ./deploy +COPY ./src ./src RUN cargo build --release -# Remove the dummy files and copy actual source code -RUN rm -rf src -COPY . . 
- -# Build with release profile (this will reuse the cached dependencies) -RUN cargo build --release -# Tools stage - install runtime tools -FROM debian:bullseye-slim AS tools +FROM debian:bullseye-slim AS binary-only -# Install runtime dependencies and tools in a single layer -RUN apt-get update && apt-get install -y \ - ca-certificates \ - curl \ - unzip \ - openssh-client \ - gnupg \ - && rm -rf /var/lib/apt/lists/* +COPY --from=builder /usr/src/nebulous/target/release/nebulous /usr/local/bin/nebulous -# Install rclone, AWS CLI, and Tailscale in parallel -RUN curl -fsSL https://rclone.org/install.sh | bash && \ - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ - unzip awscliv2.zip && \ - ./aws/install && \ - rm -rf awscliv2.zip aws && \ - curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.noarmor.gpg | tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null && \ - curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.tailscale-keyring.list | tee /etc/apt/sources.list.d/tailscale.list && \ - apt-get update && apt-get install -y tailscale && \ - rm -rf /var/lib/apt/lists/* +RUN ln -s /usr/local/bin/nebulous /usr/local/bin/nebu -# Runtime stage -FROM debian:bullseye-slim -# Copy tools from tools stage -COPY --from=tools /usr/bin/rclone /usr/bin/rclone -COPY --from=tools /usr/local/bin/aws /usr/local/bin/aws +FROM binary-only AS binary-and-tools # Install runtime dependencies including Tailscale RUN apt-get update && apt-get install -y \ @@ -80,23 +41,12 @@ RUN apt-get update && apt-get install -y \ && apt-get update && apt-get install -y tailscale \ && rm -rf /var/lib/apt/lists/* -# Copy the binary from builder -COPY --from=builder /usr/src/nebulous/target/release/nebulous /usr/local/bin/nebulous - -# Create a symlink for the 'nebu' command to point to 'nebulous' -RUN ln -s /usr/local/bin/nebulous /usr/local/bin/nebu - -# Create directory for SQLite database RUN mkdir -p /data WORKDIR /data - -# Set 
environment variables ENV RUST_LOG=info -# Expose the default port EXPOSE 3000 -# Run the binary CMD ["sh", "-c", "tailscaled --state=/data/tailscaled.state & \ sleep 5 && \ tailscale up --authkey=$TS_AUTHKEY --login-server=${TS_LOGINSERVER:-'https://login.tailscale.com'} --hostname=nebu && \ diff --git a/Dockerfile.simple b/Dockerfile.simple new file mode 100644 index 0000000..7e836c0 --- /dev/null +++ b/Dockerfile.simple @@ -0,0 +1,41 @@ +FROM rust:1.88-slim-bullseye AS builder + +RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ + build-essential \ + g++ \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /usr/src/nebulous +COPY ./Cargo.toml ./Cargo.lock ./ + +# Pre-build dependencies to cache them +RUN mkdir -p src && echo "fn main() {}" > src/main.rs +RUN cargo build --release || true +RUN rm -rf src + +COPY ./src ./src + +RUN cargo build --release + + +FROM debian:bullseye-slim + +RUN apt-get update && apt-get install -y \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# COPY --from=builder /usr/src/nebulous/target/debug/nebulous /usr/local/bin/nebulous +COPY --from=builder /usr/src/nebulous/target/release/nebulous /usr/local/bin/nebulous + +RUN ln -s /usr/local/bin/nebulous /usr/local/bin/nebu + +ENV RUST_LOG=info +ENV NEBU_BUCKET_NAME=nebulous +ENV NEBU_BUCKET_REGION=us-east-1 +ENV NEBU_ROOT_OWNER=me +ENV TS_APIKEY=dummy +ENV TS_TAILNET=dummy + +CMD ["nebulous", "--version"] \ No newline at end of file diff --git a/README.md b/README.md index 1fdae07..ab6250c 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,15 @@ export AWS_ACCESS_KEY_ID=... export AWS_SECRET_ACCESS_KEY=... 
Export the bucket-related environment variables
TAGS="$$TAGS -t us-docker.pkg.dev/$PROJECT_ID/nebulous/server:latest" fi @@ -28,10 +38,28 @@ steps: TAGS="$$TAGS -t us-docker.pkg.dev/$PROJECT_ID/nebulous/server:$TAG_NAME" fi - # Build and push all tags in one operation + # Cache args + CACHE_FROM="--cache-from=type=registry,ref=us-docker.pkg.dev/$PROJECT_ID/nebulous/cache:buildcache" + CACHE_TO="--cache-to=type=registry,ref=us-docker.pkg.dev/$PROJECT_ID/nebulous/cache:buildcache,mode=max" + + # Build (binary-only) slim-image + # This img serves as base layer for the full image and cached + docker buildx build \ + --platform linux/amd64 \ + --target binary-only \ + $$TAGS \ + $$CACHE_FROM \ + $$CACHE_TO \ + --push \ + . + + # Build full image with tools on top of the binary-only image docker buildx build \ --platform linux/amd64 \ + --target binary-and-tools \ $$TAGS \ + $$CACHE_FROM \ + $$CACHE_TO \ --push \ . diff --git a/deploy/docker/docker-compose.yaml b/deploy/docker/docker-compose.yaml new file mode 100644 index 0000000..80c1f56 --- /dev/null +++ b/deploy/docker/docker-compose.yaml @@ -0,0 +1,69 @@ +services: + postgres: + image: postgres:17 + environment: + POSTGRES_PASSWORD: postgres + ports: + - "5432:5432" + restart: unless-stopped + + redis: + image: redis:8 + restart: unless-stopped + network_mode: service:tailscale + + nebu: + # image: us-docker.pkg.dev/agentsea-dev/nebulous/server:${NEBU_VERSION:-latest} + # image: us-docker.pkg.dev/agentsea-dev/nebulous/server:latest + image: us-docker.pkg.dev/agentsea-dev/nebulous/server:c2e0e00-binary-only + command: ["sh", "-c", "exec nebu serve --host 0.0.0.0 --port 3000"] + environment: + DATABASE_HOST: postgres + DATABASE_PORT: 5342 + DATABASE_USER: postgres + DATABASE_PASSWORD: postgres + DATABASE_URL: postgres://postgres:postgres@postgres:5432/postgres + REDIS_HOST: localhost + REDIS_PORT: 6379 + RUST_LOG: $RUST_LOG + NEBU_BUCKET_NAME: $NEBU_BUCKET_NAME + NEBU_BUCKET_REGION: $NEBU_BUCKET_REGION + NEBU_ROOT_OWNER: $NEBU_ROOT_OWNER + NEBU_PUBLISH_URL: 
$NEBU_PUBLISH_URL + TS_APIKEY: $TS_APIKEY + TS_TAILNET: $TS_TAILNET + TS_LOGIN_SERVER: $TS_LOGIN_SERVER + + network_mode: service:tailscale + depends_on: + - tailscale + - postgres + - redis + + tailscale: + image: tailscale/tailscale:stable + container_name: tailscale + hostname: nebulous + environment: + - TS_STATE_DIR=/var/lib/tailscale + - TS_USERSPACE=false + - TS_AUTHKEY=$TS_AUTHKEY + - TS_EXTRA_ARGS=--login-server $TS_LOGIN_SERVER + volumes: + - nebu-ts-authkey:/var/lib/tailscale + - nebu-ts-sock:/var/run/tailscale + - nebu-tmp:/tmp + devices: + - /dev/net/tun:/dev/net/tun + cap_add: + - NET_ADMIN + restart: unless-stopped + + + +volumes: + sccache: + nebu-ts-authkey: + driver: local + nebu-ts-sock: + nebu-tmp: diff --git a/docker-compose.yml b/docker-compose.yaml similarity index 100% rename from docker-compose.yml rename to docker-compose.yaml diff --git a/src/cli.rs b/src/cli.rs index 740109d..15d45c7 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -58,6 +58,10 @@ pub enum Commands { /// The port to bind the internal auth server to. #[arg(long, default_value_t = 8080)] auth_port: u16, + + /// Run in Docker mode + #[arg(long, default_value_t = false)] + docker: bool, }, /// Proxy services. 
diff --git a/src/commands/serve_cmd.rs b/src/commands/serve_cmd.rs index b1a9f09..50530f2 100644 --- a/src/commands/serve_cmd.rs +++ b/src/commands/serve_cmd.rs @@ -1,10 +1,15 @@ -// src/commands/serve.rs - +use bollard::container::*; +use bollard::errors::Error as BollardError; +use bollard::models::*; +use bollard::volume::CreateVolumeOptions; +use bollard::Docker; use nebulous::create_app; use nebulous::create_app_state; use nebulous::proxy::server::start_proxy; use nebulous::resources::v1::containers::controller::ContainerController; use nebulous::resources::v1::processors::controller::ProcessorController; +use std::collections::HashMap; +use std::default::Default; use std::error::Error; pub async fn execute( @@ -12,7 +17,17 @@ pub async fn execute( port: u16, internal_auth: bool, auth_port: u16, + docker: bool, ) -> Result<(), Box> { + // Check docker flag FIRST, before any configuration validation + if docker { + println!("Starting Nebulous in Docker mode..."); + // serve_docker_with_bollard(host, port, internal_auth, auth_port).await?; + serve_docker_with_compose(host, port, internal_auth, auth_port).await?; + return Ok(()); + } + + // Only validate configuration if NOT running in docker mode let app_state = create_app_state().await?; let app = create_app(app_state.clone()).await; @@ -63,3 +78,465 @@ pub async fn execute( Ok(()) } + +async fn serve_docker_with_bollard( + host: String, + port: u16, + internal_auth: bool, + auth_port: u16, +) -> Result<(), Box> { + let docker = Docker::connect_with_local_defaults()?; + + // Ensure volumes exist + for name in ["nebu-ts-authkey", "nebu-ts-sock", "nebu-tmp"] { + docker + .create_volume(CreateVolumeOptions { + name, + ..Default::default() + }) + .await + .ok(); + } + + // Ensure containers exist and are started + let ts_authkey = format!( + "TS_AUTHKEY={}", + std::env::var("TS_AUTHKEY").unwrap_or_default() + ); + let ts_extra_args = format!( + "TS_EXTRA_ARGS=--login-server {}", + 
std::env::var("TS_LOGIN_SERVER").unwrap_or_default() + ); + println!("Test 1"); + ensure_container( + &docker, + "tailscale", + bollard::container::Config { + image: Some("tailscale/tailscale:stable"), + hostname: Some("nebulous"), + env: Some(vec![ + "TS_STATE_DIR=/var/lib/tailscale", + "TS_USERSPACE=false", + &ts_authkey, + &ts_extra_args, + ]), + host_config: Some(HostConfig { + cap_add: Some(vec!["NET_ADMIN".to_string()]), + devices: Some(vec![DeviceMapping { + path_on_host: Some("/dev/net/tun".to_string()), + path_in_container: Some("/dev/net/tun".to_string()), + ..Default::default() + }]), + binds: Some(vec![ + "nebu-ts-authkey:/var/lib/tailscale".to_string(), + "nebu-ts-sock:/var/run/tailscale".to_string(), + "nebu-tmp:/tmp".to_string(), + ]), + restart_policy: Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::UNLESS_STOPPED), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + ) + .await?; + println!("Tailscale container created"); + + println!("Creating postgres container"); + ensure_container( + &docker, + "postgres", + bollard::container::Config { + image: Some("postgres:17"), + env: Some(vec!["POSTGRES_PASSWORD=postgres"]), + exposed_ports: Some(HashMap::from([("5432/tcp", HashMap::new())])), + host_config: Some(HostConfig { + port_bindings: Some(HashMap::from([( + "5432/tcp".to_string(), + Some(vec![PortBinding { + host_ip: Some("0.0.0.0".to_string()), + host_port: Some("5432".to_string()), + }]), + )])), + restart_policy: Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::UNLESS_STOPPED), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + ) + .await?; + println!("Postgres container created"); + + println!("Creating redis container"); + ensure_container( + &docker, + "redis", + bollard::container::Config { + image: Some("redis:8"), + host_config: Some(HostConfig { + network_mode: Some("container:tailscale".to_string()), + restart_policy: Some(RestartPolicy { + name: 
Some(RestartPolicyNameEnum::UNLESS_STOPPED), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + ) + .await?; + println!("Redis container created"); + + let rust_log = format!( + "RUST_LOG={}", + std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()) + ); + let nebu_bucket_name = format!( + "NEBU_BUCKET_NAME={}", + std::env::var("NEBU_BUCKET_NAME").expect("NEBU_BUCKET_NAME must be set") + ); + let nebu_bucket_region = format!( + "NEBU_BUCKET_REGION={}", + std::env::var("NEBU_BUCKET_REGION").expect("NEBU_BUCKET_REGION must be set") + ); + let nebu_root_owner = format!( + "NEBU_ROOT_OWNER={}", + std::env::var("NEBU_ROOT_OWNER").unwrap_or_else(|_| "nebulous".to_string()) + ); + let nebu_publish_url = format!( + "NEBU_PUBLISH_URL={}", + std::env::var("NEBU_PUBLISH_URL").unwrap_or_else(|_| "http://localhost:3000".to_string()) + ); + let ts_apikey = format!( + "TS_APIKEY={}", + std::env::var("TS_APIKEY").unwrap_or_else(|_| "your-ts-apikey".to_string()) + ); + + let envs = vec![ + "DATABASE_HOST=postgres", + "DATABASE_PORT=5342", + "DATABASE_USER=postgres", + "DATABASE_PASSWORD=postgres", + "DATABASE_URL=postgres://postgres:postgres@postgres:5432/postgres", + "REDIS_HOST=localhost", + "REDIS_PORT=6379", + &ts_apikey, + &rust_log, + &nebu_bucket_name, + &nebu_bucket_region, + &nebu_root_owner, + &nebu_publish_url, + ]; + + ensure_container( + &docker, + "nebu", + bollard::container::Config { + // TODO: Set to version from Cargo.toml + image: Some("us-docker.pkg.dev/agentsea-dev/nebulous/server:c2e0e00-binary-only"), + env: Some(envs), + cmd: Some(vec![ + "sh", + "-c", + "exec nebu serve --host 0.0.0.0 --port 3000", + ]), + host_config: Some(HostConfig { + network_mode: Some("container:tailscale".to_string()), + restart_policy: Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::UNLESS_STOPPED), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + ) + .await?; + + println!("All containers 
are up."); + Ok(()) +} + +async fn ensure_container( + docker: &Docker, + name: &str, + config: bollard::container::Config<&str>, +) -> Result<(), BollardError> { + let containers = docker + .list_containers(Some(ListContainersOptions:: { + all: true, + ..Default::default() + })) + .await?; + + let exists = containers.iter().any(|c| { + c.names + .as_ref() + .map(|names| names.iter().any(|n| n.trim_start_matches('/') == name)) + .unwrap_or(false) + }); + + if !exists { + println!("Creating container: {name}"); + docker + .create_container( + Some(CreateContainerOptions { + name, + ..Default::default() + }), + config, + ) + .await?; + } else { + println!("Container '{name}' already exists."); + } + + let details = docker.inspect_container(name, None).await?; + let running = details + .state + .as_ref() + .and_then(|s| s.running) + .unwrap_or(false); + + if !running { + println!("Starting container: {name}"); + docker + .start_container(name, None::>) + .await?; + println!("Test 2"); + } else { + println!("Container '{name}' is already running."); + } + + Ok(()) +} + +// RAII guard for temporary file cleanup +struct TempFileGuard(std::path::PathBuf); +impl Drop for TempFileGuard { + fn drop(&mut self) { + let _ = std::fs::remove_file(&self.0); + } +} + +struct DockerComposeManager { + child: std::process::Child, + compose_path: String, +} + +impl DockerComposeManager { + fn new(compose_path: String) -> Result> { + + use nebulous::config::SERVER_CONFIG; + + let child = std::process::Command::new("docker") + .args(["compose", "-f", &compose_path, "up", "--build"]) + .env("NEBU_BUCKET_NAME", SERVER_CONFIG.bucket_name.clone()) + .env("NEBU_BUCKET_REGION", SERVER_CONFIG.bucket_region.clone()) + .env("NEBU_ROOT_OWNER", SERVER_CONFIG.root_owner.clone()) + .env("NEBU_PUBLISH_URL", SERVER_CONFIG.publish_url.clone().unwrap_or_default()) + .env("TS_APIKEY", SERVER_CONFIG.vpn.api_key.clone().unwrap_or_default()) + .env("RUST_LOG", "info") + .stdout(std::process::Stdio::piped()) 
+ .stderr(std::process::Stdio::piped()) + .spawn()?; + + Ok(Self { child, compose_path }) + } + + fn kill(&mut self) { + let _ = self.child.kill(); + } + + async fn wait(&mut self) -> Result { + self.child.wait() + } + + async fn cleanup(&self) { + println!("Running docker-compose down to clean up..."); + let status = std::process::Command::new("docker") + .args(["compose", "-f", &self.compose_path, "down"]) + .status(); + + match status { + Ok(status) if status.success() => { + println!("Docker stack cleaned up successfully."); + } + Ok(_) => { + println!("Warning: docker-compose down failed."); + } + Err(e) => { + println!("Warning: Failed to run docker-compose down: {}", e); + } + } + } +} + +struct OutputStreamer { + stdout_handle: tokio::task::JoinHandle<()>, + stderr_handle: tokio::task::JoinHandle<()>, +} + +impl OutputStreamer { + fn new(stdout: std::process::ChildStdout, stderr: std::process::ChildStderr) -> Self { + let stdout_handle = tokio::spawn(async move { + use tokio::io::AsyncReadExt; + let mut reader = tokio::io::BufReader::new( + tokio::process::ChildStdout::from_std(stdout).unwrap() + ); + let mut buffer = [0; 1024]; + loop { + match reader.read(&mut buffer).await { + Ok(0) => break, // EOF + Ok(n) => { + print!("{}", String::from_utf8_lossy(&buffer[..n])); + } + Err(_) => break, + } + } + }); + + let stderr_handle = tokio::spawn(async move { + use tokio::io::AsyncReadExt; + let mut reader = tokio::io::BufReader::new( + tokio::process::ChildStderr::from_std(stderr).unwrap() + ); + let mut buffer = [0; 1024]; + loop { + match reader.read(&mut buffer).await { + Ok(0) => break, // EOF + Ok(n) => { + eprint!("{}", String::from_utf8_lossy(&buffer[..n])); + } + Err(_) => break, + } + } + }); + + Self { stdout_handle, stderr_handle } + } + + async fn wait_for_completion(self) { + let _ = self.stdout_handle.await; + let _ = self.stderr_handle.await; + } +} + +struct SignalHandler { + shutdown_rx: tokio::sync::broadcast::Receiver<()>, +} + +impl 
SignalHandler { + fn new() -> (Self, tokio::sync::broadcast::Sender<()>) { + let (shutdown_tx, shutdown_rx) = tokio::sync::broadcast::channel::<()>(1); + + // Spawn signal handler + let shutdown_tx_clone = shutdown_tx.clone(); + tokio::spawn(async move { + tokio::signal::ctrl_c().await.unwrap(); + println!("\nReceived interrupt signal. Cleaning up..."); + let _ = shutdown_tx_clone.send(()); + }); + + (Self { shutdown_rx }, shutdown_tx) + } + + async fn wait_for_shutdown(&mut self) { + let _ = self.shutdown_rx.recv().await; + } +} + +async fn serve_docker_with_compose( + host: String, + port: u16, + _internal_auth: bool, + _auth_port: u16, +) -> Result<(), Box> { + println!("Starting Nebulous in Docker mode..."); + println!("This will use docker-compose to start the full stack with prebuilt images."); + println!("Make sure you have Docker and docker-compose installed."); + println!("Press Ctrl+C to stop and clean up."); + + setup_environment()?; + + let (compose_path, _guard) = create_temp_compose_file()?; + + let (mut signal_handler, _shutdown_tx) = SignalHandler::new(); + let mut docker_manager = DockerComposeManager::new(compose_path.clone())?; + + let stdout = docker_manager.child.stdout.take().unwrap(); + let stderr = docker_manager.child.stderr.take().unwrap(); + let output_streamer = OutputStreamer::new(stdout, stderr); + + let result = tokio::select! 
{ + _ = signal_handler.wait_for_shutdown() => { + println!("Shutting down docker-compose..."); + docker_manager.kill(); + tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + docker_manager.cleanup().await; + Ok(()) + } + exit_status = async { + output_streamer.wait_for_completion().await; + docker_manager.wait().await + } => { + match exit_status { + Ok(status) if status.success() => { + println!("Docker stack started successfully!"); + println!("Nebulous server should be available at http://{}:{}", host, port); + Ok(()) + } + Ok(_) => { + Err("Docker-compose failed to start properly.".into()) + } + Err(e) => { + Err(format!("Failed to start docker-compose: {}", e).into()) + } + } + } + }; + + result +} + +fn setup_environment() -> Result<(), Box> { + // Set default environment variables for docker mode to prevent validation errors + if std::env::var("NEBU_BUCKET_NAME").is_err() { + std::env::set_var("NEBU_BUCKET_NAME", "nebulous"); + } + if std::env::var("NEBU_BUCKET_REGION").is_err() { + std::env::set_var("NEBU_BUCKET_REGION", "us-east-1"); + } + if std::env::var("NEBU_ROOT_OWNER").is_err() { + std::env::set_var("NEBU_ROOT_OWNER", "me"); + } + + // Set the version for the prebuilt image + let version = env!("CARGO_PKG_VERSION"); + std::env::set_var("NEBU_VERSION", version); + println!("Using Nebulous version: {}", version); + + Ok(()) +} + +fn create_temp_compose_file() -> Result<(String, TempFileGuard), Box> { + // Embed docker-compose file in binary + const DOCKER_COMPOSE_CONTENT: &str = include_str!("../../deploy/docker/docker-compose.yaml"); + + // Create a temporary docker-compose file + let temp_dir = std::env::temp_dir(); + let compose_path = temp_dir.join("docker-compose.yaml"); + + // Write the embedded content to the temporary file + std::fs::write(&compose_path, DOCKER_COMPOSE_CONTENT)?; + + let docker_compose_path = compose_path.to_str().unwrap().to_string(); + let guard = TempFileGuard(compose_path); + + Ok((docker_compose_path, guard)) 
+} diff --git a/src/config.rs b/src/config.rs index 31568da..91267a9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -349,5 +349,6 @@ impl ServerConfig { } } } + // Global static CONFIG instance pub static SERVER_CONFIG: Lazy = Lazy::new(ServerConfig::new); diff --git a/src/main.rs b/src/main.rs index d088301..86f430c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,8 +35,9 @@ async fn main() -> Result<(), Box> { port, internal_auth, auth_port, + docker, } => { - commands::serve_cmd::execute(host, port, internal_auth, auth_port).await?; + commands::serve_cmd::execute(host, port, internal_auth, auth_port, docker).await?; } Commands::Sync { command } => match command { SyncCommands::Volumes { diff --git a/src/utils/logging.rs b/src/utils/logging.rs new file mode 100644 index 0000000..16943c6 --- /dev/null +++ b/src/utils/logging.rs @@ -0,0 +1,99 @@ +use tracing::{debug, error, info, warn}; + +/// Structured logging macro for CLI user feedback +#[macro_export] +macro_rules! cli_info { + ($($arg:tt)*) => { + tracing::info!(category = "cli", $($arg)*) + }; +} + +/// Structured logging macro for CLI user feedback with fields +#[macro_export] +macro_rules! cli_info_with_fields { + ($($field:ident = $value:expr),*; $($arg:tt)*) => { + tracing::info!(category = "cli", $($field = $value,)* $($arg)*) + }; +} + +/// Structured logging macro for CLI errors +#[macro_export] +macro_rules! cli_error { + ($($arg:tt)*) => { + tracing::error!(category = "cli", $($arg)*) + }; +} + +/// Structured logging macro for CLI errors with fields +#[macro_export] +macro_rules! cli_error_with_fields { + ($($field:ident = $value:expr),*; $($arg:tt)*) => { + tracing::error!(category = "cli", $($field = $value,)* $($arg)*) + }; +} + +/// Structured logging macro for sync operations +#[macro_export] +macro_rules! sync_info { + ($($arg:tt)*) => { + tracing::info!(category = "sync", $($arg)*) + }; +} + +/// Structured logging macro for sync operations with fields +#[macro_export] +macro_rules! 
sync_info_with_fields { + ($($field:ident = $value:expr),*; $($arg:tt)*) => { + tracing::info!(category = "sync", $($field = $value,)* $($arg)*) + }; +} + +/// Structured logging macro for container operations +#[macro_export] +macro_rules! container_info { + ($($arg:tt)*) => { + tracing::info!(category = "container", $($arg)*) + }; +} + +/// Structured logging macro for container operations with fields +#[macro_export] +macro_rules! container_info_with_fields { + ($($field:ident = $value:expr),*; $($arg:tt)*) => { + tracing::info!(category = "container", $($field = $value,)* $($arg)*) + }; +} + +/// Structured logging macro for database operations +#[macro_export] +macro_rules! db_info { + ($($arg:tt)*) => { + tracing::info!(category = "database", $($arg)*) + }; +} + +/// Structured logging macro for server operations +#[macro_export] +macro_rules! server_info { + ($($arg:tt)*) => { + tracing::info!(category = "server", $($arg)*) + }; +} + +/// Structured logging macro for server operations with fields +#[macro_export] +macro_rules! server_info_with_fields { + ($($field:ident = $value:expr),*; $($arg:tt)*) => { + tracing::info!(category = "server", $($field = $value,)* $($arg)*) + }; +} + +/// Helper function to convert println! style output to structured logging +pub fn log_stdout(message: &str) { + tracing::info!(category = "stdout", message = %message); +} + +/// Helper function to convert eprintln! 
style output to structured logging +pub fn log_stderr(message: &str) { + tracing::error!(category = "stderr", message = %message); +} \ No newline at end of file diff --git a/src/vpn/mod.rs b/src/vpn/mod.rs index 2b4049d..449f0db 100644 --- a/src/vpn/mod.rs +++ b/src/vpn/mod.rs @@ -56,10 +56,10 @@ impl VpnConfig { let tailnet = SERVER_CONFIG.vpn.tailnet.as_ref(); if api_key.is_none() { - return Err("Tailscale requires VPN_API_KEY environment variable".to_string()); + return Err("Tailscale requires TS_APIKEY environment variable".to_string()); } if tailnet.is_none() { - return Err("Tailscale requires VPN_TAILNET environment variable".to_string()); + return Err("Tailscale requires TS_TAILNET environment variable".to_string()); } } VpnProvider::Headscale => {