diff --git a/configs/QUICK_REFERENCE.md b/configs/QUICK_REFERENCE.md
new file mode 100644
index 00000000..a4a12481
--- /dev/null
+++ b/configs/QUICK_REFERENCE.md
@@ -0,0 +1,74 @@
+# Quick Reference: Model Configuration
+
+## Single File vs Directory Download
+
+### Single File (Default)
+```yaml
+my-model:
+  name: "My Model"
+  url: "https://huggingface.co/user/repo/resolve/main/file.safetensors"
+  path: "loras/model.safetensors"
+```
+
+### Directory (Add `is_directory: true`)
+```yaml
+my-directory:
+  name: "My Directory"
+  url: "https://huggingface.co/user/repo/tree/main/folder"
+  path: "models/folder"
+  is_directory: true # ← Add this!
+```
+
+## URL Patterns
+
+| Download Type | URL Pattern | Example |
+|---------------|-------------|---------|
+| **Single File** | `/resolve/` | `https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.safetensors` |
+| **Directory** | `/tree/` | `https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder` |
+
+## Common Model Paths
+
+| Model Type | Path Pattern |
+|------------|--------------|
+| Checkpoints | `checkpoints/SD1.5/` |
+| LoRAs | `loras/SD1.5/` |
+| ControlNet | `controlnet/` |
+| VAE | `vae/` or `vae_approx/` |
+| IP-Adapter | `ipadapter/` |
+| Text Encoders | `text_encoders/CLIPText/` |
+| TensorRT/ONNX | `tensorrt/` |
+
+## IP-Adapter Example
+
+```yaml
+models:
+  # Single file - IP-Adapter model
+  ip-adapter-sd15:
+    name: "IP Adapter SD15"
+    url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.safetensors"
+    path: "ipadapter/ip-adapter_sd15.safetensors"
+
+  # Directory - CLIP image encoder
+  clip-image-encoder:
+    name: "CLIP Image Encoder"
+    url: "https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder"
+    path: "ipadapter/models/image_encoder"
+    is_directory: true
+```
+
+## Usage
+
+```bash
+# Use a config
+python src/comfystream/scripts/setup_models.py --config my-config.yaml
+
+# Use default config (models.yaml)
+python src/comfystream/scripts/setup_models.py
+```
+
+## See Also
+
+- [DIRECTORY_DOWNLOADS.md](../DIRECTORY_DOWNLOADS.md) - Detailed directory download guide
+- [models-ipadapter.yaml](models-ipadapter.yaml) - Complete working example
+- [README.md](README.md) - Full configuration reference
+
diff --git a/configs/models-ipadapter.yaml b/configs/models-ipadapter.yaml
new file mode 100644
index 00000000..466a149f
--- /dev/null
+++ b/configs/models-ipadapter.yaml
@@ -0,0 +1,44 @@
+models:
+  # Example: IP-Adapter setup (single-file downloads with extra_files)
+
+  # Single file download (regular)
+  ip-adapter-plus-sd15:
+    name: "IP Adapter Plus SD15"
+    url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.safetensors"
+    path: "ipadapter/ip-adapter-plus_sd15.safetensors"
+    type: "ipadapter"
+    extra_files:
+      - url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.bin"
+        path: "ipadapter/ip-adapter-plus_sd15.bin"
+
+  clip-image-encoder:
+    name: "CLIP Image Encoder"
+    url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors"
+    path: "ipadapter/image_encoder/model.safetensors"
+    type: "image_encoder"
+    extra_files:
+      - url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/config.json"
+        path: "ipadapter/image_encoder/config.json"
+
+  # Base model
+  sd-turbo:
+    name: "SD-Turbo"
+    url: "https://huggingface.co/stabilityai/sd-turbo/resolve/main/sd_turbo.safetensors"
+    path: "checkpoints/SD1.5/sd_turbo.safetensors"
+    type: "checkpoint"
+
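+  # The CLIP image encoder above can also be fetched as a whole folder using
+  # the directory download described in configs/QUICK_REFERENCE.md. A
+  # hypothetical equivalent entry (local path assumed, not part of this config):
+  #
+  # clip-image-encoder:
+  #   name: "CLIP Image Encoder"
+  #   url: "https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder"
+  #   path: "ipadapter/image_encoder"
+  #   is_directory: true
+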
PixelArtRedmond15V-PixelArt-PIXARFK.safetensors: + name: "PixelArtRedmond15V-PixelArt-PIXARFK" + url: "https://huggingface.co/artificialguybr/pixelartredmond-1-5v-pixel-art-loras-for-sd-1-5/resolve/ab43d9e2cf8c9240189f01e9cdc4ca341362500c/PixelArtRedmond15V-PixelArt-PIXARFK.safetensors" + path: "loras/SD1.5/PixelArt.safetensors" + type: "lora" + + # TAESD for fast VAE + taesd: + name: "TAESD" + url: "https://huggingface.co/madebyollin/taesd/resolve/main/taesd_decoder.safetensors" + path: "vae_approx/taesd_decoder.safetensors" + type: "vae_approx" + extra_files: + - url: "https://huggingface.co/madebyollin/taesd/resolve/main/taesd_encoder.safetensors" + path: "vae_approx/taesd_encoder.safetensors" diff --git a/configs/nodes-streamdiffusion.yaml b/configs/nodes-streamdiffusion.yaml new file mode 100644 index 00000000..94fda0d1 --- /dev/null +++ b/configs/nodes-streamdiffusion.yaml @@ -0,0 +1,36 @@ +nodes: + # Minimal node configuration for faster builds + comfyui-tensorrt: + name: "ComfyUI TensorRT" + url: "https://github.com/yondonfu/ComfyUI_TensorRT.git" + branch: "quantization_with_controlnet_fixes" + type: "tensorrt" + dependencies: + - "tensorrt==10.12.0.36" + + comfyui-streamdiffusion: + name: "ComfyUI StreamDiffusion" + url: "https://github.com/muxionlabs/ComfyUI-StreamDiffusion" + branch: "main" + type: "tensorrt" + + comfyui-torch-compile: + name: "ComfyUI Torch Compile" + url: "https://github.com/yondonfu/ComfyUI-Torch-Compile" + type: "tensorrt" + + comfyui_controlnet_aux: + name: "ComfyUI ControlNet Auxiliary" + url: "https://github.com/Fannovel16/comfyui_controlnet_aux" + type: "controlnet" + + comfyui-stream-pack: + name: "ComfyUI Stream Pack" + url: "https://github.com/livepeer/ComfyUI-Stream-Pack" + branch: "main" + type: "utility" + + rgthree-comfy: + name: "rgthree Comfy" + url: "https://github.com/rgthree/rgthree-comfy.git" + type: "utility" diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base index 4172e977..d2ce3fbb 100644 --- a/docker/Dockerfile.base +++ b/docker/Dockerfile.base @@ -1,11 +1,13 @@ ARG BASE_IMAGE=nvidia/cuda:12.8.1-cudnn-devel-ubuntu22.04 \ CONDA_VERSION=latest \ - PYTHON_VERSION=3.12 + PYTHON_VERSION=3.12 \ + NODES_CONFIG=nodes.yaml FROM "${BASE_IMAGE}" ARG CONDA_VERSION \ - PYTHON_VERSION + PYTHON_VERSION \ + NODES_CONFIG ENV DEBIAN_FRONTEND=noninteractive \ TensorRT_ROOT=/opt/TensorRT-10.12.0.36 \ @@ -103,8 +105,8 @@ RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream py ARG CACHEBUST=static ENV CACHEBUST=${CACHEBUST} -# Run setup_nodes -RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream python src/comfystream/scripts/setup_nodes.py --workspace /workspace/ComfyUI +# Run setup_nodes with custom config if specified +RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream python src/comfystream/scripts/setup_nodes.py --workspace /workspace/ComfyUI --config ${NODES_CONFIG} # Setup opencv with CUDA support RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream --no-capture-output docker/entrypoint.sh --opencv-cuda diff --git a/docker/README.md b/docker/README.md index ad691ace..cabb2fd4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,4 +1,4 @@ -# ComfyStream Docker +# ComfyStream Docker Build Configuration This folder contains the Docker files that can be used to run ComfyStream in a containerized fashion or to work on the codebase within a dev container. 
 This README contains the general usage instructions while the [Devcontainer Readme](../.devcontainer/README.md) contains instructions on how to use Comfystream inside a dev container and get quickly started with your development journey.
@@ -7,21 +7,46 @@ This folder contains the Docker files that can be used to run ComfyStream in a c
 
 - [Dockerfile](Dockerfile) - The main Dockerfile that can be used to run ComfyStream in a containerized fashion.
 - [Dockerfile.base](Dockerfile.base) - The base Dockerfile that can be used to build the base image for ComfyStream.
 
-## Pre-requisites
+## Building with Custom Nodes Configuration
 
-- [Docker](https://docs.docker.com/get-docker/)
-- [Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+The base Docker image supports specifying a custom nodes configuration file at build time using the `NODES_CONFIG` build argument.
 
-## Usage
+### Usage
 
-### Build the Base Image
+#### Default build (uses `nodes.yaml`)
+```bash
+docker build -t livepeer/comfyui-base -f docker/Dockerfile.base .
+```
 
-To build the base image, run the following command:
+#### Build with custom config from configs directory
+```bash
+docker build -f docker/Dockerfile.base \
+  --build-arg NODES_CONFIG=nodes-streamdiffusion.yaml \
+  -t comfyui-base:streamdiffusion .
+```
 
+#### Build with config from absolute path
 ```bash
-docker build -t livepeer/comfyui-base -f docker/Dockerfile.base .
+docker build -f docker/Dockerfile.base \
+  --build-arg NODES_CONFIG=/path/to/custom-nodes.yaml \
+  -t comfyui-base:custom .
 ```
 
+### Available Build Arguments
+
+| Argument | Default | Description |
+|----------|---------|-------------|
+| `BASE_IMAGE` | `nvidia/cuda:12.8.1-cudnn-devel-ubuntu22.04` | Base CUDA image |
+| `CONDA_VERSION` | `latest` | Miniconda version |
+| `PYTHON_VERSION` | `3.12` | Python version |
+| `NODES_CONFIG` | `nodes.yaml` | Nodes configuration file (filename or path) |
+| `CACHEBUST` | `static` | Cache invalidation for node setup |
+
+### Configuration Files in configs/
+
+- **`nodes.yaml`** - Full node configuration (default)
+- **`nodes-streamdiffusion.yaml`** - Minimal set of nodes for faster builds
+
 ### Build the Main Image
 
 To build the main image, run the following command:
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index e6d44463..c37e8d76 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -138,23 +138,17 @@ if [ "$1" = "--build-engines" ]; then
         echo "Engines for FasterLivePortrait already exists, skipping..."
     fi
 
-    # Build Engine for StreamDiffusion
-    if [ ! -f "$TENSORRT_DIR/StreamDiffusion-engines/stabilityai/sd-turbo--lcm_lora-True--tiny_vae-True--max_batch-3--min_batch-3--mode-img2img/unet.engine.opt.onnx" ]; then
-        cd /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion
-        MODELS="stabilityai/sd-turbo KBlueLeaf/kohaku-v2.1"
-        TIMESTEPS="3"
-        for model in $MODELS; do
-            for timestep in $TIMESTEPS; do
-                echo "Building model=$model with timestep=$timestep"
-                python build_tensorrt.py \
-                    --model-id "$model" \
-                    --timesteps "$timestep" \
-                    --engine-dir $TENSORRT_DIR/StreamDiffusion-engines
-            done
-        done
-    else
-        echo "Engine for StreamDiffusion already exists, skipping..."
- fi + # Build Engine for StreamDiffusion using trt script and config + ENGINE_SCRIPT="/workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/scripts/build_tensorrt_engines.py" + CONFIGS=( + "/workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/configs/sd15_singlecontrol.yaml" + "/workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/configs/sdturbo_multicontrol.yaml" + ) + cd /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/scripts + for ENGINE_CONFIG in "${CONFIGS[@]}"; do + echo "Building StreamDiffusion TensorRT engines using config: $ENGINE_CONFIG" + python "$ENGINE_SCRIPT" --config "$ENGINE_CONFIG" + done shift fi diff --git a/pyproject.toml b/pyproject.toml index d9dfaece..f667845a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ [project.optional-dependencies] dev = ["pytest", "pytest-cov", "ruff"] server = [ - "pytrickle @ git+https://github.com/livepeer/pytrickle.git@v0.1.5" + "pytrickle @ git+https://github.com/livepeer/pytrickle.git@v0.1.7" ] [project.urls] diff --git a/server/frame_processor.py b/server/frame_processor.py index af927935..3dcb835d 100644 --- a/server/frame_processor.py +++ b/server/frame_processor.py @@ -11,7 +11,10 @@ from comfystream.pipeline import Pipeline from comfystream.pipeline_state import PipelineState -from comfystream.utils import convert_prompt +from comfystream.utils import ( + convert_prompt, + get_default_workflow, +) logger = logging.getLogger(__name__) @@ -151,6 +154,49 @@ async def _forward_text_loop(): except Exception: logger.warning("Failed to set up text monitoring", exc_info=True) + def _set_loading_overlay(self, enabled: bool) -> bool: + """Toggle the StreamProcessor loading overlay if available.""" + processor = self._stream_processor + if not processor: + return False + try: + processor.set_loading_overlay(enabled) + logger.debug("Set loading overlay to %s", enabled) + return True + except Exception: + logger.warning("Failed to update loading overlay state", exc_info=True) + return False + + def _schedule_overlay_reset_on_ingest_enabled(self) -> None: + """Disable the loading overlay after pipeline ingest resumes.""" + if not self.pipeline: + self._set_loading_overlay(False) + return + + if self.pipeline.is_ingest_enabled(): + self._set_loading_overlay(False) + return + + async def _wait_for_ingest_enable(): + try: + while True: + if self._stop_event.is_set(): + break + if not self.pipeline: + break + if self.pipeline.is_ingest_enabled(): + break + await asyncio.sleep(0.05) + except asyncio.CancelledError: + raise + except Exception: + logger.debug("Loading overlay watcher error", exc_info=True) + finally: + self._set_loading_overlay(False) + + task = asyncio.create_task(_wait_for_ingest_enable()) + self._background_tasks.append(task) + async def _stop_text_forwarder(self) -> None: """Stop the background text forwarder task if running.""" task = self._text_forward_task @@ -212,16 +258,27 @@ async def on_stream_start(self, params: Optional[Dict[str, Any]] = None): logger.info("Stream starting") self._reset_stop_event() logger.info(f"Stream start params: {params}") + overlay_managed = False if not self.pipeline: logger.debug("Stream start requested before pipeline initialization") return stream_params = normalize_stream_params(params) + stream_width = stream_params.get("width") + stream_height = stream_params.get("height") + stream_width = int(stream_width) if stream_width is not None else None + stream_height = int(stream_height) if stream_height is not None else None prompt_payload = 
stream_params.pop("prompts", None) if prompt_payload is None: prompt_payload = stream_params.pop("prompt", None) + if not prompt_payload and not self.pipeline.state_manager.is_initialized(): + logger.info( + "No prompts provided for new stream; applying default workflow for initialization" + ) + prompt_payload = get_default_workflow() + if prompt_payload: try: await self._apply_stream_start_prompt(prompt_payload) @@ -240,6 +297,19 @@ async def on_stream_start(self, params: Optional[Dict[str, Any]] = None): logger.exception("Failed to process stream start parameters") return + overlay_managed = self._set_loading_overlay(True) + + try: + await self.pipeline.ensure_warmup(stream_width, stream_height) + except Exception: + if overlay_managed: + self._set_loading_overlay(False) + logger.exception("Failed to ensure pipeline warmup during stream start") + return + + if overlay_managed: + self._schedule_overlay_reset_on_ingest_enabled() + try: if ( self.pipeline.state != PipelineState.STREAMING @@ -312,6 +382,12 @@ async def process_video_async( if not self.pipeline: return frame + # TODO: Do we really need this here? + await self.pipeline.ensure_warmup() + + if not self.pipeline.state_manager.is_initialized(): + return VideoProcessingResult.WITHHELD + # If pipeline ingestion is paused, withhold frame so pytrickle renders the overlay if not self.pipeline.is_ingest_enabled(): return VideoProcessingResult.WITHHELD @@ -324,18 +400,9 @@ async def process_video_async( # Process through pipeline await self.pipeline.put_video_frame(av_frame) - # Try to get processed frame with short timeout - try: - processed_av_frame = await asyncio.wait_for( - self.pipeline.get_processed_video_frame(), - timeout=self._stream_processor.overlay_config.auto_timeout_seconds, - ) - processed_frame = VideoFrame.from_av_frame_with_timing(processed_av_frame, frame) - return processed_frame - - except asyncio.TimeoutError: - # No frame ready yet - return withheld sentinel to trigger overlay - return VideoProcessingResult.WITHHELD + processed_av_frame = await self.pipeline.get_processed_video_frame() + processed_frame = VideoFrame.from_av_frame_with_timing(processed_av_frame, frame) + return processed_frame except Exception as e: logger.error(f"Video processing failed: {e}") diff --git a/src/comfystream/pipeline.py b/src/comfystream/pipeline.py index cf2302de..9143e591 100644 --- a/src/comfystream/pipeline.py +++ b/src/comfystream/pipeline.py @@ -72,6 +72,10 @@ def __init__( self._initialize_lock = asyncio.Lock() self._ingest_enabled = True self._prompt_update_lock = asyncio.Lock() + self._warmup_lock = asyncio.Lock() + self._warmup_task: Optional[asyncio.Task] = None + self._warmup_completed = False + self._last_warmup_resolution: Optional[tuple[int, int]] = None @property def state(self) -> PipelineState: @@ -155,6 +159,10 @@ async def warmup( await self.state_manager.transition_to(PipelineState.ERROR) raise finally: + if warmup_successful: + self._warmup_completed = True + self._last_warmup_resolution = (self.width, self.height) + if transitioned and warmup_successful: try: await self.state_manager.transition_to(PipelineState.READY) @@ -168,6 +176,63 @@ async def warmup( except Exception: logger.exception("Failed to restore STREAMING state after warmup") + async def ensure_warmup(self, width: Optional[int] = None, height: Optional[int] = None): + """Ensure the pipeline has been warmed up for the given resolution.""" + if width and width > 0: + self.width = int(width) + if height and height > 0: + self.height = int(height) + + 
if self._warmup_completed and self._last_warmup_resolution: + if (self.width, self.height) != self._last_warmup_resolution: + self._warmup_completed = False + + if self._warmup_completed: + return + + if not self.state_manager.is_initialized(): + logger.debug("Skipping warmup scheduling - pipeline not initialized") + return + + async with self._warmup_lock: + if self._warmup_completed: + return + if not self.state_manager.is_initialized(): + return + if self._warmup_task and not self._warmup_task.done(): + return + + logger.info("Scheduling pipeline warmup for %sx%s", self.width, self.height) + self.disable_ingest() + self._warmup_task = asyncio.create_task(self._run_background_warmup()) + + async def _run_background_warmup(self): + try: + await self.warmup() + except asyncio.CancelledError: + logger.debug("Pipeline warmup task cancelled") + raise + except Exception: + logger.exception("Pipeline warmup failed") + finally: + self.enable_ingest() + self._warmup_task = None + + async def _reset_warmup_state(self): + """Reset warmup bookkeeping and cancel any in-flight warmup tasks.""" + async with self._warmup_lock: + if self._warmup_task and not self._warmup_task.done(): + self._warmup_task.cancel() + try: + await self._warmup_task + except asyncio.CancelledError: + pass + except Exception: + logger.debug("Warmup task raised during cancellation", exc_info=True) + self._warmup_task = None + self._warmup_completed = False + self._last_warmup_resolution = None + async def _run_warmup( self, *, @@ -266,6 +331,8 @@ async def set_prompts( skip_warmup: Skip automatic warmup even if auto_warmup is enabled """ try: + await self._reset_warmup_state() + prompt_list = prompts if isinstance(prompts, list) else [prompts] await self.client.set_prompts(prompt_list) @@ -312,6 +379,8 @@ async def update_prompts( if was_streaming and should_warmup: await self.state_manager.transition_to(PipelineState.READY) + await self._reset_warmup_state() + prompt_list = prompts if isinstance(prompts, list) else [prompts] await self.client.update_prompts(prompt_list) @@ -775,6 +844,7 @@ async def cleanup(self): # Clear cached modalities and I/O capabilities since we're resetting self._cached_modalities = None self._cached_io_capabilities = None + await self._reset_warmup_state() # Clear pipeline queues await self._clear_pipeline_queues() diff --git a/src/comfystream/scripts/README.md b/src/comfystream/scripts/README.md index 1d95d49f..e593a376 100644 --- a/src/comfystream/scripts/README.md +++ b/src/comfystream/scripts/README.md @@ -22,11 +22,23 @@ python src/comfystream/scripts/setup_nodes.py --workspace /path/to/comfyui ``` > The optional flag `--pull-branches` can be used to ensure the latest git changes are pulled for any custom nodes defined with a `branch` in nodes.yaml +#### Using a custom nodes configuration +```bash +python src/comfystream/scripts/setup_nodes.py --workspace /path/to/comfyui --config nodes-streamdiffusion.yaml +``` +> The `--config` flag accepts a filename (searches in `configs/`), relative path, or absolute path to a custom nodes configuration file + ### Download models and compile tensorrt engines ```bash python src/comfystream/scripts/setup_models.py --workspace /path/to/comfyui ``` +#### Using a custom models configuration +```bash +python src/comfystream/scripts/setup_models.py --workspace /path/to/comfyui --config models-minimal.yaml +``` +> The `--config` flag accepts a filename (searches in `configs/`), relative path, or absolute path to a custom models configuration file + ## Configuration 
Examples
 
 ### Custom Nodes (nodes.yaml)
@@ -55,6 +67,10 @@ models:
     type: "checkpoint"
 ```
 
+> You can create custom model configurations for different use cases. See `configs/models-minimal.yaml` and `configs/models-pixelart.yaml` for examples.
+
+**Directory Downloads:** The script supports downloading entire directories from HuggingFace. Add `is_directory: true` to your config. See `configs/models-ipadapter.yaml` for examples or read [DIRECTORY_DOWNLOADS.md](../../../DIRECTORY_DOWNLOADS.md) for the full guide.
+
 ## Directory Structure
 
 ```sh
diff --git a/src/comfystream/scripts/constraints.txt b/src/comfystream/scripts/constraints.txt
index 3de1d200..e240d7ef 100644
--- a/src/comfystream/scripts/constraints.txt
+++ b/src/comfystream/scripts/constraints.txt
@@ -8,7 +8,8 @@ tensorrt==10.12.0.36
 tensorrt-cu12==10.12.0.36
 xformers==0.0.32.post2
 onnx==1.18.0
-onnxruntime==1.22.0
-onnxruntime-gpu==1.22.0
+onnxruntime>=1.22.0
+onnxruntime-gpu>=1.22.0
 onnxmltools==1.14.0
 cuda-python<13.0
+huggingface-hub>=0.20.0
diff --git a/src/comfystream/scripts/setup_models.py b/src/comfystream/scripts/setup_models.py
index 50a186f4..b607bdd9 100644
--- a/src/comfystream/scripts/setup_models.py
+++ b/src/comfystream/scripts/setup_models.py
@@ -1,5 +1,6 @@
 import argparse
 import os
+import sys
 from pathlib import Path
 
 import requests
@@ -7,6 +8,13 @@
 from tqdm import tqdm
 from utils import get_config_path, load_model_config
 
+try:
+    from huggingface_hub import snapshot_download
+    HF_HUB_AVAILABLE = True
+except ImportError:
+    HF_HUB_AVAILABLE = False
+    print("Warning: huggingface_hub not installed. Directory downloads from HuggingFace will not be available.")
+
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Setup ComfyUI models")
@@ -15,6 +23,9 @@
         default=os.environ.get("COMFY_UI_WORKSPACE", os.path.expanduser("~/comfyui")),
         help="ComfyUI workspace directory (default: ~/comfyui or $COMFY_UI_WORKSPACE)",
     )
+    parser.add_argument('--config',
+                        default=None,
+                        help='Path to custom models config file (default: configs/models.yaml). Can be a filename (searches in configs/), or an absolute/relative path.')
     return parser.parse_args()
 
 
@@ -48,6 +59,29 @@ def download_file(url, destination, description=None):
             destination.unlink()
         raise ValueError(f"LFS pointer detected. Failed to download: {url}")
 
+
+def download_hf_directory(repo_id, subfolder, destination, description=None):
+    """Download an entire directory from HuggingFace Hub"""
+    if not HF_HUB_AVAILABLE:
+        raise RuntimeError("huggingface_hub is required for directory downloads. Install with: pip install huggingface_hub")
+
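+    # snapshot_download() preserves the repository's folder layout under
+    # local_dir, so matched files land at {local_dir}/{subfolder}/...
+    # The call below passes local_dir=destination.parent, which assumes the
+    # tail of `destination` mirrors the repo `subfolder` path.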
+    destination = Path(destination)
+    destination.mkdir(parents=True, exist_ok=True)
+
+    desc = description or f"Downloading {repo_id}/{subfolder}"
+    print(f"{desc}...")
+
+    try:
+        # Download the specific subfolder to the destination
+        snapshot_download(
+            repo_id=repo_id,
+            allow_patterns=f"{subfolder}/*",
+            local_dir=destination.parent,
+            local_dir_use_symlinks=False
+        )
+        print(f"✓ Downloaded {repo_id}/{subfolder} to {destination}")
+    except Exception as e:
+        print(f"❌ Error downloading {repo_id}/{subfolder}: {e}")
+        raise
+
 
 def setup_model_files(workspace_dir, config_path=None):
     """Download and setup required model files based on configuration"""
@@ -74,8 +108,38 @@
 
         if not full_path.exists():
             print(f"Downloading {model_info['name']}...")
-            download_file(model_info["url"], full_path, f"Downloading {model_info['name']}")
-            print(f"Downloaded {model_info['name']} to {full_path}")
+
+            # Check if this is a HuggingFace directory download
+            if model_info.get('is_directory', False):
+                # Parse HuggingFace URL to extract repo_id and subfolder
+                # Format: https://huggingface.co/{repo_id}/tree/main/{subfolder}
+                # Or: https://huggingface.co/{repo_id}/blob/main/{subfolder}
+                url = model_info['url']
+                if 'huggingface.co' in url:
+                    parts = url.split('huggingface.co/')[-1].split('/')
+                    if len(parts) >= 4 and (parts[2] in ['tree', 'blob']):
+                        repo_id = f"{parts[0]}/{parts[1]}"
+                        subfolder = '/'.join(parts[4:]) if len(parts) > 4 else parts[3]
+                        download_hf_directory(
+                            repo_id=repo_id,
+                            subfolder=subfolder,
+                            destination=full_path,
+                            description=f"Downloading {model_info['name']}"
+                        )
+                    else:
+                        print(f"❌ Invalid HuggingFace URL format: {url}")
+                        continue
+                else:
+                    print(f"❌ Directory download only supports HuggingFace URLs: {url}")
+                    continue
+            else:
+                # Regular file download
+                download_file(
+                    model_info['url'],
+                    full_path,
+                    f"Downloading {model_info['name']}"
+                )
+                print(f"Downloaded {model_info['name']} to {full_path}")
 
         # Handle any extra files (like configs)
         if "extra_files" in model_info:
@@ -112,8 +176,13 @@
         "checkpoints/SD1.5",
         "controlnet",
         "vae",
+        "vae_approx",
         "tensorrt",
         "unet",
+        "loras/SD1.5",
+        "ipadapter",
+        "text_encoders/CLIPText",
+        "liveportrait_onnx/joyvasa_models",
         "LLM",
     ]
     for dir_name in model_dirs:
@@ -124,9 +193,19 @@
 def setup_models():
     args = parse_args()
     workspace_dir = Path(args.workspace)
+
+    # Resolve config path if provided
+    config_path = None
+    if args.config:
+        config_path = Path(args.config)
+        # If it's just a filename, look in configs directory
+        if not config_path.is_absolute() and "/" not in str(config_path):
+            config_path = Path("configs") / config_path
+        if not config_path.exists():
+            print(f"Error: Config file not found at {config_path}")
+            sys.exit(1)
 
     setup_directories(workspace_dir)
-    setup_model_files(workspace_dir)
+    setup_model_files(workspace_dir, config_path)
 
 setup_models()
diff --git a/src/comfystream/scripts/setup_nodes.py b/src/comfystream/scripts/setup_nodes.py
index 2aca1077..9bacc61a 100755
--- a/src/comfystream/scripts/setup_nodes.py
+++ b/src/comfystream/scripts/setup_nodes.py
@@ -21,6 +21,11 @@ def parse_args():
         default=False,
         help="Update existing nodes to their specified branches",
     )
+    parser.add_argument(
+        "--config",
+        default=None,
+        help="Path to custom nodes config file (default: configs/nodes.yaml). Can be a filename (searches in configs/), or an absolute/relative path.",
+    )
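+    # Resolution rule (mirrors setup_models.py): a bare filename such as
+    # "nodes-streamdiffusion.yaml" is looked up under configs/, while any
+    # value containing "/" (relative or absolute) is used as given.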
     return parser.parse_args()
 
 
@@ -122,10 +127,21 @@ def install_custom_nodes(workspace_dir, config_path=None, pull_branches=False):
 def setup_nodes():
     args = parse_args()
     workspace_dir = Path(args.workspace)
+
+    # Resolve config path if provided
+    config_path = None
+    if args.config:
+        config_path = Path(args.config)
+        # If it's just a filename, look in configs directory
+        if not config_path.is_absolute() and "/" not in str(config_path):
+            config_path = Path("configs") / config_path
+        if not config_path.exists():
+            print(f"Error: Config file not found at {config_path}")
+            sys.exit(1)
 
     setup_environment(workspace_dir)
     setup_directories(workspace_dir)
-    install_custom_nodes(workspace_dir, pull_branches=args.pull_branches)
+    install_custom_nodes(workspace_dir, config_path=config_path, pull_branches=args.pull_branches)
 
 
 if __name__ == "__main__":