Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -135,3 +135,9 @@ dmypy.json
.pyre/
configs
output

# Claude settings
.claude/*

# Poetry lock file should NOT be ignored
# poetry.lock
4,645 changes: 4,645 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

97 changes: 97 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
[tool.poetry]
name = "video-processing-project"
version = "0.1.0"
description = "A video processing project using diffusion models"
authors = ["Your Name <you@example.com>"]
readme = "README.md"
packages = [{include = "models"}, {include = "utils"}, {include = "stable_lora"}]

[tool.poetry.dependencies]
python = "^3.8"
accelerate = "0.21.0"
torch = "2.0.0"
torchvision = "*"
diffusers = "0.24.0"
transformers = "4.36.2"
einops = "*"
# decord = "*" # Temporarily commented out due to installation issues
tqdm = "*"
safetensors = "*"
omegaconf = "*"
opencv-python = "*"
pydantic = "*"
compel = "*"
easydict = "*"
rotary-embedding-torch = "*"
imageio = {extras = ["ffmpeg"], version = "*"}
gradio = "*"
httpx = {extras = ["socks"], version = "*"}

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.0"

[tool.poetry.scripts]
test = "pytest:main"
tests = "pytest:main"

[tool.pytest.ini_options]
minversion = "7.0"
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
"-ra",
"--strict-markers",
"--strict-config",
"--cov=models",
"--cov=utils",
"--cov=stable_lora",
"--cov-branch",
"--cov-report=term-missing:skip-covered",
"--cov-report=html:htmlcov",
"--cov-report=xml:coverage.xml",
"--cov-fail-under=0", # Set to 0 for infrastructure setup, change to 80 when adding actual tests
"-vv",
]
markers = [
"unit: Unit tests",
"integration: Integration tests",
"slow: Slow tests",
]

[tool.coverage.run]
source = ["models", "utils", "stable_lora"]
omit = [
"*/tests/*",
"*/__init__.py",
"*/conftest.py",
"setup.py",
]

[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"def __str__",
"raise AssertionError",
"raise NotImplementedError",
"if __name__ == .__main__.:",
"if TYPE_CHECKING:",
"if typing.TYPE_CHECKING:",
]
show_missing = true
precision = 2
fail_under = 0 # Set to 0 for infrastructure setup, change to 80 when adding actual tests

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Test package initialization
138 changes: 138 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
"""Shared pytest fixtures and configuration for all tests."""

import os
import shutil
import tempfile
from pathlib import Path
from typing import Generator

import pytest
from omegaconf import DictConfig, OmegaConf


@pytest.fixture
def temp_dir() -> Generator[Path, None, None]:
    """Yield a fresh temporary directory, removing it after the test."""
    workdir = Path(tempfile.mkdtemp())
    try:
        yield workdir
    finally:
        # Teardown: drop the whole tree once the test is done with it.
        if workdir.exists():
            shutil.rmtree(workdir)


@pytest.fixture
def mock_config() -> DictConfig:
    """Build a small OmegaConf configuration tree for tests to consume."""
    raw = {
        "model": {
            "name": "test_model",
            "batch_size": 1,
            "learning_rate": 0.001,
        },
        "data": {
            "train_path": "/tmp/train",
            "val_path": "/tmp/val",
            "num_workers": 2,
        },
        "training": {
            "epochs": 10,
            "save_interval": 100,
            "log_interval": 50,
        },
    }
    return OmegaConf.create(raw)


@pytest.fixture
def sample_image_path(temp_dir: Path) -> Path:
    """Return the path of a placeholder .jpg file inside *temp_dir*."""
    path = temp_dir / "test_image.jpg"
    # Stand-in content only; this is not a decodable JPEG.
    path.write_text("dummy image content")
    return path


@pytest.fixture
def sample_video_path(temp_dir: Path) -> Path:
    """Return the path of a placeholder .mp4 file inside *temp_dir*."""
    path = temp_dir / "test_video.mp4"
    # Stand-in content only; this is not a decodable video.
    path.write_text("dummy video content")
    return path


@pytest.fixture
def mock_model_weights(temp_dir: Path) -> Path:
    """Return the path of a placeholder .pth weights file inside *temp_dir*."""
    path = temp_dir / "model_weights.pth"
    # Stand-in content only; this is not a loadable checkpoint.
    path.write_text("dummy weights")
    return path


@pytest.fixture
def environment_setup(monkeypatch):
    """Export the environment variables the tests expect.

    monkeypatch restores the previous environment automatically on teardown.
    """
    test_env = {
        "CUDA_VISIBLE_DEVICES": "0",
        "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:128",
        "TEST_MODE": "1",
    }
    for name, value in test_env.items():
        monkeypatch.setenv(name, value)
    yield


@pytest.fixture
def mock_dataset_config() -> dict:
    """Return a plain-dict dataset configuration for testing."""
    augmentation = {
        "random_flip": True,
        "color_jitter": 0.1,
    }
    return {
        "dataset_name": "test_dataset",
        "image_size": 256,
        "video_length": 16,
        "fps": 8,
        "augmentation": augmentation,
    }


@pytest.fixture(autouse=True)
def cleanup_cuda_cache():
    """After every test, flush the CUDA allocator cache when torch is present."""
    yield
    try:
        import torch
    except ImportError:
        # torch is optional in the test environment; nothing to clean up.
        return
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


@pytest.fixture
def mock_training_args() -> dict:
    """Return mock training arguments as a plain dictionary.

    Key names mirror common trainer options (batch sizes, warmup,
    save/eval cadence) — presumably transformers-style; verify against
    the consumer before relying on that.
    """
    args = dict(
        output_dir="/tmp/test_output",
        num_train_epochs=3,
        per_device_train_batch_size=4,
        per_device_eval_batch_size=8,
        warmup_steps=500,
        weight_decay=0.01,
        logging_dir="/tmp/logs",
        save_steps=1000,
        evaluation_strategy="steps",
        eval_steps=500,
    )
    return args


# Custom marker registration (kept in sync with pyproject.toml's
# [tool.pytest.ini_options] markers list).
def pytest_configure(config):
    """Register the project's custom markers with pytest."""
    marker_specs = (
        "unit: mark test as a unit test",
        "integration: mark test as an integration test",
        "slow: mark test as slow running",
    )
    for spec in marker_specs:
        config.addinivalue_line("markers", spec)
1 change: 1 addition & 0 deletions tests/integration/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Integration tests package
114 changes: 114 additions & 0 deletions tests/test_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
"""Validation tests to ensure the testing infrastructure is set up correctly."""

import sys
from pathlib import Path

import pytest


class TestInfrastructureValidation:
    """Validate that the testing infrastructure is properly configured."""

    @pytest.mark.unit
    def test_pytest_is_working(self):
        """Verify that pytest can run a simple test."""
        assert True

    @pytest.mark.unit
    def test_project_structure_exists(self):
        """Verify that the expected project structure exists."""
        root = Path(__file__).parent.parent

        # Source packages that ship with the project.
        assert root.exists()
        for package_dir in ("models", "utils", "stable_lora"):
            assert (root / package_dir).exists()

        # Test-suite layout.
        for test_entry in ("tests", "tests/unit", "tests/integration",
                           "tests/conftest.py"):
            assert (root / test_entry).exists()

    @pytest.mark.unit
    def test_python_path_includes_project_root(self):
        """Verify that the project root is in the Python path."""
        root = str(Path(__file__).parent.parent)
        assert any(root in entry for entry in sys.path)

    @pytest.mark.unit
    def test_fixtures_are_available(self, temp_dir, mock_config):
        """Verify that conftest fixtures are accessible."""
        assert temp_dir.exists()
        assert temp_dir.is_dir()

        assert mock_config is not None
        for section in ("model", "data", "training"):
            assert section in mock_config

    @pytest.mark.unit
    def test_temp_dir_cleanup(self, temp_dir):
        """Verify that temp_dir fixture properly cleans up."""
        scratch_file = temp_dir / "test.txt"
        scratch_file.write_text("test content")
        assert scratch_file.exists()
        # The fixture removes the whole directory during teardown.

    @pytest.mark.unit
    def test_mock_file_fixtures(self, sample_image_path, sample_video_path, mock_model_weights):
        """Verify that file creation fixtures work correctly."""
        expectations = (
            (sample_image_path, ".jpg"),
            (sample_video_path, ".mp4"),
            (mock_model_weights, ".pth"),
        )
        for fixture_path, expected_suffix in expectations:
            assert fixture_path.exists()
            assert fixture_path.suffix == expected_suffix

    @pytest.mark.unit
    def test_environment_fixture(self, environment_setup):
        """Verify that environment setup fixture works."""
        import os
        assert os.environ.get("TEST_MODE") == "1"

    @pytest.mark.unit
    def test_coverage_is_configured(self):
        """Verify that coverage is properly configured."""
        # Coverage itself is checked via the generated report; this test
        # only needs to execute under the --cov run.
        assert True

    @pytest.mark.integration
    def test_integration_marker(self):
        """Verify that integration test marker works."""
        assert True

    @pytest.mark.slow
    def test_slow_marker(self):
        """Verify that slow test marker works."""
        assert True


class TestPoetryScriptsConfiguration:
    """Validate Poetry script commands."""

    def _pyproject(self) -> Path:
        """Locate pyproject.toml at the repository root."""
        return Path(__file__).parent.parent / "pyproject.toml"

    @pytest.mark.unit
    def test_pyproject_toml_exists(self):
        """Verify that pyproject.toml exists."""
        assert self._pyproject().exists()

    @pytest.mark.unit
    def test_pyproject_contains_test_scripts(self):
        """Verify that pyproject.toml contains test scripts."""
        text = self._pyproject().read_text()
        for needle in (
            "[tool.poetry.scripts]",
            'test = "pytest:main"',
            'tests = "pytest:main"',
        ):
            assert needle in text
1 change: 1 addition & 0 deletions tests/unit/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Unit tests package