22 changes: 21 additions & 1 deletion .gitignore
@@ -183,4 +183,24 @@ fitting/

*.data

src/test*.py

# Claude Code settings
.claude/*

# Testing artifacts (in addition to existing coverage entries)
.pytest_cache/
htmlcov/
.coverage
coverage.xml

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~

# OS files
.DS_Store
Thumbs.db
5,406 changes: 5,406 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

103 changes: 103 additions & 0 deletions pyproject.toml
@@ -0,0 +1,103 @@
[tool.poetry]
name = "perm"
version = "0.1.0"
description = "PERM: Parametric Hair Generation and Rendering"
authors = ["Hair Research Team"]
readme = "README.md"
packages = [{include = "src"}]

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
autopep8 = "*"
pylint = "*"
trimesh = {extras = ["all"], version = "*"}
scikit-learn = "*"
attrdict = "*"
natsort = "*"
pandas = "*"
pyexr = "*"
pyyaml = "*"
imageio-ffmpeg = "0.4.8"
pyspng = "*"
psutil = "*"
einops = "*"
tensorboard = "*"
torch-cluster = "*"
nvdiffrast = {git = "https://github.com/NVlabs/nvdiffrast.git", rev = "v0.3.0"}

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.1"
numpy = "^1.21.0"

[tool.poetry.scripts]
test = "pytest:main"
tests = "pytest:main"

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
"--strict-markers",
"--strict-config",
"--verbose",
"--cov=src",
"--cov-report=term-missing",
"--cov-report=html:htmlcov",
"--cov-report=xml:coverage.xml",
"--cov-fail-under=80"
]
markers = [
"unit: marks tests as unit tests (fast)",
"integration: marks tests as integration tests (slower)",
"slow: marks tests as slow tests (very slow)"
]
filterwarnings = [
"error",
"ignore::UserWarning",
"ignore::DeprecationWarning"
]

[tool.coverage.run]
source = ["src"]
omit = [
"*/tests/*",
"*/test_*.py",
"*/*_test.py",
"*/conftest.py",
"*/__pycache__/*",
"*/.*",
"*/venv/*",
"*/virtualenv/*",
"*/env/*"
]

[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"if self.debug:",
"if settings.DEBUG",
"raise AssertionError",
"raise NotImplementedError",
"if 0:",
"if __name__ == .__main__.:",
"class .*\\bProtocol\\):",
"@(abc\\.)?abstractmethod"
]
show_missing = true
precision = 2
fail_under = 80

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
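
Side note for reviewers: the `test`/`tests` entries under `[tool.poetry.scripts]` both resolve to `pytest.main`, so after `poetry install` the suite can be launched as `poetry run test`. A minimal sketch of the equivalent programmatic invocation (illustrative only; `run_tests.py` is a hypothetical file, not part of this PR):

```python
# run_tests.py -- hypothetical helper, not part of this PR.
# Equivalent to the "test"/"tests" Poetry scripts, which point at
# pytest:main. The addopts configured above (strict markers, coverage
# with --cov-fail-under=80) are applied by pytest automatically.
import sys

import pytest

if __name__ == "__main__":
    # pytest.main returns an ExitCode; forward it to the shell.
    sys.exit(int(pytest.main()))
```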
Empty file added tests/__init__.py
Empty file.
88 changes: 88 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,88 @@
"""Shared pytest fixtures and configuration for the PERM project."""

import tempfile
import shutil
from pathlib import Path
from typing import Generator, Dict, Any
import pytest
import numpy as np


@pytest.fixture
def temp_dir() -> Generator[Path, None, None]:
"""Create a temporary directory for test files."""
temp_path = Path(tempfile.mkdtemp())
try:
yield temp_path
finally:
shutil.rmtree(temp_path, ignore_errors=True)


@pytest.fixture
def mock_config() -> Dict[str, Any]:
"""Provide a mock configuration for tests."""
return {
"model": {
"latent_dim": 512,
"num_layers": 8,
"hidden_dim": 256
},
"training": {
"batch_size": 4,
"learning_rate": 0.002,
"num_epochs": 100
},
"data": {
"resolution": 512,
"num_samples": 1000
}
}


@pytest.fixture
def sample_hair_data() -> Dict[str, np.ndarray]:
"""Generate sample hair data for testing."""
np.random.seed(42) # For reproducible tests
return {
"strands": np.random.randn(100, 64, 3).astype(np.float32),
"roots": np.random.randn(100, 3).astype(np.float32),
"parameters": np.random.randn(100, 32).astype(np.float32)
}


@pytest.fixture
def mock_model_weights() -> Dict[str, np.ndarray]:
"""Provide mock model weights for testing."""
np.random.seed(123)
return {
"generator.weight": np.random.randn(512, 256).astype(np.float32),
"generator.bias": np.random.randn(512).astype(np.float32),
"discriminator.weight": np.random.randn(256, 512).astype(np.float32),
"discriminator.bias": np.random.randn(256).astype(np.float32)
}


@pytest.fixture
def sample_image_data() -> np.ndarray:
"""Generate sample image data for testing."""
np.random.seed(456)
return np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)


@pytest.fixture(autouse=True)
def setup_test_environment(monkeypatch):
"""Set up consistent test environment variables."""
monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "") # Disable GPU for tests
monkeypatch.setenv("PYTHONPATH", str(Path(__file__).parent.parent / "src"))


@pytest.fixture
def mock_device():
"""Mock device configuration for testing."""
return "cpu"


@pytest.fixture
def validation_tolerance() -> float:
"""Standard tolerance for numerical comparisons in tests."""
return 1e-6
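
Because these fixtures live in `conftest.py`, pytest injects them into any test in the suite by parameter name. A minimal sketch of a consuming test (hypothetical file, not part of this PR):

```python
# tests/unit/test_sample_data.py -- hypothetical example, not in this PR.
import pytest


@pytest.mark.unit
def test_roots_match_strands(sample_hair_data):
    # Fixtures from conftest.py are resolved by argument name.
    strands = sample_hair_data["strands"]  # (100, 64, 3) float32
    roots = sample_hair_data["roots"]      # (100, 3) float32
    # One root position per strand.
    assert strands.shape[0] == roots.shape[0]
    assert roots.shape[1] == 3
```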
Empty file added tests/integration/__init__.py
Empty file.
100 changes: 100 additions & 0 deletions tests/test_setup_validation.py
@@ -0,0 +1,100 @@
"""Validation tests to ensure the testing infrastructure is working correctly."""

import pytest
import sys
from pathlib import Path
import importlib.util


class TestSetupValidation:
"""Test class to validate the testing infrastructure setup."""

def test_pytest_is_working(self):
"""Test that pytest is functioning correctly."""
assert True

def test_coverage_is_configured(self):
"""Test that coverage configuration is working."""
# This test will show up in coverage reports if configured correctly
result = 2 + 2
assert result == 4

@pytest.mark.unit
def test_unit_marker_works(self, request):
"""Test that the unit test marker is working."""
markers = [marker.name for marker in request.node.iter_markers()]
assert "unit" in markers

@pytest.mark.integration
def test_integration_marker_works(self, request):
"""Test that the integration test marker is working."""
markers = [marker.name for marker in request.node.iter_markers()]
assert "integration" in markers

@pytest.mark.slow
def test_slow_marker_works(self, request):
"""Test that the slow test marker is working."""
markers = [marker.name for marker in request.node.iter_markers()]
assert "slow" in markers

def test_src_directory_importable(self):
"""Test that the src directory is importable."""
src_path = Path(__file__).parent.parent / "src"
assert src_path.exists(), "src directory should exist"

        # Check that import specs can be created for the main packages
        # (without actually executing the imports)
modules_to_test = [
"dnnlib",
"hair",
"models",
"torch_utils",
"training",
"utils"
]

for module_name in modules_to_test:
module_path = src_path / module_name / "__init__.py"
if module_path.exists():
spec = importlib.util.spec_from_file_location(module_name, module_path)
assert spec is not None, f"Could not create spec for {module_name}"

def test_fixtures_are_working(self, temp_dir, mock_config, sample_hair_data):
"""Test that the shared fixtures are working correctly."""
# Test temp_dir fixture
assert temp_dir.exists()
assert temp_dir.is_dir()

# Test mock_config fixture
assert isinstance(mock_config, dict)
assert "model" in mock_config
assert "training" in mock_config
assert "data" in mock_config

# Test sample_hair_data fixture
assert isinstance(sample_hair_data, dict)
assert "strands" in sample_hair_data
assert "roots" in sample_hair_data
assert "parameters" in sample_hair_data

# Verify data shapes are reasonable
assert sample_hair_data["strands"].shape == (100, 64, 3)
assert sample_hair_data["roots"].shape == (100, 3)
assert sample_hair_data["parameters"].shape == (100, 32)

def test_mock_utilities_work(self, mocker):
"""Test that pytest-mock is working correctly."""
# Test basic mocking functionality
mock_function = mocker.Mock(return_value=42)
result = mock_function()
assert result == 42
mock_function.assert_called_once()

    def test_value_fixtures(self, mock_device, validation_tolerance):
        """Test that simple value fixtures are injected correctly."""
assert mock_device == "cpu"
assert validation_tolerance == 1e-6

# Test numerical comparison with tolerance
a = 0.1 + 0.2
b = 0.3
assert abs(a - b) < validation_tolerance * 10 # Allow for floating point errors
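
Usage note (illustrative, not part of the PR): since `--strict-markers` is set in `addopts`, only the three markers registered in pyproject.toml are accepted, and `-m` expressions can select subsets. A sketch of a staged run, assuming the suite is invoked from the repository root:

```python
# staged_run.py -- hypothetical helper, not part of this PR.
# Runs the fast unit tests first, then integration tests minus anything
# tagged slow. Marker names must match those registered in pyproject.toml,
# since --strict-markers rejects unknown markers.
import subprocess
import sys

for expr in ("unit", "integration and not slow"):
    result = subprocess.run([sys.executable, "-m", "pytest", "-m", expr])
    if result.returncode != 0:
        sys.exit(result.returncode)
```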
Empty file added tests/unit/__init__.py
Empty file.