diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6932310
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,162 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+Pipfile.lock
+
+# PEP 582
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# IDE files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+.project
+.pydevproject
+.settings/
+
+# OS files
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Claude specific
+.claude/*
+
+# Project specific
+*.h5
+*.pkl
+*.npy
+model/
+models/
+workspace/
+
+# Keep poetry.lock
+!poetry.lock
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..0ee0e9e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,86 @@
+[tool.poetry]
+name = "faceswap"
+version = "0.1.0"
+description = "Deepfakes Software For All"
+authors = ["Faceswap Team"]
+readme = "README.md"
+packages = [{include = "lib"}, {include = "plugins"}, {include = "scripts"}]
+
+[tool.poetry.dependencies]
+python = "^3.7"
+pathlib = "1.0.1"
+scandir = "1.6"
+h5py = "2.7.1"
+keras = "2.1.2"
+opencv-python = ">=3.3.0.10"
+tensorflow = ">=1.4.1"
+scikit-image = "*"
+dlib = "*"
+face-recognition = "*"
+tqdm = "*"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4.0"
+pytest-cov = "^4.1.0"
+pytest-mock = "^3.11.1"
+
+[tool.poetry.scripts]
+test = "pytest:main"
+tests = "pytest:main"
+
+[tool.pytest.ini_options]
+minversion = "7.0"
+testpaths = ["tests"]
+python_files = ["test_*.py", "*_test.py"]
+python_classes = ["Test*"]
+python_functions = ["test_*"]
+addopts = [
+    "-ra",
+    "--strict-markers",
+    "--strict-config",
+    "--cov=lib",
+    "--cov=plugins",
+    "--cov=scripts",
+    "--cov-branch",
+    "--cov-report=term-missing:skip-covered",
+    "--cov-report=html",
+    "--cov-report=xml",
+    "--cov-fail-under=0",  # Set to 0 for infrastructure setup, increase when adding real tests
+]
+markers = [
+    "unit: Unit tests",
+    "integration: Integration tests",
+    "slow: Slow running tests",
+]
+
+[tool.coverage.run]
+source = ["lib", "plugins", "scripts"]
+omit = [
+    "*/tests/*",
+    "*/__pycache__/*",
+    "*/venv/*",
+    "*/.venv/*",
+]
+
+[tool.coverage.report]
+exclude_lines = [
+    "pragma: no cover",
+    "def __repr__",
+    "raise AssertionError",
+    "raise NotImplementedError",
+    "if __name__ == .__main__.:",
+    "if TYPE_CHECKING:",
+]
+show_missing = true
+skip_covered = false
+fail_under = 0  # Set to 0 for infrastructure setup, increase when adding real tests
+
+[tool.coverage.html]
+directory = "htmlcov"
+
+[tool.coverage.xml]
+output = "coverage.xml"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..18ce5bb
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,181 @@
+"""Shared pytest fixtures and configuration for all tests."""
+import os
+import sys
+import tempfile
+import shutil
+from pathlib import Path
+from unittest.mock import MagicMock
+
+import pytest
+import numpy as np
+
+try:
+    import cv2
+    CV2_AVAILABLE = True
+except ImportError:
+    CV2_AVAILABLE = False
+    cv2 = None
+
+# Add the project root to the Python path
+project_root = Path(__file__).parent.parent
+sys.path.insert(0, str(project_root))
+
+
+@pytest.fixture
+def temp_dir():
+    """Create a temporary directory for test files."""
+    temp_path = tempfile.mkdtemp()
+    yield Path(temp_path)
+    shutil.rmtree(temp_path)
+
+
+@pytest.fixture
+def sample_image(temp_dir):
+    """Create a sample image for testing."""
+    image_path = temp_dir / "test_image.jpg"
+    # Create a simple 100x100 RGB image
+    image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
+    if CV2_AVAILABLE:
+        cv2.imwrite(str(image_path), image)
+    else:
+        # Save using numpy if cv2 not available
+        np.save(str(image_path.with_suffix('.npy')), image)
+        image_path = image_path.with_suffix('.npy')
+    return image_path
+
+
+@pytest.fixture
+def sample_video(temp_dir):
+    """Create a sample video for testing."""
+    video_path = temp_dir / "test_video.mp4"
+    if CV2_AVAILABLE:
+        # Create a simple video with 10 frames
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        out = cv2.VideoWriter(str(video_path), fourcc, 10.0, (100, 100))
+
+        for _ in range(10):
+            frame = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
+            out.write(frame)
+
+        out.release()
+    else:
+        # Create empty file if cv2 not available
+        video_path.touch()
+    return video_path
+
+
+@pytest.fixture
+def mock_face_detector():
+    """Mock face detector for testing."""
+    detector = MagicMock()
+    detector.detect.return_value = [
+        {
+            'x': 10,
+            'y': 10,
+            'w': 50,
+            'h': 50,
+            'landmarks': [(15, 15), (25, 15), (20, 25), (15, 35), (25, 35)]
+        }
+    ]
+    return detector
+
+
+@pytest.fixture
+def mock_model():
+    """Mock deep learning model for testing."""
+    model = MagicMock()
+    model.predict.return_value = np.random.random((1, 64, 64, 3))
+    model.train_on_batch.return_value = [0.1, 0.95]  # loss, accuracy
+    return model
+
+
+@pytest.fixture
+def sample_config():
+    """Sample configuration dictionary for testing."""
+    return {
+        'model': 'original',
+        'trainer': 'original',
+        'converter': 'masked',
+        'penalized_mask_loss': True,
+        'mask_type': 'facehullandrect',
+        'erosion_kernel_size': 0,
+        'blur_size': 2,
+        'seamless_clone': False,
+        'mask_threshold': 0.4,
+        'erosion_passes': 1,
+        'gpus': 1,
+        'batch_size': 64,
+        'rotation_range': 10,
+        'zoom_range': 0.05,
+        'shift_range': 0.05,
+        'flip_chance': 0.5,
+    }
+
+
+@pytest.fixture
+def mock_cli_args():
+    """Mock command line arguments."""
+    class Args:
+        def __init__(self):
+            self.input_dir = '/tmp/input'
+            self.output_dir = '/tmp/output'
+            self.model_dir = '/tmp/model'
+            self.trainer = 'original'
+            self.batch_size = 64
+            self.gpus = 1
+            self.debug = False
+            self.verbose = False
+            self.write_image = False
+            self.preview = False
+            self.timelapse_input_A = '/tmp/timelapse_a'
+            self.timelapse_input_B = '/tmp/timelapse_b'
+            self.timelapse_output = '/tmp/timelapse_out'
+
+    return Args()
+
+
+@pytest.fixture
+def sample_training_data(temp_dir):
+    """Create sample training data structure."""
+    data_a_dir = temp_dir / "data_A"
+    data_b_dir = temp_dir / "data_B"
+    data_a_dir.mkdir()
+    data_b_dir.mkdir()
+
+    # Create sample face images
+    for i in range(5):
+        for data_dir, prefix in [(data_a_dir, "a"), (data_b_dir, "b")]:
+            img_path = data_dir / f"{prefix}_face_{i}.jpg"
+            image = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
+            if CV2_AVAILABLE:
+                cv2.imwrite(str(img_path), image)
+            else:
+                np.save(str(img_path.with_suffix('.npy')), image)
+
+    return {
+        'data_A': str(data_a_dir),
+        'data_B': str(data_b_dir)
+    }
+
+
+@pytest.fixture(autouse=True)
+def reset_modules():
+    """Reset any global state between tests."""
+    # This fixture runs automatically before each test
+    yield
+    # Cleanup code here if needed
+
+
+@pytest.fixture
+def capture_output():
+    """Capture stdout and stderr for testing print statements."""
+    import io
+    from contextlib import redirect_stdout, redirect_stderr
+
+    stdout = io.StringIO()
+    stderr = io.StringIO()
+
+    def _capture():
+        return redirect_stdout(stdout), redirect_stderr(stderr), stdout, stderr
+
+    return _capture
\ No newline at end of file
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_infrastructure_validation.py b/tests/test_infrastructure_validation.py
new file mode 100644
index 0000000..365f55c
--- /dev/null
+++ b/tests/test_infrastructure_validation.py
@@ -0,0 +1,184 @@
+"""Validation tests to ensure the testing infrastructure is properly set up."""
+import sys
+from pathlib import Path
+
+import pytest
+import numpy as np
+
+try:
+    import cv2
+    CV2_AVAILABLE = True
+except ImportError:
+    CV2_AVAILABLE = False
+
+
+class TestInfrastructureValidation:
+    """Test class to validate the testing infrastructure setup."""
+
+    def test_project_imports(self):
+        """Test that project modules can be imported."""
+        # Test importing main modules
+        import lib
+        import plugins
+        import scripts
+
+        # Test specific imports - skip if dependencies missing
+        try:
+            from lib import utils
+            from plugins import PluginLoader
+        except ImportError:
+            pytest.skip("Some dependencies not available")
+
+        assert True  # If we get here, imports worked
+
+    def test_pytest_markers(self, request):
+        """Test that custom pytest markers are registered."""
+        # Registered markers are exposed via the "markers" ini option
+        marker_names = [m.split(":")[0].strip() for m in request.config.getini("markers")]
+        assert 'unit' in marker_names
+        assert 'integration' in marker_names
+        assert 'slow' in marker_names
+
+    @pytest.mark.unit
+    def test_unit_marker(self):
+        """Test unit marker works."""
+        assert True
+
+    @pytest.mark.integration
+    def test_integration_marker(self):
+        """Test integration marker works."""
+        assert True
+
+    @pytest.mark.slow
+    def test_slow_marker(self):
+        """Test slow marker works."""
+        assert True
+
+    def test_fixtures_available(self, temp_dir, sample_image, mock_face_detector,
+                                mock_model, sample_config, mock_cli_args):
+        """Test that all custom fixtures are available and working."""
+        # Test temp_dir fixture
+        assert temp_dir.exists()
+        assert temp_dir.is_dir()
+
+        # Test sample_image fixture
+        assert sample_image.exists()
+        assert sample_image.suffix in ['.jpg', '.npy']  # Allow both formats
+        if CV2_AVAILABLE and sample_image.suffix == '.jpg':
+            image = cv2.imread(str(sample_image))
+            assert image is not None
+            assert image.shape == (100, 100, 3)
+
+        # Test mock_face_detector fixture
+        faces = mock_face_detector.detect()
+        assert len(faces) == 1
+        assert 'x' in faces[0]
+        assert 'landmarks' in faces[0]
+
+        # Test mock_model fixture
+        prediction = mock_model.predict(np.random.random((1, 64, 64, 3)))
+        assert prediction.shape == (1, 64, 64, 3)
+
+        # Test sample_config fixture
+        assert 'model' in sample_config
+        assert sample_config['model'] == 'original'
+        assert 'batch_size' in sample_config
+
+        # Test mock_cli_args fixture
+        assert hasattr(mock_cli_args, 'input_dir')
+        assert hasattr(mock_cli_args, 'batch_size')
+        assert mock_cli_args.batch_size == 64
+
+    @pytest.mark.skipif(not CV2_AVAILABLE, reason="OpenCV not available")
+    def test_sample_video_fixture(self, sample_video):
+        """Test that sample video fixture creates a valid video."""
+        assert sample_video.exists()
+        assert sample_video.suffix == '.mp4'
+
+        # Test reading the video
+        cap = cv2.VideoCapture(str(sample_video))
+        assert cap.isOpened()
+
+        frame_count = 0
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break
+            frame_count += 1
+
+        cap.release()
+        assert frame_count == 10  # We created 10 frames
+
+    def test_sample_training_data_fixture(self, sample_training_data):
+        """Test that sample training data fixture creates proper structure."""
+        assert 'data_A' in sample_training_data
+        assert 'data_B' in sample_training_data
+
+        data_a_path = Path(sample_training_data['data_A'])
+        data_b_path = Path(sample_training_data['data_B'])
+
+        assert data_a_path.exists()
+        assert data_b_path.exists()
+
+        # Check that files were created (either .jpg or .npy)
+        a_files = list(data_a_path.glob('*.jpg')) + list(data_a_path.glob('*.npy'))
+        b_files = list(data_b_path.glob('*.jpg')) + list(data_b_path.glob('*.npy'))
+
+        assert len(a_files) == 5
+        assert len(b_files) == 5
+
+    def test_capture_output_fixture(self, capture_output):
+        """Test that output capture fixture works."""
+        redirect_out, redirect_err, stdout, stderr = capture_output()
+
+        with redirect_out, redirect_err:
+            print("Test stdout")
+            print("Test stderr", file=sys.stderr)
+
+        assert stdout.getvalue() == "Test stdout\n"
+        assert stderr.getvalue() == "Test stderr\n"
+
+    def test_coverage_configured(self):
+        """Test that coverage is properly configured."""
+        # This test will pass if coverage is running (which it should be
+        # based on our pytest configuration)
+        import coverage
+        assert hasattr(coverage, 'Coverage')
+
+    def test_project_structure(self):
+        """Test that the project has the expected structure."""
+        project_root = Path(__file__).parent.parent
+
+        # Check main directories exist
+        assert (project_root / 'lib').exists()
+        assert (project_root / 'plugins').exists()
+        assert (project_root / 'scripts').exists()
+        assert (project_root / 'tests').exists()
+
+        # Check test structure
+        assert (project_root / 'tests' / '__init__.py').exists()
+        assert (project_root / 'tests' / 'conftest.py').exists()
+        assert (project_root / 'tests' / 'unit').exists()
+        assert (project_root / 'tests' / 'integration').exists()
+
+        # Check pyproject.toml exists
+        assert (project_root / 'pyproject.toml').exists()
+
+
+def test_basic_assertion():
+    """Most basic test to ensure pytest runs."""
+    assert True
+
+
+def test_numpy_available():
+    """Test that numpy is available for tests."""
+    arr = np.array([1, 2, 3])
+    assert arr.shape == (3,)
+
+
+@pytest.mark.skipif(not CV2_AVAILABLE, reason="OpenCV not available")
+def test_opencv_available():
+    """Test that OpenCV is available for tests."""
+    # Create a simple image
+    img = np.zeros((10, 10, 3), dtype=np.uint8)
+    assert img.shape == (10, 10, 3)
\ No newline at end of file
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
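
Note (not part of the diff above): once this infrastructure is merged, real tests would live under tests/unit/ and tests/integration/ and can lean on the shared fixtures from tests/conftest.py plus the markers registered in pyproject.toml. The following is a minimal sketch of such a test; the file name tests/unit/test_example.py and the test names are hypothetical, but the fixtures (mock_model, sample_config) and the unit marker are the ones defined in this change set.

# tests/unit/test_example.py -- hypothetical example, not included in this diff
import numpy as np
import pytest


@pytest.mark.unit
def test_mock_model_prediction_shape(mock_model):
    """The mocked model from conftest.py returns a (1, 64, 64, 3) prediction."""
    prediction = mock_model.predict(np.zeros((1, 64, 64, 3), dtype=np.float32))
    assert prediction.shape == (1, 64, 64, 3)


@pytest.mark.unit
def test_sample_config_defaults(sample_config):
    """The sample_config fixture exposes the default trainer settings."""
    assert sample_config['trainer'] == 'original'
    assert sample_config['batch_size'] == 64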