Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -158,3 +158,21 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Claude Code settings
.claude/*

# Additional IDE files
.vscode/
*.swp
*.swo
*~

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
539 changes: 539 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

80 changes: 80 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
[tool.poetry]
name = "textcraft-research"
version = "0.1.0"
description = "TextCraft research project with adaptive planning for text-based environments"
authors = ["Research Team <research@example.com>"]
readme = "README.md"
packages = [{include = "TextCraft"}]

[tool.poetry.dependencies]
python = "^3.8"
# Core dependencies - install these as needed based on requirements.txt
numpy = "^1.21.0"
requests = "^2.28.0"
pyyaml = "^6.0"

[tool.poetry.group.test.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.1"

# Poetry scripts for running tests
# Use: poetry run pytest or poetry run test-suite

[tool.pytest.ini_options]
minversion = "6.0"
addopts = [
"-ra",
"--strict-markers",
"--strict-config",
"--cov=TextCraft",
"--cov-report=html:htmlcov",
"--cov-report=xml:coverage.xml",
"--cov-report=term-missing",
"--cov-fail-under=80"
]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
"unit: marks tests as unit tests (deselect with '-m \"not unit\"')",
"integration: marks tests as integration tests (deselect with '-m \"not integration\"')",
"slow: marks tests as slow (deselect with '-m \"not slow\"')"
]

[tool.coverage.run]
source = ["TextCraft"]
omit = [
"*/tests/*",
"*/test_*",
"*/__pycache__/*",
"*/.*",
"setup.py",
"*/venv/*",
"*/.venv/*"
]

[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"if self.debug:",
"if settings.DEBUG",
"raise AssertionError",
"raise NotImplementedError",
"if 0:",
"if __name__ == .__main__.:"
]
show_missing = true
precision = 2

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Test package initialization
138 changes: 138 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
"""
Shared pytest fixtures for the TextCraft test suite.
"""
import os
import tempfile
import shutil
from pathlib import Path
from unittest.mock import Mock, MagicMock
import pytest


@pytest.fixture
def temp_dir():
    """Create a temporary directory for test files.

    Yields:
        Path: a fresh temporary directory, removed again after the test.
    """
    temp_path = tempfile.mkdtemp()
    try:
        yield Path(temp_path)
    finally:
        # Always attempt cleanup, even if teardown is reached via an
        # exception; ignore_errors keeps read-only or already-deleted
        # files from turning cleanup into a spurious test error.
        shutil.rmtree(temp_path, ignore_errors=True)


@pytest.fixture
def temp_file(temp_dir):
    """Provide a small pre-populated text file inside the temp directory."""
    # Materialise a fixture file the tests can read back and inspect.
    path = temp_dir / "test_file.txt"
    path.write_text("test content")
    return path


@pytest.fixture
def mock_openai_client():
    """Mock OpenAI client whose chat completion returns a canned reply."""
    # Build the response from the inside out: one choice carrying the
    # canned message text, wrapped in a response object.
    choice = Mock()
    choice.message.content = "Test response"
    response = Mock()
    response.choices = [choice]
    client = Mock()
    client.chat.completions.create.return_value = response
    return client


@pytest.fixture
def mock_environment():
    """Static LLM configuration values used by the tests."""
    config = dict(
        api_key='test-api-key',
        model='gpt-3.5-turbo',
        temperature=0.7,
        max_tokens=150,
    )
    return config


@pytest.fixture
def sample_crafting_tree():
    """Sample crafting tree data for testing."""
    # Two nodes (a raw resource and one craftable item) joined by a
    # single recipe edge.
    wood = {'id': 'wood', 'type': 'resource', 'available': True}
    stick = {'id': 'stick', 'type': 'craft', 'recipe': ['wood'], 'available': False}
    edge = {'from': 'wood', 'to': 'stick', 'quantity': 2}
    return {'nodes': [wood, stick], 'edges': [edge]}


@pytest.fixture
def mock_alfworld_env():
    """MagicMock standing in for an AlfWorld environment."""
    env = MagicMock()
    # reset -> (observation, info); step -> (observation, reward, done, info)
    env.configure_mock(**{
        "reset.return_value": ("Initial observation", {}),
        "step.return_value": ("Step observation", 0, False, {}),
    })
    return env


@pytest.fixture
def mock_textcraft_env():
    """MagicMock standing in for a TextCraft environment."""
    env = MagicMock()
    # reset -> state string; step -> (state, reward, done, info)
    env.configure_mock(**{
        "reset.return_value": "Initial state",
        "step.return_value": ("New state", 0, False, {}),
    })
    return env


@pytest.fixture
def sample_prompt_data():
    """Sample prompt data (system message plus few-shot examples)."""
    examples = [
        {'input': 'look around', 'output': 'You see a room with furniture.'},
        {'input': 'take key', 'output': 'You pick up the key.'},
    ]
    return {
        'system': 'You are a helpful assistant for text-based games.',
        'examples': examples,
    }


@pytest.fixture
def mock_file_system(tmp_path, monkeypatch):
    """Run the test with its working directory inside an isolated tmp_path."""
    # chdir via monkeypatch so the original cwd is restored automatically.
    sandbox = tmp_path
    monkeypatch.chdir(sandbox)
    return sandbox


@pytest.fixture(autouse=True)
def clean_environment():
    """Snapshot os.environ around every test and restore it afterwards."""
    snapshot = dict(os.environ)

    yield

    # Wipe anything the test added or changed, then put the snapshot back.
    os.environ.clear()
    os.environ.update(snapshot)


@pytest.fixture
def mock_json_response():
    """Mock JSON response for API testing."""
    payload = {
        'action': 'move north',
        'confidence': 0.95,
        'reasoning': 'The description suggests a door to the north.',
    }
    return {'status': 'success', 'data': payload}


# Pytest configuration hooks
def pytest_configure(config):
    """Register the project's custom markers with pytest.

    Args:
        config: the pytest Config object passed by the hook machinery.
    """
    marker_definitions = (
        "unit: mark test as a unit test",
        "integration: mark test as an integration test",
        "slow: mark test as slow running",
    )
    for definition in marker_definitions:
        config.addinivalue_line("markers", definition)
1 change: 1 addition & 0 deletions tests/integration/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Integration tests package initialization
96 changes: 96 additions & 0 deletions tests/test_setup_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
"""
Validation tests to ensure the testing infrastructure is set up correctly.
"""
import pytest
import sys
from pathlib import Path


class TestInfrastructureSetup:
"""Test that the testing infrastructure is properly configured."""

def test_pytest_is_working(self):
"""Verify that pytest is functioning correctly."""
assert True

def test_python_path_includes_project_root(self):
"""Verify that the project root is in the Python path."""
project_root = Path(__file__).parent.parent
assert str(project_root) in sys.path or any(
project_root.samefile(Path(p)) for p in sys.path if Path(p).exists()
)

@pytest.mark.unit
def test_unit_marker_works(self):
"""Test that the unit test marker is working."""
assert True

@pytest.mark.integration
def test_integration_marker_works(self):
"""Test that the integration test marker is working."""
assert True

@pytest.mark.slow
def test_slow_marker_works(self):
"""Test that the slow test marker is working."""
assert True

def test_temp_dir_fixture(self, temp_dir):
"""Test that the temp_dir fixture works correctly."""
assert temp_dir.exists()
assert temp_dir.is_dir()

def test_temp_file_fixture(self, temp_file):
"""Test that the temp_file fixture works correctly."""
assert temp_file.exists()
assert temp_file.is_file()
assert temp_file.read_text() == "test content"

def test_mock_openai_client_fixture(self, mock_openai_client):
"""Test that the mock OpenAI client fixture works."""
response = mock_openai_client.chat.completions.create()
assert response.choices[0].message.content == "Test response"

def test_mock_environment_fixture(self, mock_environment):
"""Test that the mock environment fixture works."""
assert 'api_key' in mock_environment
assert 'model' in mock_environment
assert mock_environment['model'] == 'gpt-3.5-turbo'

def test_sample_crafting_tree_fixture(self, sample_crafting_tree):
"""Test that the sample crafting tree fixture works."""
assert 'nodes' in sample_crafting_tree
assert 'edges' in sample_crafting_tree
assert len(sample_crafting_tree['nodes']) > 0

def test_textcraft_module_can_be_imported(self):
"""Test that the TextCraft module can be imported."""
try:
import TextCraft
assert True
except ImportError:
# If the module doesn't exist yet, that's okay for this validation
pytest.skip("TextCraft module not yet available for import")

def test_project_structure_exists(self):
"""Test that the expected project structure exists."""
project_root = Path(__file__).parent.parent

# Check for main directories
assert (project_root / "TextCraft").exists()
assert (project_root / "tests").exists()
assert (project_root / "tests" / "unit").exists()
assert (project_root / "tests" / "integration").exists()

# Check for configuration files
assert (project_root / "pyproject.toml").exists()

def test_pytest_mock_is_available(self):
"""Test that pytest-mock plugin is available."""
pytest_mock = pytest.importorskip("pytest_mock")
assert pytest_mock is not None

def test_coverage_is_available(self):
"""Test that coverage tools are available."""
coverage = pytest.importorskip("coverage")
assert coverage is not None
1 change: 1 addition & 0 deletions tests/unit/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Unit tests package initialization