10 changes: 10 additions & 0 deletions .gitignore
@@ -124,3 +124,13 @@ debug_*
*.swp

.tabnine_root

# Testing and Coverage
.pytest_cache/
.coverage
htmlcov/
coverage.xml
.coverage.*

# Claude Code settings
.claude/*
5,314 changes: 5,314 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

87 changes: 87 additions & 0 deletions pyproject.toml
@@ -0,0 +1,87 @@
[tool.poetry]
name = "transic"
version = "0.0.1"
description = "research project"
authors = ["TRANSIC Developers"]
readme = "README.md"
keywords = ["Robotics", "Reinforcement Learning", "Machine Learning"]
license = "Apache-2.0"
packages = [{include = "transic"}]

[tool.poetry.dependencies]
python = "^3.8"
rl-games = {version = "1.6.1", python = "<3.11"}
gym = "0.23.1"
hydra-core = "*"
h5py = "*"
dm-tree = "*"
einops = "*"
pytorch-lightning = "*"

[tool.poetry.group.test.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.1"

[tool.poetry.scripts]

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
minversion = "6.0"
addopts = [
    "-ra",
    "--strict-markers",
    "--strict-config",
    "--cov=transic",
    "--cov-report=term-missing",
    "--cov-report=html:htmlcov",
    "--cov-report=xml:coverage.xml",
    "--cov-fail-under=80"
]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
    "unit: marks tests as unit tests (deselect with '-m \"not unit\"')",
    "integration: marks tests as integration tests (deselect with '-m \"not integration\"')",
    "slow: marks tests as slow (deselect with '-m \"not slow\"')"
]

[tool.coverage.run]
source = ["transic"]
omit = [
    "*/tests/*",
    "*/conftest.py",
    "*/__init__.py",
    "*/setup.py",
    "*/venv/*",
    "*/.venv/*",
    "*/env/*",
    "*/.env/*"
]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "if self.debug:",
    "if settings.DEBUG",
    "raise AssertionError",
    "raise NotImplementedError",
    "if 0:",
    "if __name__ == .__main__.:",
    "class .*\\bProtocol\\):",
    "@(abc\\.)?abstractmethod"
]
show_missing = true
precision = 2

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"
Empty file added tests/__init__.py
Empty file.
182 changes: 182 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,182 @@
import pytest
import tempfile
from pathlib import Path
from unittest.mock import MagicMock
import torch
import numpy as np


@pytest.fixture
def temp_dir():
    """Create a temporary directory for test files."""
    with tempfile.TemporaryDirectory() as tmpdir:
        yield Path(tmpdir)


@pytest.fixture
def temp_file(temp_dir):
    """Create a temporary file for testing."""
    temp_file = temp_dir / "test_file.txt"
    temp_file.write_text("test content")
    return temp_file


@pytest.fixture
def mock_config():
    """Mock configuration object for testing."""
    config = MagicMock()
    config.name = "test_config"
    config.version = "1.0.0"
    config.debug = False
    return config


@pytest.fixture
def sample_tensor():
    """Create a sample torch tensor for testing."""
    return torch.randn(4, 4)


@pytest.fixture
def sample_numpy_array():
    """Create a sample numpy array for testing."""
    return np.random.randn(4, 4)


@pytest.fixture
def mock_device():
    """Mock torch device for testing."""
    return torch.device("cpu")


@pytest.fixture
def sample_pointcloud():
    """Create a sample point cloud for testing."""
    return torch.randn(1000, 3)  # 1000 points with x, y, z coordinates


@pytest.fixture
def mock_env():
    """Mock environment for RL testing."""
    env = MagicMock()
    env.observation_space = MagicMock()
    env.action_space = MagicMock()
    env.reset.return_value = torch.zeros(10)
    env.step.return_value = (torch.zeros(10), 0.0, False, {})
    return env


@pytest.fixture
def mock_policy():
    """Mock policy for testing."""
    policy = MagicMock()
    policy.forward.return_value = torch.randn(1, 5)
    policy.parameters.return_value = [torch.randn(10, 10)]
    return policy


@pytest.fixture
def mock_dataset():
    """Mock dataset for testing."""
    dataset = MagicMock()
    dataset.__len__.return_value = 100
    dataset.__getitem__.return_value = {
        'observations': torch.randn(10),
        'actions': torch.randn(5),
        'rewards': torch.tensor(1.0)
    }
    return dataset


@pytest.fixture
def mock_dataloader():
    """Mock dataloader for testing."""
    dataloader = MagicMock()
    sample_batch = {
        'observations': torch.randn(32, 10),
        'actions': torch.randn(32, 5),
        'rewards': torch.randn(32)
    }
    dataloader.__iter__.return_value = iter([sample_batch])
    dataloader.__len__.return_value = 10
    return dataloader


@pytest.fixture
def mock_wandb_logger():
    """Mock wandb logger for testing."""
    logger = MagicMock()
    logger.log_metrics = MagicMock()
    logger.log_hyperparams = MagicMock()
    return logger


@pytest.fixture
def sample_config_dict():
    """Sample configuration dictionary for testing."""
    return {
        'model': {
            'name': 'test_model',
            'layers': [64, 128, 64],
            'activation': 'relu'
        },
        'training': {
            'batch_size': 32,
            'learning_rate': 0.001,
            'epochs': 10
        },
        'data': {
            'train_path': '/path/to/train',
            'val_path': '/path/to/val',
            'test_path': '/path/to/test'
        }
    }


@pytest.fixture
def mock_lightning_module():
    """Mock PyTorch Lightning module for testing."""
    module = MagicMock()
    module.training_step.return_value = torch.tensor(0.5)
    module.validation_step.return_value = torch.tensor(0.3)
    module.test_step.return_value = torch.tensor(0.2)
    return module


@pytest.fixture
def mock_optimizer():
    """Mock optimizer for testing."""
    optimizer = MagicMock()
    optimizer.zero_grad = MagicMock()
    optimizer.step = MagicMock()
    optimizer.param_groups = [{'lr': 0.001}]
    return optimizer


@pytest.fixture
def sample_trajectory():
    """Sample trajectory data for RL testing."""
    return {
        'observations': torch.randn(100, 10),
        'actions': torch.randn(100, 5),
        'rewards': torch.randn(100),
        'dones': torch.zeros(100, dtype=torch.bool),
        'next_observations': torch.randn(100, 10)
    }


@pytest.fixture(autouse=True)
def set_random_seeds():
    """Set random seeds for reproducible testing."""
    torch.manual_seed(42)
    np.random.seed(42)

@pytest.fixture
def suppress_warnings():
    """Suppress common warnings during testing, restoring the filters afterwards."""
    import warnings

    # Scope the filter changes to the requesting test so they don't leak globally.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UserWarning)
        warnings.filterwarnings("ignore", category=FutureWarning)
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        yield
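Usage sketch (not part of this diff): the environment and policy mocks above are meant to compose roughly as follows; the test name and assertions are illustrative only.

import pytest


@pytest.mark.unit
def test_policy_acts_in_mock_env(mock_env, mock_policy):
    """Sketch: one interaction step using the mocked env and policy."""
    obs = mock_env.reset()
    assert obs.shape == (10,)

    action = mock_policy.forward(obs)
    assert action.shape == (1, 5)

    # mock_env.step is configured to return a gym-style 4-tuple.
    next_obs, reward, done, info = mock_env.step(action)
    assert next_obs.shape == (10,)
    assert reward == 0.0 and done is False and info == {}
    mock_env.step.assert_called_once()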
Empty file added tests/integration/__init__.py
Empty file.
99 changes: 99 additions & 0 deletions tests/test_infrastructure_validation.py
@@ -0,0 +1,99 @@
import pytest
import torch
import numpy as np
from pathlib import Path


class TestInfrastructureValidation:
    """Test suite to validate that the testing infrastructure is working correctly."""

    def test_pytest_is_working(self):
        """Basic test to ensure pytest is functioning."""
        assert True

    def test_fixtures_are_available(self, temp_dir, sample_tensor, mock_config):
        """Test that our custom fixtures are working."""
        assert isinstance(temp_dir, Path)
        assert temp_dir.exists()

        assert torch.is_tensor(sample_tensor)
        assert sample_tensor.shape == (4, 4)

        assert mock_config.name == "test_config"

    @pytest.mark.unit
    def test_unit_marker(self):
        """Test that unit marker is working."""
        assert True

    @pytest.mark.integration
    def test_integration_marker(self):
        """Test that integration marker is working."""
        assert True

    @pytest.mark.slow
    def test_slow_marker(self):
        """Test that slow marker is working."""
        import time
        time.sleep(0.1)  # Simulate slow test
        assert True

    def test_torch_available(self):
        """Test that PyTorch is available and working."""
        x = torch.randn(2, 3)
        assert x.shape == (2, 3)

    def test_numpy_available(self):
        """Test that NumPy is available and working."""
        x = np.random.randn(2, 3)
        assert x.shape == (2, 3)

    def test_temp_dir_fixture(self, temp_dir):
        """Test that temp_dir fixture works correctly."""
        test_file = temp_dir / "test.txt"
        test_file.write_text("hello world")

        assert test_file.exists()
        assert test_file.read_text() == "hello world"

    def test_mock_fixtures(self, mock_env, mock_policy, mock_dataset):
        """Test that mock fixtures are properly configured."""
        assert mock_env.reset.return_value.shape == (10,)
        assert mock_policy.forward.return_value.shape == (1, 5)
        assert len(mock_dataset) == 100

    def test_sample_data_fixtures(self, sample_pointcloud, sample_trajectory):
        """Test that sample data fixtures provide correct data."""
        assert sample_pointcloud.shape == (1000, 3)
        assert sample_trajectory['observations'].shape == (100, 10)
        assert sample_trajectory['actions'].shape == (100, 5)

    def test_random_seeds_are_set(self):
        """Test that random seeds are properly set for reproducibility."""
        torch_val1 = torch.randn(1).item()
        np_val1 = np.random.randn()

        # Reset seeds manually to verify they produce same results
        torch.manual_seed(42)
        np.random.seed(42)

        torch_val2 = torch.randn(1).item()
        np_val2 = np.random.randn()

        assert torch_val1 == torch_val2
        assert np_val1 == np_val2


def test_module_imports():
    """Test that the main transic module can be imported."""
    import transic

    # Check something meaningful: hasattr(transic, '__init__') is true for any object.
    assert transic.__name__ == "transic"


def test_coverage_integration():
    """Test that coverage measurement is working."""
    def dummy_function():
        return "covered"

    result = dummy_function()
    assert result == "covered"
Empty file added tests/unit/__init__.py
Empty file.
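As a pointer for the empty tests/unit/ package, a hypothetical unit test built on the conftest fixtures might look like the sketch below; the file path, test name, and assertions are placeholders, not part of this PR.

# tests/unit/test_training_step.py  (illustrative placement)
import pytest
import torch


@pytest.mark.unit
def test_training_step_returns_scalar_loss(mock_lightning_module, mock_dataloader):
    """Sketch: exercise the mocked Lightning module on one mocked batch."""
    batch = next(iter(mock_dataloader))
    assert batch['observations'].shape == (32, 10)

    loss = mock_lightning_module.training_step(batch, batch_idx=0)
    assert torch.is_tensor(loss)
    assert loss.item() == pytest.approx(0.5)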