diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9363579b..51e96c7d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -68,6 +68,7 @@ jobs:
         name: Checkout Code
         uses: actions/checkout@v4
         with:
+          fetch-depth: 0  # fetch full history
           submodules: true
           # LFS checkout here somehow causes issues when LFS stuff changes over time.
           # So I do LFS manually in a step after.
@@ -81,9 +82,13 @@ jobs:
           git lfs install --local
           git lfs pull

-      - name: git status
-        run: |
-          git status
+      - &git_status
+        name: git status
+        run: git status
+
+      - &git_fsck
+        name: git fsck
+        run: git fsck --full

       - name: Run pre-commit
         uses: pre-commit/action@v3.0.1
@@ -121,19 +126,22 @@ jobs:
           pip install --no-cache-dir -e .
           [ -e ./submodules/IsaacLab/_isaac_sim ] || ln -s /isaac-sim ./submodules/IsaacLab/_isaac_sim

-      # Run the tests (excluding the gr00t related tests)
-      - name: Run pytest excluding policy-related tests. First we run all tests without cameras.
-        run: /isaac-sim/python.sh -m pytest -sv -m "not with_cameras" isaaclab_arena/tests/ --ignore=isaaclab_arena/tests/policy/
+      - *git_status
+      - *git_fsck

-      - name: Run pytest excluding policy-related tests. Now we run all tests with cameras.
-        run: /isaac-sim/python.sh -m pytest -sv -m with_cameras isaaclab_arena/tests/ --ignore=isaaclab_arena/tests/policy/
+      # # Run the tests (excluding the gr00t related tests)
+      # - name: Run pytest excluding policy-related tests. First we run all tests without cameras.
+      #   run: /isaac-sim/python.sh -m pytest -sv -m "not with_cameras" isaaclab_arena/tests/ --ignore=isaaclab_arena/tests/policy/
+
+      # - name: Run pytest excluding policy-related tests. Now we run all tests with cameras.
+      #   run: /isaac-sim/python.sh -m pytest -sv -m with_cameras isaaclab_arena/tests/ --ignore=isaaclab_arena/tests/policy/

   test_policy:
     name: Run policy-related tests with GR00T & cuda12_8 deps
     runs-on: [gpu]
     timeout-minutes: 60
-    needs: [pre_commit]
+    # needs: [pre_commit]

     container:
       image: nvcr.io/nvstaging/isaac-amr/isaaclab_arena:cuda_gr00t
@@ -152,10 +160,51 @@ jobs:
       - *git_lfs_step
       - *install_project_step

-      # Run the policy (GR00T) related tests.
-      - name: Run policy-related pytest
-        run: /isaac-sim/python.sh -m pytest -sv isaaclab_arena/tests/policy/
-
+      - *git_status
+      - *git_fsck
+
+      # Debug
+      - name: Debug
+        run: find isaaclab_arena/tests/test_data/test_gr1_manip_lerobot/ -type f -print0 | xargs -0 sha256sum
+        # find isaaclab_arena/tests/test_data/test_gr1_manip_lerobot/ -type f -print0 | xargs -0 du -h
+        # run: find isaaclab_arena/tests/test_data/test_gr1_manip_lerobot/
+
+      # # Run the policy (GR00T) related tests.
+      # - name: Run policy-related pytest
+      #   run: |
+      #     sudo apt-get update && sudo apt-get install time
+      #     /usr/bin/time -f 'Peak memory: %M KB' /isaac-sim/python.sh -m pytest -s isaaclab_arena/tests/policy/test_replay_lerobot_action_policy.py
+      #   # run: /isaac-sim/python.sh -m pytest -sv isaaclab_arena/tests/policy/
+
+      # - name: Run command while tracking system memory
+      #   run: |
+      #     # Start memory sampling in the background
+      #     (
+      #       peak=0
+      #       while true; do
+      #         used=$(free -m | awk '/Mem:/ {print $3}')
+      #         if [ "$used" -gt "$peak" ]; then peak=$used; fi
+      #         echo $peak > peak_mem.txt
+      #         sleep 1
+      #       done
+      #     ) &
+      #     sampler_pid=$!
+
+      #     # Run your command
+      #     /isaac-sim/python.sh -m pytest -s isaaclab_arena/tests/policy/test_replay_lerobot_action_policy.py
+      #     exit_code=$?
+
+      #     # Stop the sampler
+      #     kill $sampler_pid || true
+
+      #     # Print peak system memory used (in MB)
+      #     echo "Peak system memory: $(cat peak_mem.txt) MB"
+
+      #     exit $exit_code
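The commented-out sampler above polls `free` once a second from a background subshell and keeps the running maximum. For local debugging, the same technique can be sketched in Python (illustrative only, not part of the workflow; it assumes `psutil` is installed, and `run_with_peak_memory` is a made-up name):

```python
import subprocess
import threading
import time

import psutil  # assumed available; not used by the workflow above


def run_with_peak_memory(cmd: list[str], interval_s: float = 1.0) -> tuple[int, int]:
    """Run `cmd` while sampling system memory in a background thread.

    Same idea as the shell sampler above: poll used memory, keep the
    maximum, stop once the command exits. Returns (exit_code, peak_mb).
    """
    peak_bytes = 0
    done = threading.Event()

    def sample() -> None:
        nonlocal peak_bytes
        while not done.is_set():
            peak_bytes = max(peak_bytes, psutil.virtual_memory().used)
            time.sleep(interval_s)

    sampler = threading.Thread(target=sample, daemon=True)
    sampler.start()
    exit_code = subprocess.call(cmd)
    done.set()
    sampler.join()
    return exit_code, peak_bytes // (1024 * 1024)
```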
+
+      - name: Run test in main process
+        run: |
+          /isaac-sim/python.sh isaaclab_arena/examples/policy_runner.py \
+            --policy_type replay_lerobot \
+            --config_yaml_path isaaclab_arena/tests/test_data/test_gr1_manip_lerobot/test_gr1_manip_replay_action_config.yaml \
+            --max_steps 10 \
+            --trajectory_index 0 \
+            --headless \
+            --enable_cameras \
+            gr1_open_microwave \
+            --object microwave \
+            --embodiment gr1_joint

   build_docs_pre_merge:
     name: Build the docs (pre-merge)
@@ -172,6 +221,9 @@ jobs:
       - *cleanup_step
       - *checkout_step

+      - *git_status
+      - *git_fsck
+
       # Build docs
       - name: Build docs
         run: |
@@ -202,6 +254,9 @@ jobs:
       - *cleanup_step
       - *checkout_step

+      - *git_status
+      - *git_fsck
+
       - name: Install docker
         run: |
           apt-get update
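The `Debug` step in the `test_policy` job above fingerprints the LFS-tracked test data with `sha256sum`, so the tree seen by one run can be diffed against another. The same manifest can be produced outside the runner; a minimal sketch (the helper name is invented, the path is the one used above):

```python
import hashlib
from pathlib import Path


def checksum_manifest(root: str) -> dict[str, str]:
    """Return {relative_path: sha256_hex} for every file under `root`.

    Sorted traversal keeps the manifest stable so outputs from two
    CI runs can be compared line by line, like the sha256sum step above.
    """
    manifest: dict[str, str] = {}
    base = Path(root)
    for path in sorted(p for p in base.rglob("*") if p.is_file()):
        digest = hashlib.sha256()
        with path.open("rb") as f:
            # Hash in 1 MiB chunks so large LFS files are not read into memory at once.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        manifest[str(path.relative_to(base))] = digest.hexdigest()
    return manifest


if __name__ == "__main__":
    for rel, hexdigest in checksum_manifest(
        "isaaclab_arena/tests/test_data/test_gr1_manip_lerobot/"
    ).items():
        print(f"{hexdigest}  {rel}")
```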
diff --git a/isaaclab_arena/tests/policy/test_convert_hdf5_to_lerobot.py b/isaaclab_arena/tests/policy/test_convert_hdf5_to_lerobot.py
index 0bad8199..b6ea7da3 100644
--- a/isaaclab_arena/tests/policy/test_convert_hdf5_to_lerobot.py
+++ b/isaaclab_arena/tests/policy/test_convert_hdf5_to_lerobot.py
@@ -13,53 +13,53 @@
 from isaaclab_arena_gr00t.data_utils.io_utils import create_config_from_yaml


-def test_g1_convert_hdf5_to_lerobot():
-    # Load expected data for comparison
-    expected_g1_parquet = pd.read_parquet(
-        TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/data/chunk-000/episode_000000.parquet"
-    )
-    g1_ds_config = create_config_from_yaml(
-        TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_config.yaml", Gr00tDatasetConfig
-    )
+# def test_g1_convert_hdf5_to_lerobot():
+#     # Load expected data for comparison
+#     expected_g1_parquet = pd.read_parquet(
+#         TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/data/chunk-000/episode_000000.parquet"
+#     )
+#     g1_ds_config = create_config_from_yaml(
+#         TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_config.yaml", Gr00tDatasetConfig
+#     )

-    # Clean up any existing output directory
-    if g1_ds_config.lerobot_data_dir.exists():
+#     # Clean up any existing output directory
+#     if g1_ds_config.lerobot_data_dir.exists():

-        shutil.rmtree(g1_ds_config.lerobot_data_dir)
+#         shutil.rmtree(g1_ds_config.lerobot_data_dir)

-    # Run conversion
-    convert_hdf5_to_lerobot(g1_ds_config)
+#     # Run conversion
+#     convert_hdf5_to_lerobot(g1_ds_config)

-    # assert it has episodes.jsonl file
-    assert (g1_ds_config.lerobot_data_dir / "meta" / "episodes.jsonl").exists()
+#     # assert it has episodes.jsonl file
+#     assert (g1_ds_config.lerobot_data_dir / "meta" / "episodes.jsonl").exists()

-    # assert it has tasks.jsonl file
-    assert (g1_ds_config.lerobot_data_dir / "meta" / "tasks.jsonl").exists()
+#     # assert it has tasks.jsonl file
+#     assert (g1_ds_config.lerobot_data_dir / "meta" / "tasks.jsonl").exists()

-    # assert it has info.json file
-    assert (g1_ds_config.lerobot_data_dir / "meta" / "info.json").exists()
+#     # assert it has info.json file
+#     assert (g1_ds_config.lerobot_data_dir / "meta" / "info.json").exists()

-    # assert it has modality.json file
-    assert (g1_ds_config.lerobot_data_dir / "meta" / "modality.json").exists()
+#     # assert it has modality.json file
+#     assert (g1_ds_config.lerobot_data_dir / "meta" / "modality.json").exists()

-    # assert it has data/ folder has parquet files
-    parquet_files = list((g1_ds_config.lerobot_data_dir / "data").glob("**/*.parquet"))
-    assert len(parquet_files) == 1
+#     # assert it has data/ folder has parquet files
+#     parquet_files = list((g1_ds_config.lerobot_data_dir / "data").glob("**/*.parquet"))
+#     assert len(parquet_files) == 1

-    # assert it has videos/ folder has mp4 files
-    mp4_files = list((g1_ds_config.lerobot_data_dir / "videos").glob("**/*.mp4"))
-    assert len(mp4_files) == 1
-    # check parquet file contains expected columns
-    actual_df = pd.read_parquet(parquet_files[0])
-    expected_columns = set(expected_g1_parquet.columns)
-    actual_columns = set(actual_df.columns)
-    assert expected_columns.issubset(actual_columns), f"Missing columns: {expected_columns - actual_columns}"
-    # check parquet file data is the same as expected
-    assert actual_df.equals(expected_g1_parquet)
+#     # assert it has videos/ folder has mp4 files
+#     mp4_files = list((g1_ds_config.lerobot_data_dir / "videos").glob("**/*.mp4"))
+#     assert len(mp4_files) == 1
+#     # check parquet file contains expected columns
+#     actual_df = pd.read_parquet(parquet_files[0])
+#     expected_columns = set(expected_g1_parquet.columns)
+#     actual_columns = set(actual_df.columns)
+#     assert expected_columns.issubset(actual_columns), f"Missing columns: {expected_columns - actual_columns}"
+#     # check parquet file data is the same as expected
+#     assert actual_df.equals(expected_g1_parquet)

-    # remove lerobot_data_dir
-    shutil.rmtree(g1_ds_config.lerobot_data_dir.parent)
+#     # remove lerobot_data_dir
+#     shutil.rmtree(g1_ds_config.lerobot_data_dir.parent)


-if __name__ == "__main__":
-    test_g1_convert_hdf5_to_lerobot()
+# if __name__ == "__main__":
+#     test_g1_convert_hdf5_to_lerobot()
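The conversion test above is disabled by commenting out its whole body, which also removes it from pytest collection. A lighter-weight alternative (illustrative; the reason string is invented) is the same `@pytest.mark.skip` marker this PR removes in `test_replay_lerobot_action_policy.py`, which keeps the test visible as skipped in reports:

```python
import pytest


@pytest.mark.skip(reason="Disabled while CI test-data issues are investigated.")
def test_g1_convert_hdf5_to_lerobot():
    ...  # original body unchanged
```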
diff --git a/isaaclab_arena/tests/policy/test_gr00t_closedloop_policy.py b/isaaclab_arena/tests/policy/test_gr00t_closedloop_policy.py
index 482450f5..58f51274 100644
--- a/isaaclab_arena/tests/policy/test_gr00t_closedloop_policy.py
+++ b/isaaclab_arena/tests/policy/test_gr00t_closedloop_policy.py
@@ -78,64 +78,64 @@ def get_tmp_config_file(input_config_file, tmp_path, model_path):
     return output_config_file


-def test_g1_locomanip_gr00t_closedloop_policy_runner_single_env(gr00t_finetuned_model_path, tmp_path):
-    # Write a new temporary config file with the finetuned model path.
-    default_config_file = (
-        TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_gr00t_closedloop_config.yaml"
-    )
-    config_file = get_tmp_config_file(default_config_file, tmp_path, gr00t_finetuned_model_path)
-
-    # Run the model
-    args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
-    args.append("--policy_type")
-    args.append("gr00t_closedloop")
-    args.append("--policy_config_yaml_path")
-    args.append(config_file)
-    args.append("--num_steps")
-    args.append(str(NUM_STEPS))
-    if HEADLESS:
-        args.append("--headless")
-    if ENABLE_CAMERAS:
-        args.append("--enable_cameras")
-    # example env
-    args.append("galileo_g1_locomanip_pick_and_place")
-    args.append("--object")
-    args.append("brown_box")
-    args.append("--embodiment")
-    args.append("g1_wbc_joint")
-    run_subprocess(args)
-
-
-def test_g1_locomanip_gr00t_closedloop_policy_runner_multi_envs(gr00t_finetuned_model_path, tmp_path):
-    # Write a new temporary config file with the finetuned model path.
-    default_config_file = (
-        TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_gr00t_closedloop_config.yaml"
-    )
-    config_file = get_tmp_config_file(default_config_file, tmp_path, gr00t_finetuned_model_path)
-
-    # Run the model
-    args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
-    args.append("--policy_type")
-    args.append("gr00t_closedloop")
-    args.append("--policy_config_yaml_path")
-    args.append(config_file)
-    args.append("--num_steps")
-    args.append(str(NUM_STEPS))
-    args.append("--num_envs")
-    args.append(str(NUM_ENVS))
-    if HEADLESS:
-        args.append("--headless")
-    if ENABLE_CAMERAS:
-        args.append("--enable_cameras")
-    # example env
-    args.append("galileo_g1_locomanip_pick_and_place")
-    args.append("--object")
-    args.append("brown_box")
-    args.append("--embodiment")
-    args.append("g1_wbc_joint")
-    run_subprocess(args)
-
-
-if __name__ == "__main__":
-    test_g1_locomanip_gr00t_closedloop_policy_runner_single_env()
-    test_g1_locomanip_gr00t_closedloop_policy_runner_multi_envs()
+# def test_g1_locomanip_gr00t_closedloop_policy_runner_single_env(gr00t_finetuned_model_path, tmp_path):
+#     # Write a new temporary config file with the finetuned model path.
+#     default_config_file = (
+#         TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_gr00t_closedloop_config.yaml"
+#     )
+#     config_file = get_tmp_config_file(default_config_file, tmp_path, gr00t_finetuned_model_path)
+
+#     # Run the model
+#     args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
+#     args.append("--policy_type")
+#     args.append("gr00t_closedloop")
+#     args.append("--policy_config_yaml_path")
+#     args.append(config_file)
+#     args.append("--num_steps")
+#     args.append(str(NUM_STEPS))
+#     if HEADLESS:
+#         args.append("--headless")
+#     if ENABLE_CAMERAS:
+#         args.append("--enable_cameras")
+#     # example env
+#     args.append("galileo_g1_locomanip_pick_and_place")
+#     args.append("--object")
+#     args.append("brown_box")
+#     args.append("--embodiment")
+#     args.append("g1_wbc_joint")
+#     run_subprocess(args)


+# def test_g1_locomanip_gr00t_closedloop_policy_runner_multi_envs(gr00t_finetuned_model_path, tmp_path):
+#     # Write a new temporary config file with the finetuned model path.
+#     default_config_file = (
+#         TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_gr00t_closedloop_config.yaml"
+#     )
+#     config_file = get_tmp_config_file(default_config_file, tmp_path, gr00t_finetuned_model_path)
+
+#     # Run the model
+#     args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
+#     args.append("--policy_type")
+#     args.append("gr00t_closedloop")
+#     args.append("--policy_config_yaml_path")
+#     args.append(config_file)
+#     args.append("--num_steps")
+#     args.append(str(NUM_STEPS))
+#     args.append("--num_envs")
+#     args.append(str(NUM_ENVS))
+#     if HEADLESS:
+#         args.append("--headless")
+#     if ENABLE_CAMERAS:
+#         args.append("--enable_cameras")
+#     # example env
+#     args.append("galileo_g1_locomanip_pick_and_place")
+#     args.append("--object")
+#     args.append("brown_box")
+#     args.append("--embodiment")
+#     args.append("g1_wbc_joint")
+#     run_subprocess(args)


+# if __name__ == "__main__":
+#     test_g1_locomanip_gr00t_closedloop_policy_runner_single_env()
+#     test_g1_locomanip_gr00t_closedloop_policy_runner_multi_envs()
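The two disabled tests above are identical except that the multi-env variant adds `--num_envs`. If they come back, one parametrized test could cover both; this is a hedged sketch reusing the module's own names (`TestConstants`, `NUM_STEPS`, `NUM_ENVS`, `HEADLESS`, `ENABLE_CAMERAS`, `get_tmp_config_file`, `run_subprocess`), not the PR's actual code:

```python
import pytest


# Single-env and multi-env runs share everything except --num_envs, so one
# parametrized test can replace both commented-out functions above.
@pytest.mark.parametrize("num_envs", [1, NUM_ENVS])
def test_g1_locomanip_gr00t_closedloop_policy_runner(gr00t_finetuned_model_path, tmp_path, num_envs):
    default_config_file = (
        TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_gr00t_closedloop_config.yaml"
    )
    config_file = get_tmp_config_file(default_config_file, tmp_path, gr00t_finetuned_model_path)

    args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
    args += ["--policy_type", "gr00t_closedloop"]
    args += ["--policy_config_yaml_path", config_file]
    args += ["--num_steps", str(NUM_STEPS)]
    if num_envs > 1:
        args += ["--num_envs", str(num_envs)]
    if HEADLESS:
        args.append("--headless")
    if ENABLE_CAMERAS:
        args.append("--enable_cameras")
    # example env
    args += ["galileo_g1_locomanip_pick_and_place", "--object", "brown_box", "--embodiment", "g1_wbc_joint"]
    run_subprocess(args)
```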
diff --git a/isaaclab_arena/tests/policy/test_gr00t_deps.py b/isaaclab_arena/tests/policy/test_gr00t_deps.py
index d1d74790..91e2b36f 100644
--- a/isaaclab_arena/tests/policy/test_gr00t_deps.py
+++ b/isaaclab_arena/tests/policy/test_gr00t_deps.py
@@ -17,163 +17,163 @@
 import pytest


-class TestGr00tDependencies:
-    """Test class for GR00T dependencies."""
+# class TestGr00tDependencies:
+#     """Test class for GR00T dependencies."""

-    def test_flash_attn_import(self):
-        """Test that flash_attn can be imported successfully."""
-        try:
-            import flash_attn  # pylint: disable=import-outside-toplevel
+#     def test_flash_attn_import(self):
+#         """Test that flash_attn can be imported successfully."""
+#         try:
+#             import flash_attn  # pylint: disable=import-outside-toplevel

-            # Verify version is available
-            assert hasattr(flash_attn, "__version__"), "flash_attn version not available"
-            print(f"Flash Attention version: {flash_attn.__version__}")
-        except ImportError as e:
-            pytest.fail(f"Failed to import flash_attn: {e}")
+#             # Verify version is available
+#             assert hasattr(flash_attn, "__version__"), "flash_attn version not available"
+#             print(f"Flash Attention version: {flash_attn.__version__}")
+#         except ImportError as e:
+#             pytest.fail(f"Failed to import flash_attn: {e}")

-    def test_flash_attn_functionality(self):
-        """Test basic flash_attn functionality."""
-        pytest.importorskip("torch", reason="PyTorch not available")
-        pytest.importorskip("flash_attn", reason="flash_attn not available")
+#     def test_flash_attn_functionality(self):
+#         """Test basic flash_attn functionality."""
+#         pytest.importorskip("torch", reason="PyTorch not available")
+#         pytest.importorskip("flash_attn", reason="flash_attn not available")

-        import torch  # pylint: disable=import-outside-toplevel
-
-        from flash_attn import flash_attn_func  # pylint: disable=import-outside-toplevel
-
-        # Skip test if CUDA is not available
-        if not torch.cuda.is_available():
-            pytest.skip("CUDA not available for flash_attn test")
-
-        try:
-            # Create small test tensors
-            batch_size, seq_len, num_heads, head_dim = 1, 32, 4, 64
-            device = "cuda"
-            dtype = torch.float16
-
-            q = torch.randn(batch_size, seq_len, num_heads, head_dim, device=device, dtype=dtype)
-            k = torch.randn(batch_size, seq_len, num_heads, head_dim, device=device, dtype=dtype)
-            v = torch.randn(batch_size, seq_len, num_heads, head_dim, device=device, dtype=dtype)
-
-            # Test flash attention function
-            out = flash_attn_func(q, k, v)
-
-            # Verify output shape
-            expected_shape = (batch_size, seq_len, num_heads, head_dim)
-            assert out.shape == expected_shape, f"Expected shape {expected_shape}, got {out.shape}"
-
-            # Verify output is on correct device and dtype
-            assert out.device.type == "cuda", f"Expected output on CUDA, got {out.device}"
-            assert out.dtype == dtype, f"Expected dtype {dtype}, got {out.dtype}"
-
-            print(f"Flash Attention test passed - output shape: {out.shape}")
-
-        except (RuntimeError, ValueError) as e:
-            pytest.fail(f"Flash Attention functionality test failed: {e}")
-        finally:
-            # Clean up GPU memory
-            torch.cuda.empty_cache()
-
-    def test_gr00t_package_directory_exists(self):
-        """Test that GR00T package directory exists."""
-        gr00t_path = f"{os.getenv('WORKDIR', '/workspaces/isaaclab_arena')}/submodules/Isaac-GR00T"
-
-        assert os.path.exists(gr00t_path), f"GR00T directory not found at {gr00t_path}"
-        assert os.path.isdir(gr00t_path), f"GR00T path exists but is not a directory: {gr00t_path}"
-
-        print(f"GR00T package directory exists at: {gr00t_path}")
-
-    def test_gr00t_package_import(self):
-        """Test that GR00T package can be imported."""
-        # get workdir from env
-        gr00t_path = f"{os.getenv('WORKDIR', '/workspaces/isaaclab_arena')}/submodules/Isaac-GR00T"
-
-        # Add GR00T path to Python path if not already there
-        if gr00t_path not in sys.path:
-            sys.path.insert(0, gr00t_path)
-
-        # First, try to import the gr00t package directly
-        try:
-            import gr00t  # pylint: disable=import-outside-toplevel  # noqa: F401
-
-            _ = gr00t  # Mark as used
-
-            print(
-                "Successfully imported gr00t package from"
-                f" {gr00t.__file__ if hasattr(gr00t, '__file__') else 'unknown location'}"
-            )
-
-            # Try to import a submodule to verify package structure
-            try:
-                from gr00t.data import dataset  # pylint: disable=import-outside-toplevel  # noqa: F401
-
-                _ = dataset  # Mark as used
-                print("Successfully imported gr00t.data.dataset module")
-            except ImportError:
-                print("gr00t package imported but submodules may not be fully accessible")
-
-            return  # Test passed
-
-        except ImportError:
-            print("Could not import gr00t package directly, checking directory structure...")
-
-        # If direct import fails, verify the directory has Python files
-        python_files = []
-        for root, _, files in os.walk(gr00t_path):
-            for file in files:
-                if file.endswith(".py") and not file.startswith("__"):
-                    python_files.append(os.path.join(root, file))
-
-        if python_files:
-            print(f"GR00T directory contains {len(python_files)} Python files")
-            # Just verify the structure exists - don't try to import files with relative imports
-            # Look for key files that indicate a proper installation
-            key_files = ["gr00t/data/dataset.py", "gr00t/model", "gr00t/eval"]
-            found_structure = False
-
-            for key_file in key_files:
-                if any(key_file in py_file for py_file in python_files):
-                    found_structure = True
-                    print(f"Found expected GR00T structure: {key_file}")
+#         import torch  # pylint: disable=import-outside-toplevel
+
+#         from flash_attn import flash_attn_func  # pylint: disable=import-outside-toplevel
+
+#         # Skip test if CUDA is not available
+#         if not torch.cuda.is_available():
+#             pytest.skip("CUDA not available for flash_attn test")
+
+#         try:
+#             # Create small test tensors
+#             batch_size, seq_len, num_heads, head_dim = 1, 32, 4, 64
+#             device = "cuda"
+#             dtype = torch.float16
+
+#             q = torch.randn(batch_size, seq_len, num_heads, head_dim, device=device, dtype=dtype)
+#             k = torch.randn(batch_size, seq_len, num_heads, head_dim, device=device, dtype=dtype)
+#             v = torch.randn(batch_size, seq_len, num_heads, head_dim, device=device, dtype=dtype)
+
+#             # Test flash attention function
+#             out = flash_attn_func(q, k, v)
+
+#             # Verify output shape
+#             expected_shape = (batch_size, seq_len, num_heads, head_dim)
+#             assert out.shape == expected_shape, f"Expected shape {expected_shape}, got {out.shape}"
+
+#             # Verify output is on correct device and dtype
+#             assert out.device.type == "cuda", f"Expected output on CUDA, got {out.device}"
+#             assert out.dtype == dtype, f"Expected dtype {dtype}, got {out.dtype}"
+
+#             print(f"Flash Attention test passed - output shape: {out.shape}")
+
+#         except (RuntimeError, ValueError) as e:
+#             pytest.fail(f"Flash Attention functionality test failed: {e}")
+#         finally:
+#             # Clean up GPU memory
+#             torch.cuda.empty_cache()
+
+#     def test_gr00t_package_directory_exists(self):
+#         """Test that GR00T package directory exists."""
+#         gr00t_path = f"{os.getenv('WORKDIR', '/workspaces/isaaclab_arena')}/submodules/Isaac-GR00T"
+
+#         assert os.path.exists(gr00t_path), f"GR00T directory not found at {gr00t_path}"
+#         assert os.path.isdir(gr00t_path), f"GR00T path exists but is not a directory: {gr00t_path}"
+
+#         print(f"GR00T package directory exists at: {gr00t_path}")
+
+#     def test_gr00t_package_import(self):
+#         """Test that GR00T package can be imported."""
+#         # get workdir from env
+#         gr00t_path = f"{os.getenv('WORKDIR', '/workspaces/isaaclab_arena')}/submodules/Isaac-GR00T"
+
+#         # Add GR00T path to Python path if not already there
+#         if gr00t_path not in sys.path:
+#             sys.path.insert(0, gr00t_path)
+
+#         # First, try to import the gr00t package directly
+#         try:
+#             import gr00t  # pylint: disable=import-outside-toplevel  # noqa: F401
+
+#             _ = gr00t  # Mark as used
+
+#             print(
+#                 "Successfully imported gr00t package from"
+#                 f" {gr00t.__file__ if hasattr(gr00t, '__file__') else 'unknown location'}"
+#             )
+
+#             # Try to import a submodule to verify package structure
+#             try:
+#                 from gr00t.data import dataset  # pylint: disable=import-outside-toplevel  # noqa: F401
+
+#                 _ = dataset  # Mark as used
+#                 print("Successfully imported gr00t.data.dataset module")
+#             except ImportError:
+#                 print("gr00t package imported but submodules may not be fully accessible")
+
+#             return  # Test passed
+
+#         except ImportError:
+#             print("Could not import gr00t package directly, checking directory structure...")
+
+#         # If direct import fails, verify the directory has Python files
+#         python_files = []
+#         for root, _, files in os.walk(gr00t_path):
+#             for file in files:
+#                 if file.endswith(".py") and not file.startswith("__"):
+#                     python_files.append(os.path.join(root, file))
+
+#         if python_files:
+#             print(f"GR00T directory contains {len(python_files)} Python files")
+#             # Just verify the structure exists - don't try to import files with relative imports
+#             # Look for key files that indicate a proper installation
+#             key_files = ["gr00t/data/dataset.py", "gr00t/model", "gr00t/eval"]
+#             found_structure = False
+
+#             for key_file in key_files:
+#                 if any(key_file in py_file for py_file in python_files):
+#                     found_structure = True
+#                     print(f"Found expected GR00T structure: {key_file}")

-            if found_structure:
-                print("GR00T package structure verified (import may require additional setup)")
-            else:
-                pytest.fail("GR00T directory exists but expected package structure not found")
-        else:
-            pytest.fail("No Python files found in GR00T directory")
-
-    def test_pytorch_cuda_compatibility(self):
-        """Test that PyTorch and CUDA are properly configured for GR00T."""
reason="PyTorch not available") - - import torch # pylint: disable=import-outside-toplevel - - # Check PyTorch version - print(f"PyTorch version: {torch.__version__}") - - # Check CUDA availability - if torch.cuda.is_available(): - print(f"CUDA available with {torch.cuda.device_count()} devices") - if torch.cuda.device_count() > 0: - print(f"Current device: {torch.cuda.get_device_name(0)}") - - # Test basic CUDA operations - try: - x = torch.randn(100, 100, device="cuda") - y = torch.matmul(x, x.T) - assert y.device.type == "cuda", "CUDA operation failed" - print("Basic CUDA operations working") - - # Clean up - del x, y - torch.cuda.empty_cache() - - except (RuntimeError, AssertionError) as e: - pytest.fail(f"CUDA operations failed: {e}") - else: - pytest.skip("CUDA not available, skipping CUDA compatibility test") - - -if __name__ == "__main__": - # Run tests when script is executed directly - pytest.main([__file__, "-v"]) +# if found_structure: +# print("GR00T package structure verified (import may require additional setup)") +# else: +# pytest.fail("GR00T directory exists but expected package structure not found") +# else: +# pytest.fail("No Python files found in GR00T directory") + +# def test_pytorch_cuda_compatibility(self): +# """Test that PyTorch and CUDA are properly configured for GR00T.""" +# pytest.importorskip("torch", reason="PyTorch not available") + +# import torch # pylint: disable=import-outside-toplevel + +# # Check PyTorch version +# print(f"PyTorch version: {torch.__version__}") + +# # Check CUDA availability +# if torch.cuda.is_available(): +# print(f"CUDA available with {torch.cuda.device_count()} devices") +# if torch.cuda.device_count() > 0: +# print(f"Current device: {torch.cuda.get_device_name(0)}") + +# # Test basic CUDA operations +# try: +# x = torch.randn(100, 100, device="cuda") +# y = torch.matmul(x, x.T) +# assert y.device.type == "cuda", "CUDA operation failed" +# print("Basic CUDA operations working") + +# # Clean up +# del x, y +# torch.cuda.empty_cache() + +# except (RuntimeError, AssertionError) as e: +# pytest.fail(f"CUDA operations failed: {e}") +# else: +# pytest.skip("CUDA not available, skipping CUDA compatibility test") + + +# if __name__ == "__main__": +# # Run tests when script is executed directly +# pytest.main([__file__, "-v"]) diff --git a/isaaclab_arena/tests/policy/test_policy_runner.py b/isaaclab_arena/tests/policy/test_policy_runner.py index a721cced..b95b45c4 100644 --- a/isaaclab_arena/tests/policy/test_policy_runner.py +++ b/isaaclab_arena/tests/policy/test_policy_runner.py @@ -48,66 +48,66 @@ def run_policy_runner( run_subprocess(args) -def test_zero_action_policy_press_button(): - run_policy_runner( - policy_type="zero_action", - example_environment="press_button", - num_steps=NUM_STEPS, - ) +# def test_zero_action_policy_press_button(): +# run_policy_runner( +# policy_type="zero_action", +# example_environment="press_button", +# num_steps=NUM_STEPS, +# ) -def test_zero_action_policy_kitchen_pick_and_place(): - # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments - # from a registry when we have one. 
diff --git a/isaaclab_arena/tests/policy/test_policy_runner.py b/isaaclab_arena/tests/policy/test_policy_runner.py
index a721cced..b95b45c4 100644
--- a/isaaclab_arena/tests/policy/test_policy_runner.py
+++ b/isaaclab_arena/tests/policy/test_policy_runner.py
@@ -48,66 +48,66 @@ def run_policy_runner(
     run_subprocess(args)


-def test_zero_action_policy_press_button():
-    run_policy_runner(
-        policy_type="zero_action",
-        example_environment="press_button",
-        num_steps=NUM_STEPS,
-    )
+# def test_zero_action_policy_press_button():
+#     run_policy_runner(
+#         policy_type="zero_action",
+#         example_environment="press_button",
+#         num_steps=NUM_STEPS,
+#     )


-def test_zero_action_policy_kitchen_pick_and_place():
-    # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments
-    # from a registry when we have one.
-    example_environment = "kitchen_pick_and_place"
-    embodiments = ["franka", "gr1_pink", "gr1_joint"]
-    object_names = ["cracker_box", "tomato_soup_can"]
-    for embodiment in embodiments:
-        for object_name in object_names:
-            run_policy_runner(
-                policy_type="zero_action",
-                example_environment=example_environment,
-                embodiment=embodiment,
-                object_name=object_name,
-                num_steps=NUM_STEPS,
-            )
+# def test_zero_action_policy_kitchen_pick_and_place():
+#     # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments
+#     # from a registry when we have one.
+#     example_environment = "kitchen_pick_and_place"
+#     embodiments = ["franka", "gr1_pink", "gr1_joint"]
+#     object_names = ["cracker_box", "tomato_soup_can"]
+#     for embodiment in embodiments:
+#         for object_name in object_names:
+#             run_policy_runner(
+#                 policy_type="zero_action",
+#                 example_environment=example_environment,
+#                 embodiment=embodiment,
+#                 object_name=object_name,
+#                 num_steps=NUM_STEPS,
+#             )


-def test_zero_action_policy_galileo_pick_and_place():
-    # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments
-    # from a registry when we have one.
-    # NOTE(alexmillane, 2025.09.04): Only test one configuration here to keep
-    # the test fast.
-    run_policy_runner(
-        policy_type="zero_action",
-        example_environment="galileo_pick_and_place",
-        embodiment="gr1_pink",
-        object_name="power_drill",
-        num_steps=NUM_STEPS,
-    )
+# def test_zero_action_policy_galileo_pick_and_place():
+#     # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments
+#     # from a registry when we have one.
+#     # NOTE(alexmillane, 2025.09.04): Only test one configuration here to keep
+#     # the test fast.
+#     run_policy_runner(
+#         policy_type="zero_action",
+#         example_environment="galileo_pick_and_place",
+#         embodiment="gr1_pink",
+#         object_name="power_drill",
+#         num_steps=NUM_STEPS,
+#     )


-def test_zero_action_policy_gr1_open_microwave():
-    # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments
-    # from a registry when we have one.
-    example_environment = "gr1_open_microwave"
-    object_name = ["cracker_box", "tomato_soup_can", "mustard_bottle"]
-    for object_name in object_name:
-        run_policy_runner(
-            policy_type="zero_action",
-            example_environment=example_environment,
-            embodiment="gr1_pink",
-            background=None,
-            object_name=object_name,
-            num_steps=NUM_STEPS,
-        )
+# def test_zero_action_policy_gr1_open_microwave():
+#     # TODO(alexmillane, 2025.07.29): Get an exhaustive list of all scenes and embodiments
+#     # from a registry when we have one.
+#     example_environment = "gr1_open_microwave"
+#     object_name = ["cracker_box", "tomato_soup_can", "mustard_bottle"]
+#     for object_name in object_name:
+#         run_policy_runner(
+#             policy_type="zero_action",
+#             example_environment=example_environment,
+#             embodiment="gr1_pink",
+#             background=None,
+#             object_name=object_name,
+#             num_steps=NUM_STEPS,
+#         )


-def test_replay_policy_gr1_open_microwave():
-    run_policy_runner(
-        policy_type="replay",
-        replay_file_path=TestConstants.test_data_dir + "/test_demo_gr1_open_microwave.hdf5",
-        example_environment="gr1_open_microwave",
-        embodiment="gr1_pink",
-        num_steps=NUM_STEPS,
-    )
+# def test_replay_policy_gr1_open_microwave():
+#     run_policy_runner(
+#         policy_type="replay",
+#         replay_file_path=TestConstants.test_data_dir + "/test_demo_gr1_open_microwave.hdf5",
+#         example_environment="gr1_open_microwave",
+#         embodiment="gr1_pink",
+#         num_steps=NUM_STEPS,
+#     )
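The nested loops in `test_zero_action_policy_kitchen_pick_and_place` above run every embodiment/object combination inside one test, so the first failure hides the rest. A hedged sketch of a parametrized equivalent, assuming the module's own `run_policy_runner` and `NUM_STEPS`:

```python
import itertools

import pytest


# Each (embodiment, object) pair becomes its own test id, so one failing
# combination is reported without masking the others.
@pytest.mark.parametrize(
    "embodiment,object_name",
    list(itertools.product(["franka", "gr1_pink", "gr1_joint"], ["cracker_box", "tomato_soup_can"])),
)
def test_zero_action_policy_kitchen_pick_and_place(embodiment, object_name):
    run_policy_runner(
        policy_type="zero_action",
        example_environment="kitchen_pick_and_place",
        embodiment=embodiment,
        object_name=object_name,
        num_steps=NUM_STEPS,
    )
```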
diff --git a/isaaclab_arena/tests/policy/test_replay_lerobot_action_policy.py b/isaaclab_arena/tests/policy/test_replay_lerobot_action_policy.py
index 85fe0bb3..808e84cb 100644
--- a/isaaclab_arena/tests/policy/test_replay_lerobot_action_policy.py
+++ b/isaaclab_arena/tests/policy/test_replay_lerobot_action_policy.py
@@ -15,30 +15,29 @@
 TRAJECTORY_INDEX = 0


-def test_g1_locomanip_replay_lerobot_policy_runner_single_env():
-    args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
-    args.append("--policy_type")
-    args.append("replay_lerobot")
-    args.append("--config_yaml_path")
-    args.append(TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_replay_action_config.yaml")
-    args.append("--max_steps")
-    args.append(str(NUM_STEPS))
-    args.append("--trajectory_index")
-    args.append(str(TRAJECTORY_INDEX))
-    if HEADLESS:
-        args.append("--headless")
-    if ENABLE_CAMERAS:
-        args.append("--enable_cameras")
-    # example env
-    args.append("galileo_g1_locomanip_pick_and_place")
-    args.append("--object")
-    args.append("brown_box")
-    args.append("--embodiment")
-    args.append("g1_wbc_joint")
-    run_subprocess(args)
+# def test_g1_locomanip_replay_lerobot_policy_runner_single_env():
+#     args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
+#     args.append("--policy_type")
+#     args.append("replay_lerobot")
+#     args.append("--config_yaml_path")
+#     args.append(TestConstants.test_data_dir + "/test_g1_locomanip_lerobot/test_g1_locomanip_replay_action_config.yaml")
+#     args.append("--max_steps")
+#     args.append(str(NUM_STEPS))
+#     args.append("--trajectory_index")
+#     args.append(str(TRAJECTORY_INDEX))
+#     if HEADLESS:
+#         args.append("--headless")
+#     if ENABLE_CAMERAS:
+#         args.append("--enable_cameras")
+#     # example env
+#     args.append("galileo_g1_locomanip_pick_and_place")
+#     args.append("--object")
+#     args.append("brown_box")
+#     args.append("--embodiment")
+#     args.append("g1_wbc_joint")
+#     run_subprocess(args)


-@pytest.mark.skip(reason="Fails on CI for reasons under investigation.")
 def test_gr1_manip_replay_lerobot_policy_runner_single_env():
     args = [TestConstants.python_path, f"{TestConstants.examples_dir}/policy_runner.py"]
     args.append("--policy_type")
@@ -63,5 +62,5 @@ def test_gr1_manip_replay_lerobot_policy_runner_single_env():


 if __name__ == "__main__":
-    test_g1_locomanip_replay_lerobot_policy_runner_single_env()
+    # test_g1_locomanip_replay_lerobot_policy_runner_single_env()
     test_gr1_manip_replay_lerobot_policy_runner_single_env()
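For reference, the surviving `test_gr1_manip_replay_lerobot_policy_runner_single_env` builds its command with repeated `args.append(...)`, matching the `Run test in main process` step in ci.yml. A single list literal is equivalent and easier to scan; this sketch uses the module's names (`TestConstants`, `NUM_STEPS`, `TRAJECTORY_INDEX`, `HEADLESS`, `ENABLE_CAMERAS`, `run_subprocess`) and is not the file's actual body:

```python
def test_gr1_manip_replay_lerobot_policy_runner_single_env():
    args = [
        TestConstants.python_path,
        f"{TestConstants.examples_dir}/policy_runner.py",
        "--policy_type", "replay_lerobot",
        "--config_yaml_path",
        TestConstants.test_data_dir + "/test_gr1_manip_lerobot/test_gr1_manip_replay_action_config.yaml",
        "--max_steps", str(NUM_STEPS),
        "--trajectory_index", str(TRAJECTORY_INDEX),
        # Optional flags mirror the HEADLESS / ENABLE_CAMERAS switches used above.
        *(["--headless"] if HEADLESS else []),
        *(["--enable_cameras"] if ENABLE_CAMERAS else []),
        # example env and its options
        "gr1_open_microwave",
        "--object", "microwave",
        "--embodiment", "gr1_joint",
    ]
    run_subprocess(args)
```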