Commit 8ddc493

Panaetius (Ralf Grubenmann) authored and committed
tests: improve performance of tests by caching repos
1 parent: e02e5bf

File tree: 146 files changed, 442 additions and 306 deletions


.github/workflows/test_deploy.yml

Lines changed: 2 additions & 1 deletion

@@ -14,7 +14,8 @@ on:
     branches:
     - "**"
     - "!master"
-
+env:
+  RENKU_TEST_RECREATE_CACHE: "${{ (endsWith(github.ref, 'master') || endsWith(github.ref, 'develop') || startsWith(github.ref, 'refs/tags/') || startsWith(github.ref, 'refs/heads/release/' ) ) && '1' || '0' }}"
 jobs:
   set-matrix:
     runs-on: ubuntu-latest
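
The RENKU_TEST_RECREATE_CACHE expression uses the GitHub Actions ternary idiom (condition && value-if-true || value-if-false). A hypothetical Python rendering of the same decision, to make the branch logic explicit (the function name is illustrative, not part of the commit):

    def recreate_cache(ref: str) -> str:
        """Rebuild the cached test repos only on long-lived branches, tags, and release branches."""
        rebuild = (
            ref.endswith("master")
            or ref.endswith("develop")
            or ref.startswith("refs/tags/")
            or ref.startswith("refs/heads/release/")
        )
        return "1" if rebuild else "0"

    assert recreate_cache("refs/heads/master") == "1"
    assert recreate_cache("refs/heads/my-feature") == "0"

Feature branches get "0", so their test runs reuse the existing cache.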

conftest.py

Lines changed: 2 additions & 0 deletions

@@ -79,10 +79,12 @@ def pytest_configure(config):
     os.environ["RENKU_DISABLE_VERSION_CHECK"] = "1"
     # NOTE: Set an env var during during tests to mark that Renku is running in a test session.
     os.environ["RENKU_RUNNING_UNDER_TEST"] = "1"
+    os.environ["RENKU_SKIP_HOOK_CHECKS"] = "1"


 def pytest_unconfigure(config):
     """Hook that is called by pytest after all tests are executed."""
     os.environ.pop("RENKU_SKIP_MIN_VERSION_CHECK", None)
     os.environ.pop("RENKU_DISABLE_VERSION_CHECK", None)
     os.environ.pop("RENKU_RUNNING_UNDER_TEST", None)
+    os.environ.pop("RENKU_SKIP_HOOK_CHECKS", None)

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -244,7 +244,7 @@ pattern = """(?x) (?# ignore whitespace
 """

 [tool.pytest.ini_options]
-addopts = "--flake8 --black --doctest-glob=\"*.rst\" --doctest-modules --cov --cov-report=term-missing --ignore=docs/cheatsheet/"
+addopts = "--doctest-glob=\"*.rst\" --doctest-modules --cov --cov-report=term-missing --ignore=docs/cheatsheet/"
 doctest_optionflags = "ALLOW_UNICODE"
 flake8-ignore = ["*.py", "E121", "E126", "E203", "E226", "E231", "W503", "W504", "docs/conf.py", "docs/cheatsheet/conf.py", "ALL"]
 flake8-max-line-length = 120
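
Together with the run-tests.sh change below, this removes the --black and --flake8 style checks from pytest's default options, so they no longer run as part of every test invocation; the docs/style step invokes them explicitly instead.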

renku/data/pre-commit.sh

Lines changed: 4 additions & 0 deletions

@@ -19,6 +19,10 @@
 # RENKU HOOK. DO NOT REMOVE OR MODIFY.
 ######################################

+if [ "$RENKU_SKIP_HOOK_CHECKS" == "1" ]; then
+    exit 0
+fi
+
 # Find all modified or added files, and do nothing if there aren't any.
 export RENKU_DISABLE_VERSION_CHECK=true
 IFS=$'\n' read -r -d '' -a MODIFIED_FILES \
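
Since conftest.py now exports RENKU_SKIP_HOOK_CHECKS=1 for the whole test session, this guard makes the installed pre-commit hook exit immediately during tests, so the many commits the test suite creates skip the hook's file checks.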

run-tests.sh

Lines changed: 1 addition & 1 deletion

@@ -47,7 +47,7 @@ check_styles(){
 build_docs(){
     sphinx-build -qnNW docs docs/_build/html
     sphinx-build -nNW -b spelling -d docs/_build/doctrees docs docs/_build/spelling
-    pytest -v -m "not integration and not publish" -o testpaths="docs conftest.py" --ignore=docs/conf.py
+    pytest --black --flake8 -v -m "not integration and not publish" -o testpaths="docs conftest.py" --ignore=docs/conf.py
 }

 run_tests(){

tests/cli/fixtures/cli_workflow.py

Lines changed: 16 additions & 13 deletions

@@ -19,19 +19,22 @@


 @pytest.fixture
-def workflow_graph(run_shell, project):
+def workflow_graph(run_shell, project, cache_test_project):
     """Setup a project with a workflow graph."""
+    cache_test_project.set_name("workflow_graph_fixture")
+    if not cache_test_project.setup():

-    def _run_workflow(name, command, extra_args=""):
-        output = run_shell(f"renku run --name {name} {extra_args} -- {command}")
-        # Assert not allocated stderr.
-        assert output[1] is None
+        def _run_workflow(name, command, extra_args=""):
+            output = run_shell(f"renku run --name {name} {extra_args} -- {command}")
+            # Assert not allocated stderr.
+            assert output[1] is None

-    _run_workflow("r1", "echo 'test' > A")
-    _run_workflow("r2", "tee B C < A")
-    _run_workflow("r3", "cp A Z")
-    _run_workflow("r4", "cp B X")
-    _run_workflow("r5", "cat C Z > Y")
-    _run_workflow("r6", "bash -c 'cat X Y | tee R S'", extra_args="--input X --input Y --output R --output S")
-    _run_workflow("r7", "echo 'other' > H")
-    _run_workflow("r8", "tee I J < H")
+        _run_workflow("r1", "echo 'test' > A")
+        _run_workflow("r2", "tee B C < A")
+        _run_workflow("r3", "cp A Z")
+        _run_workflow("r4", "cp B X")
+        _run_workflow("r5", "cat C Z > Y")
+        _run_workflow("r6", "bash -c 'cat X Y | tee R S'", extra_args="--input X --input Y --output R --output S")
+        _run_workflow("r7", "echo 'other' > H")
+        _run_workflow("r8", "tee I J < H")
+        cache_test_project.save()
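
The cache_test_project fixture itself is among the 146 changed files but is hidden in this view. A minimal sketch of the contract the tests rely on (set_name() to pick a cache key, setup() returning True on a cache hit, save() to populate the cache), assuming the cache stores a copy of the repository directory; names and details here are illustrative, not the commit's actual implementation:

    import os
    import shutil
    from pathlib import Path


    class ProjectCacheSketch:
        """Copy a fully set-up test repository to/from a cache directory."""

        def __init__(self, project_path: Path, cache_dir: Path, name: str):
            self.project_path = project_path
            self.cache_dir = cache_dir
            self.name = name  # by default, derived from the current test's name

        def set_name(self, name: str) -> None:
            # Shared fixtures override the key so all their users hit one cache entry.
            self.name = name

        def setup(self) -> bool:
            """Restore the cached repository; return True on a cache hit."""
            entry = self.cache_dir / self.name
            if os.environ.get("RENKU_TEST_RECREATE_CACHE") == "1" or not entry.exists():
                return False  # caller builds the repository and calls save()
            shutil.rmtree(self.project_path)
            shutil.copytree(entry, self.project_path)
            return True

        def save(self) -> None:
            """Store the freshly built repository for later runs."""
            entry = self.cache_dir / self.name
            if not entry.exists():
                shutil.copytree(self.project_path, entry)

This is also where the RENKU_TEST_RECREATE_CACHE variable from test_deploy.yml would plug in: on master, develop, tags, and release branches, setup() reports a miss, the test re-creates the repository, and save() refreshes the cache.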

tests/cli/test_datasets.py

Lines changed: 4 additions & 4 deletions

@@ -1770,7 +1770,7 @@ def test_pull_data_from_lfs(runner, project, tmpdir, subdirectory, no_lfs_size_l
     assert 0 == result.exit_code, format_result_exception(result)


-def test_lfs_hook(project_with_injection, subdirectory, large_file):
+def test_lfs_hook(project_with_injection, subdirectory, large_file, enable_precommit_hook):
     """Test committing large files to Git."""
     filenames = {"large-file", "large file with whitespace", "large*file?with wildcards"}

@@ -1799,7 +1799,7 @@ def test_lfs_hook(project_with_injection, subdirectory, large_file):


 @pytest.mark.parametrize("use_env_var", [False, True])
-def test_lfs_hook_autocommit(runner, project, subdirectory, large_file, use_env_var):
+def test_lfs_hook_autocommit(runner, project, subdirectory, large_file, use_env_var, enable_precommit_hook):
     """Test committing large files to Git gets automatically added to lfs."""
     if use_env_var:
         os.environ["AUTOCOMMIT_LFS"] = "true"

@@ -1831,7 +1831,7 @@ def test_lfs_hook_autocommit(runner, project, subdirectory, large_file, use_env_
     assert filenames == tracked_lfs_files


-def test_lfs_hook_can_be_avoided(runner, project, subdirectory, large_file):
+def test_lfs_hook_can_be_avoided(runner, project, subdirectory, large_file, enable_precommit_hook):
     """Test committing large files to Git."""
     result = runner.invoke(
         cli, ["--no-external-storage", "dataset", "add", "--copy", "-c", "my-dataset", str(large_file)]

@@ -1840,7 +1840,7 @@ def test_lfs_hook_can_be_avoided(runner, project, subdirectory, large_file):
     assert "OK" in result.output


-def test_datadir_hook(runner, project, subdirectory):
+def test_datadir_hook(runner, project, subdirectory, enable_precommit_hook):
     """Test pre-commit hook fir checking datadir files."""
     set_value(section="renku", key="check_datadir_files", value="true", global_only=True)
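
The enable_precommit_hook fixture these tests gain is not visible in this excerpt. Given the RENKU_SKIP_HOOK_CHECKS wiring above, a plausible sketch (hypothetical, not the commit's actual code) is a fixture that flips the variable back off so the tests dedicated to the hook can still exercise it:

    import pytest


    @pytest.fixture
    def enable_precommit_hook(monkeypatch):
        """Re-enable pre-commit hook checks for tests that target the hook itself."""
        # conftest.py sets RENKU_SKIP_HOOK_CHECKS=1 globally; unset it here so the
        # installed hook's guard does not short-circuit.
        monkeypatch.delenv("RENKU_SKIP_HOOK_CHECKS", raising=False)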

tests/cli/test_graph.py

Lines changed: 33 additions & 25 deletions

@@ -27,14 +27,16 @@


 @pytest.mark.parametrize("revision", ["", "HEAD", "HEAD^", "HEAD^..HEAD"])
-def test_graph_export_validation(runner, project, directory_tree, run, revision):
+def test_graph_export_validation(runner, project, directory_tree, run, revision, cache_test_project):
     """Test graph validation when exporting."""
-    assert 0 == runner.invoke(cli, ["dataset", "add", "--copy", "-c", "my-data", str(directory_tree)]).exit_code
+    if not cache_test_project.setup():
+        assert 0 == runner.invoke(cli, ["dataset", "add", "--copy", "-c", "my-data", str(directory_tree)]).exit_code

-    file1 = project.path / DATA_DIR / "my-data" / directory_tree.name / "file1"
-    file2 = project.path / DATA_DIR / "my-data" / directory_tree.name / "dir1" / "file2"
-    assert 0 == run(["run", "head", str(file1)], stdout="out1")
-    assert 0 == run(["run", "tail", str(file2)], stdout="out2")
+        file1 = project.path / DATA_DIR / "my-data" / directory_tree.name / "file1"
+        file2 = project.path / DATA_DIR / "my-data" / directory_tree.name / "dir1" / "file2"
+        assert 0 == run(["run", "head", str(file1)], stdout="out1")
+        assert 0 == run(["run", "tail", str(file2)], stdout="out2")
+        cache_test_project.save()

     result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict", "--revision", revision])

@@ -57,12 +59,14 @@ def test_graph_export_validation(runner, project, directory_tree, run, revision)

 @pytest.mark.serial
 @pytest.mark.shelled
-def test_graph_export_strict_run(runner, project, run_shell):
+def test_graph_export_strict_run(runner, project, run_shell, cache_test_project):
     """Test graph export output of run command."""
-    # Run a shell command with pipe.
-    assert run_shell('renku run --name run1 echo "my input string" > my_output_file')[1] is None
-    assert run_shell("renku run --name run2 cp my_output_file my_output_file2")[1] is None
-    assert run_shell("renku workflow compose my-composite-plan run1 run2")[1] is None
+    if not cache_test_project.setup():
+        # Run a shell command with pipe.
+        assert run_shell('renku run --name run1 echo "my input string" > my_output_file')[1] is None
+        assert run_shell("renku run --name run2 cp my_output_file my_output_file2")[1] is None
+        assert run_shell("renku workflow compose my-composite-plan run1 run2")[1] is None
+        cache_test_project.save()

     # Assert created output file.
     result = runner.invoke(cli, ["graph", "export", "--full", "--strict", "--format=json-ld"])

@@ -80,21 +84,25 @@ def test_graph_export_strict_run(runner, project, run_shell):
     assert 0 == result.exit_code, format_result_exception(result)


-def test_graph_export_strict_dataset(tmpdir, runner, project, subdirectory):
+def test_graph_export_strict_dataset(tmpdir, runner, project, subdirectory, cache_test_project):
     """Test output of graph export for dataset add."""
-    result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
-    assert 0 == result.exit_code, format_result_exception(result)
-    paths = []
-    test_paths = []
-    for i in range(3):
-        new_file = tmpdir.join(f"file_{i}")
-        new_file.write(str(i))
-        paths.append(str(new_file))
-        test_paths.append(os.path.relpath(str(new_file), str(project.path)))
-
-    # add data
-    result = runner.invoke(cli, ["dataset", "add", "--copy", "my-dataset"] + paths)
-    assert 0 == result.exit_code, format_result_exception(result)
+    if not cache_test_project.setup():
+        result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
+        assert 0 == result.exit_code, format_result_exception(result)
+        paths = []
+        test_paths = []
+        for i in range(3):
+            new_file = tmpdir.join(f"file_{i}")
+            new_file.write(str(i))
+            paths.append(str(new_file))
+            test_paths.append(os.path.relpath(str(new_file), str(project.path)))
+
+        # add data
+        result = runner.invoke(cli, ["dataset", "add", "--copy", "my-dataset"] + paths)
+        assert 0 == result.exit_code, format_result_exception(result)
+        cache_test_project.save()
+    else:
+        test_paths = [f"../file_{i}" for i in range(3)]

     result = runner.invoke(cli, ["graph", "export", "--strict", "--format=json-ld", "--revision", "HEAD"])
     assert 0 == result.exit_code, format_result_exception(result)