7 changes: 7 additions & 0 deletions CHANGELOG.rst
@@ -7,6 +7,13 @@ Unreleased

* Support Python 3.14.

* Use a different random seed per test, based on the test ID.

This change should mean that tests exercise more random data values in a given run, and that any randomly-generated identifiers have a lower chance of collision when stored in a shared resource like a database.

`PR #687 <https://github.com/pytest-dev/pytest-randomly/pull/687>`__.
Thanks to Bryce Drennan for the suggestion in `Issue #600 <https://github.com/pytest-dev/pytest-randomly/issues/600>`__ and initial implementation in `PR #617 <https://github.com/pytest-dev/pytest-randomly/pull/617>`__.

3.16.0 (2024-10-25)
-------------------

53 changes: 27 additions & 26 deletions README.rst
@@ -36,32 +36,33 @@ All of these features are on by default but can be disabled with flags.
modules, then at the level of test classes (if you have them), then at the
order of functions. This also works with things like doctests.

* Resets the global ``random.seed()`` at the start of every test case and test
to a fixed number - this defaults to ``time.time()`` from the start of your
test run, but you can pass in ``--randomly-seed`` to repeat a
randomness-induced failure.

* If
`factory boy <https://factoryboy.readthedocs.io/en/latest/reference.html>`_
is installed, its random state is reset at the start of every test. This
allows for repeatable use of its random 'fuzzy' features.

* If `faker <https://pypi.org/project/faker>`_ is installed, its random
state is reset at the start of every test. This is also for repeatable fuzzy
data in tests - factory boy uses faker for lots of data. This is also done
if you're using the ``faker`` pytest fixture, by defining the ``faker_seed``
fixture
(`docs <https://faker.readthedocs.io/en/master/pytest-fixtures.html#seeding-configuration>`__).

* If
`Model Bakery <https://model-bakery.readthedocs.io/en/latest/>`_
is installed, its random state is reset at the start of every test. This
allows for repeatable use of its random fixture field values.

* If `numpy <http://www.numpy.org/>`_ is installed, its legacy global random state in |numpy.random|__ is reset at the start of every test.

.. |numpy.random| replace:: ``numpy.random``
__ https://numpy.org/doc/stable/reference/random/index.html
* Generates a base random seed or accepts one for reproduction with ``--randomly-seed``.
The base random seed is printed at the start of the test run, and can be passed in to repeat a failure caused by test ordering or random data.

* At the start of the test run, and before each test setup, run, and teardown, it resets Python’s global random seed to a fixed value, using |random.seed()|__.
The fixed value is derived from the base random seed, the pytest test ID, and an offset for setup or teardown.
This ensures each test gets a different but repeatable random seed, as sketched below.

.. |random.seed()| replace:: ``random.seed()``
__ https://docs.python.org/3/library/random.html#random.seed

* pytest-randomly also resets several libraries’ random states at the start of
every test, if they are installed:

* `factory boy <https://factoryboy.readthedocs.io/en/latest/reference.html>`__

* `Faker <https://pypi.org/project/faker>`__

The ``faker`` pytest fixture is also affected, as pytest-randomly defines |the faker_seed fixture|__.

.. |the faker_seed fixture| replace:: the ``faker_seed`` fixture
__ https://faker.readthedocs.io/en/master/pytest-fixtures.html#seeding-configuration

* `Model Bakery <https://model-bakery.readthedocs.io/en/latest/>`__

* `NumPy <https://www.numpy.org/>`_

Only its `legacy random state <https://numpy.org/doc/stable/reference/random/legacy.html>`__ is affected.

* If additional random generators are used, they can be registered under the
``pytest_randomly.random_seeder``
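To make the seed derivation described above concrete, here is a rough sketch of how the per-test value could be recomputed, assuming the scheme visible in the ``src/pytest_randomly/__init__.py`` changes below: the base seed plus the big-endian integer of the MD5 digest of the test's node ID, offset by -1 for setup and +1 for teardown. The helper name and the example node ID are illustrative only, not part of pytest-randomly's API.

.. code-block:: python

    # Sketch only: recompute the per-test seed under the scheme described above.
    # ``derive_seed`` and the example node ID below are hypothetical.
    import hashlib


    def derive_seed(base_seed: int, nodeid: str, offset: int = 0) -> int:
        # MD5 is used as a stable, well-distributed hash, not for security.
        digest = hashlib.md5(nodeid.encode(), usedforsecurity=False).digest()
        return base_seed + int.from_bytes(digest, "big") + offset


    nodeid = "tests/test_example.py::test_widget"  # hypothetical node ID
    setup_seed = derive_seed(12345, nodeid, offset=-1)     # pytest_runtest_setup
    call_seed = derive_seed(12345, nodeid, offset=0)       # pytest_runtest_call
    teardown_seed = derive_seed(12345, nodeid, offset=1)   # pytest_runtest_teardown
    assert setup_seed + 1 == call_seed == teardown_seed - 1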
17 changes: 12 additions & 5 deletions src/pytest_randomly/__init__.py
@@ -4,12 +4,14 @@
import hashlib
import random
import sys
from functools import lru_cache
from itertools import groupby
from types import ModuleType
from typing import Any, Callable, TypeVar

from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.fixtures import SubRequest
from _pytest.nodes import Item
from pytest import Collector, fixture, hookimpl

@@ -196,17 +198,17 @@ def pytest_report_header(config: Config) -> str:

def pytest_runtest_setup(item: Item) -> None:
if item.config.getoption("randomly_reset_seed"):
_reseed(item.config, -1)
_reseed(item.config, int.from_bytes(_md5(item.nodeid), "big") - 1)


def pytest_runtest_call(item: Item) -> None:
if item.config.getoption("randomly_reset_seed"):
_reseed(item.config)
_reseed(item.config, int.from_bytes(_md5(item.nodeid), "big"))


def pytest_runtest_teardown(item: Item) -> None:
if item.config.getoption("randomly_reset_seed"):
_reseed(item.config, 1)
_reseed(item.config, int.from_bytes(_md5(item.nodeid), "big") + 1)


@hookimpl(tryfirst=True)
@@ -279,6 +281,7 @@ def reduce_list_of_lists(lists: list[list[T]]) -> list[T]:
return new_list


@lru_cache
def _md5(string: str) -> bytes:
hasher = hashlib.md5(usedforsecurity=False)
hasher.update(string.encode())
@@ -288,6 +291,10 @@ def _md5(string: str) -> bytes:
if have_faker: # pragma: no branch

@fixture(autouse=True)
def faker_seed(pytestconfig: Config) -> int:
result: int = pytestconfig.getoption("randomly_seed")
def faker_seed(pytestconfig: Config, request: SubRequest) -> int:
result: int = pytestconfig.getoption("randomly_seed") + int.from_bytes(
_md5(request.node.nodeid),
"big",
)
return result
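For reference, the per-test ``faker_seed`` value computed by the fixture above could be reproduced outside the plugin along these lines. This is a hedged sketch: the node ID string is hypothetical, ``seed_instance`` is Faker's own seeding call rather than anything pytest-randomly provides, and the generated values also depend on the Faker version and locale.

.. code-block:: python

    # Rough sketch, not plugin API: reproduce Faker data for a single test,
    # assuming faker_seed = base seed + big-endian int of MD5(node ID).
    import hashlib

    from faker import Faker

    base_seed = 1  # value that would be passed via --randomly-seed (assumed)
    nodeid = "tests/test_example.py::test_names"  # hypothetical node ID

    seed = base_seed + int.from_bytes(
        hashlib.md5(nodeid.encode(), usedforsecurity=False).digest(), "big"
    )

    fake = Faker()
    fake.seed_instance(seed)  # Faker's pytest fixture applies faker_seed similarly
    print(fake.name())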
119 changes: 34 additions & 85 deletions tests/test_pytest_randomly.py
@@ -58,7 +58,7 @@ def test_it_reports_a_header_when_set(simpletester):
assert lines == ["Using --randomly-seed=10"]


def test_it_reuses_the_same_random_seed_per_test(ourtester):
def test_it_uses_different_random_seeds_per_test(ourtester):
"""
Run a pair of tests that each generate a random number and assert that
the second test sees a different value than the first.
@@ -67,18 +67,16 @@ def test_it_reuses_the_same_random_seed_per_test(ourtester):
test_one="""
import random


def test_a():
test_a.num = random.random()
if hasattr(test_b, 'num'):
assert test_a.num == test_b.num
global num
num = random.random()

def test_b():
test_b.num = random.random()
if hasattr(test_a, 'num'):
assert test_b.num == test_a.num
assert random.random() != num
"""
)
out = ourtester.runpytest("--randomly-dont-reorganize")
out = ourtester.runpytest("--randomly-dont-reorganize", "--randomly-seed=1")
out.assert_outcomes(passed=2, failed=0)


@@ -157,9 +155,8 @@ class A(TestCase):

@classmethod
def setUpClass(cls):
super(A, cls).setUpClass()
super().setUpClass()
cls.suc_num = random.random()
assert cls.suc_num == getattr(B, 'suc_num', cls.suc_num)

def test_fake(self):
assert True
@@ -169,15 +166,15 @@ class B(TestCase):

@classmethod
def setUpClass(cls):
super(B, cls).setUpClass()
super().setUpClass()
cls.suc_num = random.random()
assert cls.suc_num == getattr(A, 'suc_num', cls.suc_num)
assert cls.suc_num != A.suc_num

def test_fake(self):
assert True
"""
)
out = ourtester.runpytest()
out = ourtester.runpytest("--randomly-seed=1")
out.assert_outcomes(passed=2, failed=0)


@@ -195,9 +192,8 @@ def test_fake(self):

@classmethod
def tearDownClass(cls):
super(A, cls).tearDownClass()
super().tearDownClass()
cls.suc_num = random.random()
assert cls.suc_num == getattr(B, 'suc_num', cls.suc_num)


class B(TestCase):
@@ -207,12 +203,12 @@ def test_fake(self):

@classmethod
def tearDownClass(cls):
super(B, cls).tearDownClass()
super().tearDownClass()
cls.suc_num = random.random()
assert cls.suc_num == getattr(A, 'suc_num', cls.suc_num)
assert cls.suc_num != A.suc_num
"""
)
out = ourtester.runpytest()
out = ourtester.runpytest("--randomly-seed=1")
out.assert_outcomes(passed=2, failed=0)


@@ -574,62 +570,20 @@ def test_one(myfixture):
out.assert_outcomes(passed=1)


def test_fixtures_dont_interfere_with_tests_getting_same_random_state(ourtester):
ourtester.makepyfile(
test_one="""
import random

import pytest


random.seed(2)
state_at_seed_two = random.getstate()


@pytest.fixture(scope='module')
def myfixture():
return random.random()


@pytest.mark.one()
def test_one(myfixture):
assert random.getstate() == state_at_seed_two


@pytest.mark.two()
def test_two(myfixture):
assert random.getstate() == state_at_seed_two
"""
)
args = ["--randomly-seed=2"]

out = ourtester.runpytest(*args)
out.assert_outcomes(passed=2)

out = ourtester.runpytest("-m", "one", *args)
out.assert_outcomes(passed=1)
out = ourtester.runpytest("-m", "two", *args)
out.assert_outcomes(passed=1)


def test_factory_boy(ourtester):
"""
Rather than set up factories etc., just check the random generator it uses
is set between two tests to output the same number.
Check that the random generator factory boy uses produces different values in two tests.
"""
ourtester.makepyfile(
test_one="""
from factory.random import randgen

def test_a():
test_a.num = randgen.random()
if hasattr(test_b, 'num'):
assert test_a.num == test_b.num
assert randgen.random() == 0.9988532989147809


def test_b():
test_b.num = randgen.random()
if hasattr(test_a, 'num'):
assert test_b.num == test_a.num
assert randgen.random() == 0.18032546798434612
"""
)

@@ -645,10 +599,10 @@ def test_faker(ourtester):
fake = Faker()

def test_one():
assert fake.name() == 'Ryan Gallagher'
assert fake.name() == 'Mrs. Lisa Ryan'

def test_two():
assert fake.name() == 'Ryan Gallagher'
assert fake.name() == 'Kaitlyn Mitchell'
"""
)

@@ -660,10 +614,10 @@ def test_faker_fixture(ourtester):
ourtester.makepyfile(
test_one="""
def test_one(faker):
assert faker.name() == 'Ryan Gallagher'
assert faker.name() == 'Mrs. Lisa Ryan'

def test_two(faker):
assert faker.name() == 'Ryan Gallagher'
assert faker.name() == 'Kaitlyn Mitchell'
"""
)

@@ -673,22 +627,17 @@ def test_two(faker):

def test_model_bakery(ourtester):
"""
Rather than set up models, just check the random generator it uses is set
between two tests to output the same number.
Check that the random generator Model Bakery uses produces different values in two tests.
"""
ourtester.makepyfile(
test_one="""
from model_bakery.random_gen import baker_random
from model_bakery.random_gen import gen_slug

def test_a():
test_a.num = baker_random.random()
if hasattr(test_b, 'num'):
assert test_a.num == test_b.num
assert gen_slug(10) == 'XjpU5br7ej'

def test_b():
test_b.num = baker_random.random()
if hasattr(test_a, 'num'):
assert test_b.num == test_a.num
assert gen_slug(10) == 'xJHS-PD_WT'
"""
)

@@ -702,10 +651,10 @@ def test_numpy(ourtester):
import numpy as np

def test_one():
assert np.random.rand() == 0.417022004702574
assert np.random.rand() == 0.36687834264514585

def test_two():
assert np.random.rand() == 0.417022004702574
assert np.random.rand() == 0.7050715833365834
"""
)

@@ -769,19 +718,19 @@ def fake_entry_points(*, group):
assert reseed.mock_calls == [
mock.call(1),
mock.call(1),
mock.call(0),
mock.call(1),
mock.call(2),
mock.call(116362448262735926321257785636175308268),
mock.call(116362448262735926321257785636175308269),
mock.call(116362448262735926321257785636175308270),
]

reseed.mock_calls[:] = []
pytester.runpytest_inprocess("--randomly-seed=424242")
assert reseed.mock_calls == [
mock.call(424242),
mock.call(424242),
mock.call(424241),
mock.call(424242),
mock.call(424243),
mock.call(116362448262735926321257785636175732509),
mock.call(116362448262735926321257785636175732510),
mock.call(116362448262735926321257785636175732511),
]
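The large expected values in these assertions are consistent with the scheme above: base seed plus the big-endian MD5 integer of the generated test's node ID, offset by -1, 0, and +1 for setup, call, and teardown. The exact node ID is not shown here, but the relationship between the two runs can be sanity-checked arithmetically, as in this sketch:

.. code-block:: python

    # Sanity check (sketch): the two runs differ only in the base seed,
    # so each expected value should differ by exactly 424242 - 1 = 424241,
    # and each triple should span offsets -1, 0, +1.
    run_one = [
        116362448262735926321257785636175308268,
        116362448262735926321257785636175308269,
        116362448262735926321257785636175308270,
    ]
    run_two = [
        116362448262735926321257785636175732509,
        116362448262735926321257785636175732510,
        116362448262735926321257785636175732511,
    ]
    assert all(b - a == 424242 - 1 for a, b in zip(run_one, run_two))
    assert [x - run_one[1] for x in run_one] == [-1, 0, 1]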

