diff --git a/examples/pkg1/test_mod1.py b/examples/pkg1/test_mod1.py
index 79307d7..ed3de7d 100644
--- a/examples/pkg1/test_mod1.py
+++ b/examples/pkg1/test_mod1.py
@@ -13,7 +13,7 @@ def test_sleep2():
 
 
 @pytest.mark.parametrize(
-    ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")]
+    ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")],
 )
 def test_heavy(range_max, other):
     assert len(["a" * i for i in range(range_max)]) == range_max
diff --git a/pytest_monitor/handler.py b/pytest_monitor/handler.py
index cbbdecd..e9c1bbb 100644
--- a/pytest_monitor/handler.py
+++ b/pytest_monitor/handler.py
@@ -91,7 +91,7 @@ def prepare(self):
 RUN_DATE varchar(64), -- Date of test run
 SCM_ID varchar(128), -- SCM change id
 RUN_DESCRIPTION json
-);"""
+);""",
         )
         cursor.execute(
             """
@@ -112,7 +112,7 @@ def prepare(self):
 MEM_USAGE float, -- Max resident memory used.
 FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H),
 FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H)
-);"""
+);""",
         )
         cursor.execute(
             """
@@ -129,6 +129,6 @@ def prepare(self):
 SYSTEM_INFO varchar(256),
 PYTHON_INFO varchar(512)
 );
-"""
+""",
         )
         self.__cnx.commit()
diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py
index 9243333..1e4d923 100644
--- a/pytest_monitor/pytest_monitor.py
+++ b/pytest_monitor/pytest_monitor.py
@@ -47,7 +47,7 @@ def pytest_addoption(parser):
         " This requires the parameters to be stringifiable.",
     )
     group.addoption(
-        "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces"
+        "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces",
    )
     group.addoption(
         "--remote-server",
@@ -106,7 +106,7 @@ def pytest_addoption(parser):
 
 def pytest_configure(config):
     config.addinivalue_line(
-        "markers", "monitor_skip_test: mark test to be executed but not monitored."
+        "markers", "monitor_skip_test: mark test to be executed but not monitored.",
     )
     config.addinivalue_line(
         "markers",
@@ -147,7 +147,7 @@ def pytest_runtest_setup(item):
             mark_to_del.append(set_marker)
             if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS:
                 warnings.warn(
-                    f"Marker {set_marker} is deprecated. Consider upgrading your tests"
+                    f"Marker {set_marker} is deprecated. Consider upgrading your tests",
                 )
 
     for marker in mark_to_del:
@@ -221,7 +221,7 @@ def wrapped_function():
 
     def prof():
         m = memory_profiler.memory_usage(
-            (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True
+            (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True,
         )
         if isinstance(m[1], BaseException):  # Do we have any outcome?
             raise m[1]
@@ -254,7 +254,7 @@ def pytest_sessionstart(session):
         and session.config.option.mtr_component_prefix
     ):
         raise pytest.UsageError(
-            "Invalid usage: --force-component and --component-prefix are incompatible options!"
+            "Invalid usage: --force-component and --component-prefix are incompatible options!",
         )
     if (
         session.config.option.mtr_no_db
@@ -262,7 +262,7 @@ def pytest_sessionstart(session):
         and not session.config.option.mtr_none
     ):
         warnings.warn(
-            "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring."
+            "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.",
Disabling monitoring.", ) session.config.option.mtr_none = True component = ( @@ -282,12 +282,12 @@ def pytest_sessionstart(session): None if session.config.option.mtr_none else session.config.option.mtr_remote ) session.pytest_monitor = PyTestMonitorSession( - db=db, remote=remote, component=component, scope=session.config.option.mtr_scope + db=db, remote=remote, component=component, scope=session.config.option.mtr_scope, ) global PYTEST_MONITORING_ENABLED PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none session.pytest_monitor.compute_info( - session.config.option.mtr_description, session.config.option.mtr_tags + session.config.option.mtr_description, session.config.option.mtr_tags, ) yield @@ -330,7 +330,7 @@ def prf_tracer(request): yield ptimes_b = request.session.pytest_monitor.process.cpu_times() if not request.node.monitor_skip_test and getattr( - request.node, "monitor_results", False + request.node, "monitor_results", False, ): item_name = request.node.originalname or request.node.name item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0] diff --git a/pytest_monitor/session.py b/pytest_monitor/session.py index 0b001f8..3958d49 100644 --- a/pytest_monitor/session.py +++ b/pytest_monitor/session.py @@ -51,7 +51,7 @@ def get_env_id(self, env): db, remote = None, None if self.__db: row = self.__db.query( - "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),) + "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),), ) db = row[0] if row else None if self.__remote: @@ -112,14 +112,14 @@ def set_environment_info(self, env): if self.__db and db_id is None: self.__db.insert_execution_context(env) db_id = self.__db.query( - "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),) + "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),), )[0] if self.__remote and remote_id is None: # We must postpone that to be run at the end of the pytest session. r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict()) if r.status_code != HTTPStatus.CREATED: warnings.warn( - f"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating..." + f"Cannot insert execution context in remote server (rc={r.status_code}! 
Deactivating...", ) self.__remote = "" else: @@ -131,7 +131,7 @@ def dummy(): return True memuse = memory_profiler.memory_usage( - (dummy,), max_iterations=1, max_usage=True + (dummy,), max_iterations=1, max_usage=True, ) self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse diff --git a/pytest_monitor/sys_utils.py b/pytest_monitor/sys_utils.py index a389ea7..cc7c8fc 100644 --- a/pytest_monitor/sys_utils.py +++ b/pytest_monitor/sys_utils.py @@ -13,18 +13,17 @@ def collect_ci_info(): d = dict() # Test for jenkins - if "BUILD_NUMBER" in os.environ: - if "BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ: - br = ( - os.environ["BRANCH_NAME"] - if "BRANCH_NAME" in os.environ - else os.environ["JOB_NAME"] - ) - d = dict( - pipeline_branch=br, - pipeline_build_no=os.environ["BUILD_NUMBER"], - __ci__="jenkinsci", - ) + if "BUILD_NUMBER" in os.environ and ("BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ): + br = ( + os.environ["BRANCH_NAME"] + if "BRANCH_NAME" in os.environ + else os.environ["JOB_NAME"] + ) + d = dict( + pipeline_branch=br, + pipeline_build_no=os.environ["BUILD_NUMBER"], + __ci__="jenkinsci", + ) # Test for CircleCI if "CIRCLE_JOB" in os.environ and "CIRCLE_BUILD_NUM" in os.environ: d = dict( @@ -59,7 +58,7 @@ def collect_ci_info(): def determine_scm_revision(): for scm, cmd in (("git", r"git rev-parse HEAD"), ("p4", r"p4 changes -m1 \#have")): p = subprocess.Popen( - cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE + cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) p_out, _ = p.communicate() if p.returncode == 0: @@ -73,7 +72,7 @@ def determine_scm_revision(): def _get_cpu_string(): if platform.system().lower() == "darwin": old_path = os.environ["PATH"] - os.environ["PATH"] = old_path + ":" + "/usr/sbin" + os.environ["PATH"] = f"{old_path}:/usr/sbin" ret = ( subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True) .decode() @@ -100,7 +99,7 @@ def __init__(self): self.__cpu_freq_base = psutil.cpu_freq().current except (AttributeError, NotImplementedError, FileNotFoundError): warnings.warn( - "Unable to fetch CPU frequency. Trying to read it from environment.." + "Unable to fetch CPU frequency. Trying to read it from environment..", ) self._read_cpu_freq_from_env() self.__proc_typ = platform.processor() @@ -114,11 +113,11 @@ def __init__(self): def _read_cpu_freq_from_env(self): try: self.__cpu_freq_base = float( - os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0.") + os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0."), ) except (ValueError, TypeError): warnings.warn( - "Wrong type/value while reading cpu frequency from environment. Forcing to 0.0." + "Wrong type/value while reading cpu frequency from environment. 
Forcing to 0.0.", ) self.__cpu_freq_base = 0.0 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..29158bd --- /dev/null +++ b/setup.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import pathlib +import re + +from setuptools import find_packages, setup + + +def read_version(): + p = pathlib.Path(__file__) + p = p.parent / "pytest_monitor" / "__init__.py" + with p.open("r") as f: + for line in f: + if line.startswith("__version__"): + line = line.split("=")[1].strip() + match = re.match(r"^['\"](\d+\.\d+\.\d+\w*)['\"]", line) + if match: + return match.group(1) + raise ValueError("Unable to compute version") + + +def read(fname): + file_path = pathlib.Path(__file__).parent / fname + with file_path.open("r", encoding="utf-8") as f: + return f.read() + + +setup( + name="pytest-monitor", + version=read_version(), + author="Jean-Sébastien Dieu", + author_email="jean-sebastien.dieu@cfm.fr", + maintainer="Jean-Sébastien Dieu", + maintainer_email="jean-sebastien.dieu@cfm.fr", + license="MIT", + project_urls=dict( + Source="https://github.com/CFMTech/pytest-monitor", + Tracker="https://github.com/CFMTech/pytest-monitor/issues", + ), + url="https://pytest-monitor.readthedocs.io/", + description="Pytest plugin for analyzing resource usage.", + long_description=read("README.rst"), + packages=find_packages(".", exclude=("tests", "example", "docs")), + python_requires=">=3.5", + install_requires=[ + "pytest", + "requests", + "psutil>=5.1.0", + "memory_profiler>=0.58", + "wheel", + ], + options={"bdist_wheel": {"universal": False}}, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Framework :: Pytest", + "Intended Audience :: Developers", + "Topic :: Software Development :: Testing", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Operating System :: OS Independent", + "License :: OSI Approved :: MIT License", + ], + entry_points={ + "pytest11": [ + "monitor = pytest_monitor.pytest_monitor", + ], + }, +) diff --git a/tests/test_monitor.py b/tests/test_monitor.py index f5ff765..8ca11d9 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -19,7 +19,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -40,7 +40,7 @@ def test_ok(): assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() tags = json.loads( - cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0], ) assert "description" not in tags assert "version" in tags @@ -60,12 +60,12 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args result = testdir.runpytest( - "-vv", "--description", '"Test"', "--tag", "version=12.3.5" + "-vv", "--description", '"Test"', "--tag", "version=12.3.5", ) # fnmatch_lines does an assertion internally @@ -83,7 +83,7 @@ def test_ok(): assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() tags = json.loads( - cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + cursor.execute("SELECT RUN_DESCRIPTION FROM 
     )
     assert "description" in tags
     assert tags["description"] == '"Test"'
@@ -103,7 +103,7 @@ def test_monitor_pytest_skip_marker(testdir):
 def test_skipped():
     assert True
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -139,7 +139,7 @@ def a_fixture():
 def test_skipped(a_fixture):
     assert True
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -175,7 +175,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-    """
+    """,
     )
 
     # run pytest with the following cmd args
@@ -183,7 +183,7 @@ def test_ok():
 
     # fnmatch_lines does an assertion internally
     result.stdout.fnmatch_lines(
-        ["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"]
+        ["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"],
     )
 
     pymon_path = pathlib.Path(str(testdir)) / ".pymon"
@@ -215,7 +215,7 @@ def test_ok_not_monitored():
 def test_another_function_ok_not_monitored():
     assert True
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -226,7 +226,7 @@ def test_another_function_ok_not_monitored():
         [
             "*::test_ok_not_monitored PASSED*",
             "*::test_another_function_ok_not_monitored PASSED*",
-        ]
+        ],
     )
 
     pymon_path = pathlib.Path(str(testdir)) / ".pymon"
@@ -256,7 +256,7 @@ def test_not_monitored():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -299,7 +299,7 @@ def test_monitored():
     x = ['a' *i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -307,7 +307,7 @@ def test_monitored():
 
     # fnmatch_lines does an assertion internally
     result.stdout.fnmatch_lines(
-        ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"]
+        ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"],
     )
 
     pymon_path = pathlib.Path(str(testdir)) / ".pymon"
@@ -342,7 +342,7 @@ def test_that():
     x = ['a' *i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring."
@@ -367,7 +367,7 @@ def test_monitor_basic_output(testdir):
         """
 def test_it():
     print('Hello World')
-    """
+    """,
     )
 
     wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring."
@@ -394,7 +394,7 @@ def run(a, b):
     33
     """
     return a + b
-'''
+''',
     )
 
     # run pytest with the following cmd args
diff --git a/tests/test_monitor_component.py b/tests/test_monitor_component.py
index e176109..c872a35 100644
--- a/tests/test_monitor_component.py
+++ b/tests/test_monitor_component.py
@@ -16,7 +16,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -36,7 +36,7 @@ def test_ok():
     cursor.execute("SELECT ITEM FROM TEST_METRICS;")
     assert 1 == len(cursor.fetchall())
     cursor.execute(
-        "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';"
+        "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';",
     )
     assert not len(cursor.fetchall())
 
@@ -54,7 +54,7 @@ def test_force_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -75,7 +75,7 @@ def test_force_ok():
     assert 1 == len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';"
+        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';",
     )
     assert 1 == len(cursor.fetchall())
 
@@ -94,7 +94,7 @@ def test_prefix_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -115,12 +115,12 @@ def test_prefix_ok():
     assert 1 == len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';"
+        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';",
     )
     assert not len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';"
+        " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';",
     )
     assert 1 == len(cursor.fetchall())
 
@@ -138,7 +138,7 @@ def test_prefix_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     # run pytest with the following cmd args
@@ -159,6 +159,6 @@ def test_prefix_ok():
     assert 1 == len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';"
+        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';",
     )
     assert 1 == len(cursor.fetchall())
diff --git a/tests/test_monitor_context.py b/tests/test_monitor_context.py
index 943d289..c1bcd68 100644
--- a/tests/test_monitor_context.py
+++ b/tests/test_monitor_context.py
@@ -77,10 +77,10 @@ def test_force_cpu_freq(testdir):
 
 
 @pytest.mark.parametrize(
-    "effect", [AttributeError, NotImplementedError, FileNotFoundError]
+    "effect", [AttributeError, NotImplementedError, FileNotFoundError],
 )
 def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(
-    effect, testdir
+    effect, testdir,
 ):
     """Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var"""
     # create a temporary pytest test module
@@ -102,7 +102,7 @@
 
 
 @pytest.mark.parametrize(
-    "effect", [AttributeError, NotImplementedError, FileNotFoundError]
+    "effect", [AttributeError, NotImplementedError, FileNotFoundError],
 )
 def test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir):
     """Make sure that pytest-monitor's fallback mechanism is efficient enough."""
diff --git a/tests/test_monitor_in_ci.py b/tests/test_monitor_in_ci.py
index f3fe9e8..cb04652 100644
--- a/tests/test_monitor_in_ci.py
+++ b/tests/test_monitor_in_ci.py
@@ -17,7 +17,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     envs = dict()
@@ -73,7 +73,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     def check_that(the_result, match):
@@ -159,7 +159,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     def check_that(the_result, match):
@@ -236,7 +236,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     def check_that(the_result, match):
@@ -313,7 +313,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     def check_that(the_result, match):
@@ -390,7 +390,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
 
-"""
+""",
     )
 
     def check_that(the_result, match):