diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b8303342a254c..66e7c997f4fad 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -44,7 +44,7 @@ jobs: with: python-version: '3.12' - name: Install tox - run: pip install tox==4.53.1 + run: pip install tox==4.26.0 - name: Setup tox environment run: tox run -e ${{ env.TOXENV }} --notest - name: Test diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b06f181746c35..5242739f8f846 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -59,12 +59,6 @@ jobs: toxenv: py tox_extra_args: "-n 4" test_mypyc: true - - name: Test suite with py315-ubuntu, mypyc-compiled - python: '3.15' - os: ubuntu-24.04-arm - toxenv: py - tox_extra_args: "-n 4" - test_mypyc: true - name: Test suite with py314t-ubuntu, mypyc-compiled python: '3.14t' os: ubuntu-24.04-arm @@ -202,7 +196,6 @@ jobs: if: ${{ !(matrix.debug_build || endsWith(matrix.python, '-dev')) }} with: python-version: ${{ matrix.python }} - allow-prereleases: true - name: Install tox run: | @@ -213,7 +206,7 @@ jobs: echo debug build; python -c 'import sysconfig; print(bool(sysconfig.get_config_var("Py_DEBUG")))' echo os.cpu_count; python -c 'import os; print(os.cpu_count())' echo os.sched_getaffinity; python -c 'import os; print(len(getattr(os, "sched_getaffinity", lambda *args: [])(0)))' - pip install tox==4.53.1 + pip install tox==4.26.0 - name: Compiled with mypyc if: ${{ matrix.test_mypyc }} @@ -278,7 +271,7 @@ jobs: default: 3.11.1 command: python -c "import platform; print(f'{platform.architecture()=} {platform.machine()=}');" - name: Install tox - run: pip install tox==4.53.1 + run: pip install tox==4.26.0 - name: Setup tox environment run: tox run -e py --notest - name: Test diff --git a/CHANGELOG.md b/CHANGELOG.md index d01af76edf0e0..cf5548d0bcaae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,99 +2,7 @@ ## Next Release -## Mypy 2.1 - -We’ve just uploaded mypy 2.1.0 to the 
Python Package Index ([PyPI](https://pypi.org/project/mypy/)). -Mypy is a static type checker for Python. This release includes new features, performance -improvements and bug fixes. You can install it as follows: - - python3 -m pip install -U mypy - -You can read the full documentation for this release on [Read the Docs](http://mypy.readthedocs.io). - -### librt.vecs: Fast Growable Array Type for Mypyc - -The new `librt.vecs` module provides an efficient growable array type `vec` that is -optimized for mypyc use. It provides fast, packed arrays with integer and floating point -value types, which can be **several times faster** than `list`, and tens of times faster -than `array.array` in code compiled using mypyc. It also supports nested `vec` objects and -non-value-type items, such as ``vec[vec[str]]``. - -Refer to the [documentation](https://mypyc.readthedocs.io/en/latest/librt_vecs.html) for -the details. - -Contributed by Jukka Lehtosalo. - -### librt.random: Fast Pseudo-Random Number Generation - -The new `librt.random` module provides fast pseudo-random number generation that is -optimized for code compiled using mypyc. It can be 3x to 10x faster than the stdlib -`random` module in compiled code. - -Refer to the [documentation](https://mypyc.readthedocs.io/en/latest/librt_random.html) for -the details. - -Contributed by Jukka Lehtosalo (PR [21433](https://github.com/python/mypy/pull/21433)). 
- -### Mypyc Improvements - -- Make compilation order with multiple files consistent (Piotr Sawicki, PR [21419](https://github.com/python/mypy/pull/21419)) -- Fix crash on accessing `StopAsyncIteration` (Piotr Sawicki, PR [21406](https://github.com/python/mypy/pull/21406)) -- Fix incremental compilation with `separate` flag (Vaggelis Danias, PR [21299](https://github.com/python/mypy/pull/21299)) - -### Fixes to Crashes - -- Fix crash on partial type with `--allow-redefinition` and `global` declaration (Jukka Lehtosalo, PR [21428](https://github.com/python/mypy/pull/21428)) -- Fix broken awaitable generator patching (Ivan Levkivskyi, PR [21435](https://github.com/python/mypy/pull/21435)) - -### Changes to Messages - -- Fix function call error message for small number of arguments (sobolevn, PR [21432](https://github.com/python/mypy/pull/21432)) - -### Other Notable Fixes and Improvements - -- Rely on typeshed stubs for `slice` typing (Ivan Levkivskyi, PR [21401](https://github.com/python/mypy/pull/21401)) -- Improve negative narrowing for membership checks on tuples (Shantanu, PR [21456](https://github.com/python/mypy/pull/21456)) -- Narrow match captures based on previous cases (Shantanu, PR [21405](https://github.com/python/mypy/pull/21405)) -- Fix nondeterminism in overload resolution (Shantanu, PR [21455](https://github.com/python/mypy/pull/21455)) -- Respect file config comments for stale modules (Adam Turner, PR [21444](https://github.com/python/mypy/pull/21444)) -- Fix JSON output mode for syntax errors in parallel mode (Adam Turner, PR [21434](https://github.com/python/mypy/pull/21434)) -- Fix type variable with values as a supertype (Ivan Levkivskyi, PR [21431](https://github.com/python/mypy/pull/21431)) -- Add support for configuring `--num-workers` with an environment variable (Kevin Kannammalil, PR [21407](https://github.com/python/mypy/pull/21407)) -- Respect JSON output mode for syntax errors (Adam Turner, PR 
[21386](https://github.com/python/mypy/pull/21386)) -- Analyze `TypedDict` decorators (Pranav Manglik, PR [21267](https://github.com/python/mypy/pull/21267)) - -### Typeshed Updates - -Please see [git log](https://github.com/python/typeshed/commits/main?after=e4d32e01bee44241a5e7c33298c261175b9f1bdb+0&branch=main&path=stdlib) for full list of standard library typeshed stub changes. - -### Acknowledgements - -Thanks to all mypy contributors who contributed to this release: - -- Adam Turner -- Ivan Levkivskyi -- Jukka Lehtosalo -- Kevin Kannammalil -- Piotr Sawicki -- Shantanu -- sobolevn -- Vaggelis Danias - -I’d also like to thank my employer, Dropbox, for supporting mypy development. - -## Mypy 2.0 - -We’ve just uploaded mypy 2.0.0 to the Python Package Index ([PyPI](https://pypi.org/project/mypy/)). -Mypy is a static type checker for Python. This release includes new features, performance -improvements and bug fixes. There are also changes to options and defaults. -You can install it as follows: - - python3 -m pip install -U mypy - -You can read the full documentation for this release on [Read the Docs](http://mypy.readthedocs.io). - -### Enable `--local-partial-types` by Default +### Enabling `--local-partial-types` by Default This flag affects the inference of types based on assignments in other scopes. For now, explicitly disabling this continues to be supported, but this support will be removed @@ -103,7 +11,7 @@ in mypy, like the daemon or the new implementation of flexible redefinitions. Contributed by Ivan Levkivskyi, Jukka Lehtosalo, Shantanu in [PR 21163](https://github.com/python/mypy/pull/21163). -### Enable `--strict-bytes` by Default +### Enabling `--strict-bytes` by Default Per [PEP 688](https://peps.python.org/pep-0688), mypy no longer treats `bytearray` and `memoryview` values as assignable to the `bytes` type. @@ -164,25 +72,6 @@ in future mypy releases. 
Contributed by Ivan Levkivskyi, with additional contributions from Emma Smith and Jukka Lehtosalo. -Recent related changes since the last release: - -- Freeze garbage collection in parallel workers for 4-5% speedup (Ivan Levkivskyi, PR [21302](https://github.com/python/mypy/pull/21302)) -- Expose `--num-workers` and `--native-parser` (Ivan Levkivskyi, PR [21387](https://github.com/python/mypy/pull/21387)) -- Split type checking into interface and implementation in parallel workers (Ivan Levkivskyi, PR [21119](https://github.com/python/mypy/pull/21119)) -- Batch module groups for parallel processing (Ivan Levkivskyi, PR [21287](https://github.com/python/mypy/pull/21287)) -- Optimize parallel worker startup (Ivan Levkivskyi, PR [21203](https://github.com/python/mypy/pull/21203)) -- Parse files in parallel when possible (Ivan Levkivskyi, PR [21175](https://github.com/python/mypy/pull/21175)) -- Use parallel parsing at all stages (Ivan Levkivskyi, PR [21266](https://github.com/python/mypy/pull/21266)) -- Fix sequential bottleneck in parallel parsing (Jukka Lehtosalo, PR [21291](https://github.com/python/mypy/pull/21291)) -- Fail fast when a user tries to generate reports with parallel workers (Ivan Levkivskyi, PR [21341](https://github.com/python/mypy/pull/21341)) -- Partially support old NumPy plugin in parallel type checking (Ivan Levkivskyi, PR [21324](https://github.com/python/mypy/pull/21324)) -- Handle reachability consistently in parallel type checking (Ivan Levkivskyi, PR [21322](https://github.com/python/mypy/pull/21322)) -- Always respect `@no_type_check` in parallel type checking (Ivan Levkivskyi, PR [21320](https://github.com/python/mypy/pull/21320)) -- Minor fixes in parallel checking (Ivan Levkivskyi, PR [21319](https://github.com/python/mypy/pull/21319)) -- Fix plugin logic in parallel type checking (Ivan Levkivskyi, PR [21252](https://github.com/python/mypy/pull/21252)) -- Fix Windows IPC race condition when using parallel checking (Jukka Lehtosalo, PR 
[21228](https://github.com/python/mypy/pull/21228)) -- Report parallel worker exit status on receive failure (Jukka Lehtosalo, PR [21224](https://github.com/python/mypy/pull/21224)) - ### Drop Support for Targeting Python 3.9 Mypy no longer supports type checking code with `--python-version 3.9`. @@ -222,114 +111,6 @@ the details. Contributed by Jukka Lehtosalo. -### Mypyc Improvements - -- Document `librt.time` (Jukka Lehtosalo, PR [21372](https://github.com/python/mypy/pull/21372)) -- Mark `librt.time.time()` non-experimental (Ivan Levkivskyi, PR [21310](https://github.com/python/mypy/pull/21310)) -- Fix `librt.time` primitive now that it is no longer experimental (Ivan Levkivskyi, PR [21318](https://github.com/python/mypy/pull/21318)) -- Fix `librt` API/ABI version checks (Jukka Lehtosalo, PR [21311](https://github.com/python/mypy/pull/21311)) -- Generate more type methods for classes with attribute dictionaries (Piotr Sawicki, PR [21290](https://github.com/python/mypy/pull/21290)) -- Fix reference counting for tuple items during deallocation (Shantanu, PR [21245](https://github.com/python/mypy/pull/21245)) -- Release new instances when `__init__` raises (Shantanu, PR [21248](https://github.com/python/mypy/pull/21248)) -- Fix `@property` getter memory leak (Vaggelis Danias, PR [21230](https://github.com/python/mypy/pull/21230)) -- Fix semantics for walrus expression in tuple (Shantanu, PR [21249](https://github.com/python/mypy/pull/21249)) -- Fix crash on import errors during cleanup (Shantanu, PR [21247](https://github.com/python/mypy/pull/21247)) -- Fix reference leak in str index (Shantanu, PR [21251](https://github.com/python/mypy/pull/21251)) -- Fix memory leak in integer true division (Shantanu, PR [21246](https://github.com/python/mypy/pull/21246)) -- Fix reference leaks in `list.clear()`/`dict.clear()` (Shantanu, PR [21244](https://github.com/python/mypy/pull/21244)) -- Resolve type aliases in function specialization (esarp, PR 
[21233](https://github.com/python/mypy/pull/21233)) -- Report an error if an acyclic class inherits from non-acyclic (Piotr Sawicki, PR [21227](https://github.com/python/mypy/pull/21227)) -- Fix `b64decode` to match new CPython behavior (Piotr Sawicki, PR [21200](https://github.com/python/mypy/pull/21200)) - -### Fixes to Crashes - -- Fix crash when a file does not exist during semantic analysis (Ivan Levkivskyi, PR [21379](https://github.com/python/mypy/pull/21379)) -- Fix parallel worker crash on syntax error (Ivan Levkivskyi, PR [21202](https://github.com/python/mypy/pull/21202)) - -### Changes to Messages - -- Improve error messages for unexpected keyword arguments in overloaded functions (Kevin Kannammalil, PR [20592](https://github.com/python/mypy/pull/20592)) -- Don't suggest `Foo[...]` when `Foo(arg=...)` is used in annotation (Yosof Badr, PR [21238](https://github.com/python/mypy/pull/21238)) -- Mention what codes are actually ignored in "not covered by type: ignore comment" note (wyattscarpenter, PR [19904](https://github.com/python/mypy/pull/19904)) -- Improve error messages when positional argument is missing (Kevin Kannammalil, PR [20591](https://github.com/python/mypy/pull/20591)) -- Improve "name is not defined" errors with fuzzy matching (Kevin Kannammalil, PR [20693](https://github.com/python/mypy/pull/20693)) -- Add suggestions for misspelled module imports (Kevin Kannammalil, PR [20695](https://github.com/python/mypy/pull/20695)) - -### Performance Improvements - -- Replace `NamedTuple` with faster regular classes in hot paths (Shantanu, PR [21326](https://github.com/python/mypy/pull/21326)) -- Avoid calling best-match suggestions unless the message is shown (Ivan Levkivskyi, PR [21307](https://github.com/python/mypy/pull/21307)) -- Order cases in native parser based on AST node frequency (Jukka Lehtosalo, PR [21219](https://github.com/python/mypy/pull/21219)) - -### Stubtest Improvements - -- Basic support for unpack kwargs (Shantanu, PR 
[21024](https://github.com/python/mypy/pull/21024)) -- Fix false positive for properties with a deleter (Pranav Manglik, PR [21259](https://github.com/python/mypy/pull/21259)) - -### Documentation Updates - -- Rename "value restriction" to "value-constrained type variable" (Leo Ji, PR [21112](https://github.com/python/mypy/pull/21112)) -- Clarify that invariant-by-default applies to legacy `TypeVar` syntax (Leo Ji, PR [21108](https://github.com/python/mypy/pull/21108)) - -### Improvements to the Native Parser - -The new native parser is still experimental. - -- Make new parser consistent with the old one (Ivan Levkivskyi, PR [21377](https://github.com/python/mypy/pull/21377)) -- Support `--package-root` with the native parser (Ivan Levkivskyi, PR [21321](https://github.com/python/mypy/pull/21321)) -- Improve call expressions in type annotations with the native parser (Jukka Lehtosalo, PR [21300](https://github.com/python/mypy/pull/21300)) -- Depend on `ast-serialize` by default (Jukka Lehtosalo, PR [21297](https://github.com/python/mypy/pull/21297)) - -### Other Notable Fixes and Improvements - -- Fix narrowing for `AbstractSet` and `Mapping` (Shantanu, PR [21352](https://github.com/python/mypy/pull/21352)) -- Preserve gradual guarantee when narrowing `Any` union via equality (Shantanu, PR [21368](https://github.com/python/mypy/pull/21368)) -- Make type variable upper bound narrowing symmetric (Ivan Levkivskyi, PR [21350](https://github.com/python/mypy/pull/21350)) -- Behave consistently when type-checking a stub package directly (Ivan Levkivskyi, PR [21330](https://github.com/python/mypy/pull/21330)) -- Add support for `Final[...]` in dataclasses (Ivan Levkivskyi, PR [21334](https://github.com/python/mypy/pull/21334)) -- Narrow more sequence parents (Shantanu, PR [21327](https://github.com/python/mypy/pull/21327)) -- Better narrowing for enums and other types with known equality (Shantanu, PR [21281](https://github.com/python/mypy/pull/21281)) -- Fix pathspec 
error (Ivan Levkivskyi, PR [21296](https://github.com/python/mypy/pull/21296)) -- Use sharding for the SQLite cache (Jukka Lehtosalo, PR [21292](https://github.com/python/mypy/pull/21292)) -- Limit type inference context fallback to the walrus operator only (Ivan Levkivskyi, PR [21294](https://github.com/python/mypy/pull/21294)) -- Support `.git/info/exclude` for `--exclude-gitignore` (RogerJinIS, PR [21286](https://github.com/python/mypy/pull/21286)) -- Let `--allow-redefinition` widen a global in a function with `None` initialization (Jukka Lehtosalo, PR [21285](https://github.com/python/mypy/pull/21285)) -- Delete Python 2 extra (Shantanu, PR [18374](https://github.com/python/mypy/pull/18374)) -- No longer narrow final globals in functions (Ivan Levkivskyi, PR [21241](https://github.com/python/mypy/pull/21241)) -- Narrow unions containing `Any` in conditional branches (Shantanu, PR [21231](https://github.com/python/mypy/pull/21231)) -- Propagate narrowing within chained comparisons (Shantanu, PR [21160](https://github.com/python/mypy/pull/21160)) -- Add proper lazy deserialization (Ivan Levkivskyi, PR [21198](https://github.com/python/mypy/pull/21198)) -- Add `install_types` to options affecting cache (Brian Schubert, PR [21070](https://github.com/python/mypy/pull/21070)) -- Narrow `Any` in conditional type checks (Shantanu, PR [21167](https://github.com/python/mypy/pull/21167)) -- Fix exception handler target location in new parser (Ivan Levkivskyi, PR [21185](https://github.com/python/mypy/pull/21185)) -- Improve traceback display (Shantanu, PR [21155](https://github.com/python/mypy/pull/21155)) -- Include two more files in the sdist: `CREDITS` and the typeshed `README` (Michael R. 
Crusoe, PR [21131](https://github.com/python/mypy/pull/21131)) - -### Typeshed Updates - -Please see [git log](https://github.com/python/typeshed/commits/main?after=c5e47faeda2cf9d233f91bc1dc95814b0cc7ccba+0&branch=main&path=stdlib) for full list of standard library typeshed stub changes. - -### Acknowledgements - -Thanks to all mypy contributors who contributed to this release: -- Brian Schubert -- Ethan Sarp -- Ivan Levkivskyi -- Jukka Lehtosalo -- Kevin Kannammalil -- Leo Ji -- Marc Mueller -- Michael R. Crusoe -- Piotr Sawicki -- Pranav Manglik -- RogerJinIS -- Shantanu -- Vaggelis Danias -- wyattscarpenter -- Yosof Badr - -I’d also like to thank my employer, Dropbox, for supporting mypy development. - ## Mypy 1.20 We’ve just uploaded mypy 1.20.0 to the Python Package Index ([PyPI](https://pypi.org/project/mypy/)). diff --git a/docs/source/error_code_list2.rst b/docs/source/error_code_list2.rst index 5dfe483d2cfe2..36cf05d4f3f85 100644 --- a/docs/source/error_code_list2.rst +++ b/docs/source/error_code_list2.rst @@ -550,7 +550,7 @@ Example: Check that overrides of mutable attributes are safe [mutable-override] ---------------------------------------------------------------------- -``mutable-override`` will enable the check for unsafe overrides of mutable attributes. +`mutable-override` will enable the check for unsafe overrides of mutable attributes. For historical reasons, and because this is a relatively common pattern in Python, this check is not enabled by default. 
The example below is unsafe, and will be flagged when this error code is enabled: diff --git a/misc/typeshed_patches/0001-Revert-operator-changes.patch b/misc/typeshed_patches/0001-Revert-operator-changes.patch deleted file mode 100644 index 71b0ae9b47abd..0000000000000 --- a/misc/typeshed_patches/0001-Revert-operator-changes.patch +++ /dev/null @@ -1,91 +0,0 @@ -From 7f38b86464d59188a87ff8c9913c257e788a2f0b Mon Sep 17 00:00:00 2001 -From: hauntsaninja -Date: Wed, 6 May 2026 19:49:41 -0700 -Subject: [PATCH] Revert operator changes - ---- - mypy/typeshed/stdlib/_operator.pyi | 51 ++++++++---------------------- - 1 file changed, 14 insertions(+), 37 deletions(-) - -diff --git a/mypy/typeshed/stdlib/_operator.pyi b/mypy/typeshed/stdlib/_operator.pyi -index e7d85f811..8c705065b 100644 ---- a/mypy/typeshed/stdlib/_operator.pyi -+++ b/mypy/typeshed/stdlib/_operator.pyi -@@ -1,15 +1,5 @@ - import sys --from _typeshed import ( -- SupportsAdd, -- SupportsGetItem, -- SupportsMod, -- SupportsMul, -- SupportsRAdd, -- SupportsRMod, -- SupportsRMul, -- SupportsRSub, -- SupportsSub, --) -+from _typeshed import SupportsGetItem - from collections.abc import Callable, Container, Iterable, MutableMapping, MutableSequence, Sequence - from operator import attrgetter as attrgetter, itemgetter as itemgetter, methodcaller as methodcaller - from typing import Any, AnyStr, Protocol, SupportsAbs, SupportsIndex, TypeVar, overload, type_check_only -@@ -18,7 +8,6 @@ from typing_extensions import ParamSpec, TypeAlias, TypeIs - _R = TypeVar("_R") - _T = TypeVar("_T") - _T_co = TypeVar("_T_co", covariant=True) --_T_contra = TypeVar("_T_contra", contravariant=True) - _K = TypeVar("_K") - _V = TypeVar("_V") - _P = ParamSpec("_P") -@@ -69,36 +58,24 @@ def truth(a: object, /) -> bool: ... - def is_(a: object, b: object, /) -> bool: ... - def is_not(a: object, b: object, /) -> bool: ... - def abs(a: SupportsAbs[_T], /) -> _T: ... 
--@overload --def add(a: SupportsAdd[_T_contra, _T_co], b: _T_contra, /) -> _T_co: ... --@overload --def add(a: _T_contra, b: SupportsRAdd[_T_contra, _T_co], /) -> _T_co: ... --def and_(a, b, /): ... --def floordiv(a, b, /): ... -+def add(a: Any, b: Any, /) -> Any: ... -+def and_(a: Any, b: Any, /) -> Any: ... -+def floordiv(a: Any, b: Any, /) -> Any: ... - def index(a: SupportsIndex, /) -> int: ... - def inv(a: _SupportsInversion[_T_co], /) -> _T_co: ... - def invert(a: _SupportsInversion[_T_co], /) -> _T_co: ... --def lshift(a, b, /): ... --@overload --def mod(a: SupportsMod[_T_contra, _T_co], b: _T_contra, /) -> _T_co: ... --@overload --def mod(a: _T_contra, b: SupportsRMod[_T_contra, _T_co], /) -> _T_co: ... --@overload --def mul(a: SupportsMul[_T_contra, _T_co], b: _T_contra, /) -> _T_co: ... --@overload --def mul(a: _T_contra, b: SupportsRMul[_T_contra, _T_co], /) -> _T_co: ... --def matmul(a, b, /): ... -+def lshift(a: Any, b: Any, /) -> Any: ... -+def mod(a: Any, b: Any, /) -> Any: ... -+def mul(a: Any, b: Any, /) -> Any: ... -+def matmul(a: Any, b: Any, /) -> Any: ... - def neg(a: _SupportsNeg[_T_co], /) -> _T_co: ... --def or_(a, b, /): ... -+def or_(a: Any, b: Any, /) -> Any: ... - def pos(a: _SupportsPos[_T_co], /) -> _T_co: ... --def pow(a, b, /): ... --def rshift(a, b, /): ... --@overload --def sub(a: SupportsSub[_T_contra, _T_co], b: _T_contra, /) -> _T_co: ... --@overload --def sub(a: _T_contra, b: SupportsRSub[_T_contra, _T_co], /) -> _T_co: ... --def truediv(a, b, /): ... --def xor(a, b, /): ... -+def pow(a: Any, b: Any, /) -> Any: ... -+def rshift(a: Any, b: Any, /) -> Any: ... -+def sub(a: Any, b: Any, /) -> Any: ... -+def truediv(a: Any, b: Any, /) -> Any: ... -+def xor(a: Any, b: Any, /) -> Any: ... - def concat(a: Sequence[_T], b: Sequence[_T], /) -> Sequence[_T]: ... - def contains(a: Container[object], b: object, /) -> bool: ... - def countOf(a: Iterable[object], b: object, /) -> int: ... 
--- -2.53.0 - diff --git a/mypy-requirements.txt b/mypy-requirements.txt index 0216f47852baa..27c76a0f3f6a8 100644 --- a/mypy-requirements.txt +++ b/mypy-requirements.txt @@ -5,5 +5,5 @@ typing_extensions>=4.14.0; python_version>='3.15' mypy_extensions>=1.0.0 pathspec>=1.0.0 tomli>=1.1.0; python_version<'3.11' -librt>=0.11.0; platform_python_implementation != 'PyPy' +librt>=0.10.0; platform_python_implementation != 'PyPy' ast-serialize>=0.3.0,<1.0.0 diff --git a/mypy/build.py b/mypy/build.py index 8d5db0bab8dfa..21a5559b329a3 100644 --- a/mypy/build.py +++ b/mypy/build.py @@ -4769,13 +4769,13 @@ def process_stale_scc(graph: Graph, ascc: SCC, manager: BuildManager) -> None: t2 = time.time() stale = scc - # Parse before verify_dependencies so that inline config comments - # (e.g. "# mypy: disable-error-code") are applied to options. - manager.parse_all([graph[id] for id in stale], post_parse=False) for id in stale: # Re-generate import errors in case this module was loaded from the cache. if graph[id].meta: graph[id].verify_dependencies(suppressed_only=True) + # We may already have parsed the modules, or not. + # If the former, parse_file() is a no-op. 
+ manager.parse_all([graph[id] for id in stale], post_parse=False) if "typing" in scc: # For historical reasons we need to manually add typing aliases # for built-in generic collections, see docstring of diff --git a/mypy/build_worker/worker.py b/mypy/build_worker/worker.py index e9f7a026037e4..4a0bac99b440b 100644 --- a/mypy/build_worker/worker.py +++ b/mypy/build_worker/worker.py @@ -113,12 +113,7 @@ def main(argv: list[str]) -> None: fscache = FileSystemCache() fscache.set_package_root(options.package_root) cached_read = fscache.read - error_formatter = None if options.output is None else OUTPUT_CHOICES.get(options.output) - errors = Errors( - options, - read_source=lambda path: read_py_file(path, cached_read), - error_formatter=error_formatter, - ) + errors = Errors(options, read_source=lambda path: read_py_file(path, cached_read)) ctx = ServerContext(options, disable_error_code, enable_error_code, errors, fscache) try: diff --git a/mypy/checker.py b/mypy/checker.py index e7c546628bc5e..58b7fedf55f20 100644 --- a/mypy/checker.py +++ b/mypy/checker.py @@ -5721,12 +5721,11 @@ def visit_decorator_inner( # Fix the type if decorated with `@types.coroutine` or `@asyncio.coroutine`. defn = e.func if defn.is_awaitable_coroutine: - typ = self.function_type(defn) - assert isinstance(typ, CallableType) + assert isinstance(defn.type, CallableType) # Update the return type to AwaitableGenerator (unless we already did). # Note, this doesn't exist in typing.py, only in typing.pyi. 
- if not is_named_instance(typ.ret_type, "typing.AwaitableGenerator"): - t = typ.ret_type + if not is_named_instance(defn.type.ret_type, "typing.AwaitableGenerator"): + t = defn.type.ret_type c = defn.is_coroutine ty = self.get_generator_yield_type(t, c) tc = self.get_generator_receive_type(t, c) @@ -5735,7 +5734,8 @@ def visit_decorator_inner( else: tr = self.get_generator_return_type(t, c) ret_type = self.named_generic_type("typing.AwaitableGenerator", [ty, tc, tr, t]) - defn.type = typ.copy_modified(ret_type=ret_type) + typ = defn.type.copy_modified(ret_type=ret_type) + defn.type = typ # Type check initialization expressions as part of top-level. if not self.can_skip_diagnostics: @@ -6771,45 +6771,25 @@ def comparison_type_narrowing_helper(self, node: ComparisonExpr) -> tuple[TypeMa else_map = {} if left_index in narrowable_operand_index_to_hash: - p_iterable_type = get_proper_type(iterable_type) - if ( - isinstance(p_iterable_type, TupleType) - and find_unpack_in_list(p_iterable_type.items) is None - ): - # For some tuples, we can do negative narrowing, e.g. 
`x not in (None,)` - all_if_maps = [] - all_else_maps = [] - for known_item in p_iterable_type.items: - # Match the should_coerce_literals logic from narrow_type_by_identity_equality - p_known_item = get_proper_type(known_item) - if is_literal_type_like(p_known_item) or ( - isinstance(p_known_item, Instance) and p_known_item.type.is_enum - ): - known_item = coerce_to_literal(known_item) - if_map, else_map = self.narrow_type_by_identity_equality( - "==", - operands=[operands[left_index], operands[right_index]], - operand_types=[item_type, known_item], - expr_indices=[0, 1], - narrowable_indices={0}, - ) - all_if_maps.append(if_map) - if is_singleton_equality_type(get_proper_type(known_item)): - all_else_maps.append(else_map) - if_map = reduce_or_conditional_type_maps(all_if_maps) - else_map = reduce_and_conditional_type_maps(all_else_maps, use_meet=True) - else: - collection_item_type = get_proper_type(builtin_item_type(iterable_type)) - if collection_item_type is not None: - if_map, else_map = self.narrow_type_by_identity_equality( - "==", - operands=[operands[left_index], operands[right_index]], - operand_types=[item_type, collection_item_type], - expr_indices=[0, 1], - narrowable_indices={0}, + collection_item_type = get_proper_type(builtin_item_type(iterable_type)) + if collection_item_type is not None: + if_map, else_map = self.narrow_type_by_identity_equality( + "==", + operands=[operands[left_index], operands[right_index]], + operand_types=[item_type, collection_item_type], + expr_indices=[0, 1], + narrowable_indices={0}, + ) + if else_map and not ( + isinstance(p_typ := get_proper_type(iterable_type), TupleType) + and all( + is_singleton_equality_type(get_proper_type(item)) + for item in p_typ.items ) - # We can't do negative narrowing, since e.g. the container could - # just be empty. + ): + # In general, we can't do negative narrowing, since e.g. the container + # could just be empty. However, we can do negative narrowing for some + # tuples e.g. 
`x not in (None,)` else_map = {} if right_index in narrowable_operand_index_to_hash: @@ -8465,9 +8445,7 @@ def visit_global_decl(self, o: GlobalDecl, /) -> None: n.node = sym.node n.kind = GDEF n.fullname = sym.node.fullname - typ = get_declaration(n) - if typ is not None: - self.binder.assign_type(n, typ, typ) + self.binder.assign_type(n, sym.node.type, sym.node.type) class TypeCheckerAsSemanticAnalyzer(SemanticAnalyzerCoreInterface): diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py index 48ea7ab51f61b..123c5f821ed29 100644 --- a/mypy/checkexpr.py +++ b/mypy/checkexpr.py @@ -1787,7 +1787,6 @@ def check_callable_call( might_have_shifted_args = ( not self.msg.prefer_simple_messages() - and len(args) >= 2 # see gh-21427 and all(k == ARG_POS for k in callee.arg_kinds) and all(k == ARG_POS for k in arg_kinds) and len(arg_kinds) == len(callee.arg_kinds) - 1 @@ -3243,7 +3242,6 @@ def combine_function_signatures(self, types: list[ProperType]) -> AnyType | Call assert types, "Trying to merge no callables" if not all(isinstance(c, CallableType) for c in types): return AnyType(TypeOfAny.special_form) - callables = cast("list[CallableType]", types) if len(callables) == 1: return callables[0] @@ -3261,11 +3259,11 @@ def combine_function_signatures(self, types: list[ProperType]) -> AnyType | Call # confusing and ought to be re-written anyways.) callables, variables = merge_typevars_in_callables_by_name(callables) - new_args: list[list[Type]] = [[] for _ in callables[0].arg_types] + new_args: list[list[Type]] = [[] for _ in range(len(callables[0].arg_types))] new_kinds = list(callables[0].arg_kinds) new_returns: list[Type] = [] - too_complex = False + too_complex = False for target in callables: # We fall back to Callable[..., Union[]] if the functions do not have # the exact same signature. 
The only exception is if one arg is optional and @@ -3278,13 +3276,15 @@ def combine_function_signatures(self, types: list[ProperType]) -> AnyType | Call for i, (new_kind, target_kind) in enumerate(zip(new_kinds, target.arg_kinds)): if new_kind == target_kind: continue - if new_kind.is_positional() and target_kind.is_positional(): + elif new_kind.is_positional() and target_kind.is_positional(): new_kinds[i] = ARG_POS else: too_complex = True break + if too_complex: - break + break # outer loop + for i, arg in enumerate(target.arg_types): new_args[i].append(arg) new_returns.append(target.ret_type) @@ -3301,8 +3301,13 @@ def combine_function_signatures(self, types: list[ProperType]) -> AnyType | Call implicit=True, ) + final_args = [] + for args_list in new_args: + new_type = make_simplified_union(args_list) + final_args.append(new_type) + return callables[0].copy_modified( - arg_types=[make_simplified_union(args) for args in new_args], + arg_types=final_args, arg_kinds=new_kinds, ret_type=union_return, variables=variables, diff --git a/mypy/config_parser.py b/mypy/config_parser.py index 97fa01b8dd215..ffc18fe541de2 100644 --- a/mypy/config_parser.py +++ b/mypy/config_parser.py @@ -189,6 +189,7 @@ def split_commas(value: str) -> list[str]: "quickstart_file": expand_path, "junit_xml": expand_path, "junit_format": check_junit_format, + "output": str, "follow_imports": check_follow_imports, "no_site_packages": bool, "plugins": lambda s: [p.strip() for p in split_commas(s)], diff --git a/mypy/nodes.py b/mypy/nodes.py index 32a694560b24b..3dafffa5570dd 100644 --- a/mypy/nodes.py +++ b/mypy/nodes.py @@ -1068,11 +1068,7 @@ def max_fixed_argc(self) -> int: return self.max_pos def is_dynamic(self) -> bool: - return ( - self.type is None - or isinstance(self.type, mypy.types.CallableType) - and self.type.implicit - ) + return self.type is None FUNCDEF_FLAGS: Final = FUNCITEM_FLAGS + [ diff --git a/mypy/semanal.py b/mypy/semanal.py index fb6f299a37956..a958043fa35c2 100644 --- 
a/mypy/semanal.py +++ b/mypy/semanal.py @@ -364,6 +364,13 @@ # string literal as a type expression. _MULTIPLE_WORDS_NONTYPE_RE = re.compile(r'\s*[^\s.\'"|\[]+\s+[^\s.\'"|\[]') +# Matches any valid Python identifier, including identifiers with Unicode characters. +# +# [^\d\W] = word character that is not a digit +# \w = word character +# \Z = match end of string; does not allow a trailing \n, unlike $ +_IDENTIFIER_RE = re.compile(r"^[^\d\W]\w*\Z", re.UNICODE) + class SemanticAnalyzer( NodeVisitor[None], SemanticAnalyzerInterface, SemanticAnalyzerPluginInterface, SplittingVisitor @@ -2103,20 +2110,18 @@ def analyze_typeddict_classdef(self, defn: ClassDef) -> bool: and defn.info.typeddict_type and not has_placeholder(defn.info.typeddict_type) ): - # Don't reprocess everything - is_typeddict = True - info = defn.info - else: - is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn) + # This is a valid TypedDict, and it is fully analyzed. + return True + is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn) if is_typeddict: + for decorator in defn.decorators: + decorator.accept(self) + if info is not None: + self.analyze_class_decorator_common(defn, info, decorator) if info is None: self.mark_incomplete(defn.name, defn) else: self.prepare_class_def(defn, info, custom_names=True) - for decorator in defn.decorators: - decorator.accept(self) - if defn.info: - self.analyze_class_decorator_common(defn, decorator) return True return False @@ -2148,7 +2153,7 @@ def analyze_namedtuple_classdef( with self.scope.class_scope(defn.info): for deco in defn.decorators: deco.accept(self) - self.analyze_class_decorator_common(defn, deco) + self.analyze_class_decorator_common(defn, defn.info, deco) with self.named_tuple_analyzer.save_namedtuple_body(info): self.analyze_class_body_common(defn) return True @@ -2230,7 +2235,7 @@ def leave_class(self) -> None: def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None: 
decorator.accept(self) - self.analyze_class_decorator_common(defn, decorator) + self.analyze_class_decorator_common(defn, defn.info, decorator) if isinstance(decorator, RefExpr): if decorator.fullname in RUNTIME_PROTOCOL_DECOS: if defn.info.is_protocol: @@ -2242,12 +2247,13 @@ def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None ): defn.info.dataclass_transform_spec = self.parse_dataclass_transform_spec(decorator) - def analyze_class_decorator_common(self, defn: ClassDef, decorator: Expression) -> None: + def analyze_class_decorator_common( + self, defn: ClassDef, info: TypeInfo, decorator: Expression + ) -> None: """Common method for applying class decorators. Called on regular classes, typeddicts, and namedtuples. """ - info = defn.info if refers_to_fullname(decorator, FINAL_DECORATOR_NAMES): info.is_final = True elif refers_to_fullname(decorator, DISJOINT_BASE_DECORATOR_NAMES): @@ -8036,9 +8042,16 @@ def try_parse_as_type_expression(self, maybe_type_expr: Expression) -> None: return elif isinstance(maybe_type_expr, StrExpr): str_value = maybe_type_expr.value # cache + # Filter out string literals with common patterns that could not + # possibly be in a type expression + if _MULTIPLE_WORDS_NONTYPE_RE.match(str_value): + # A common pattern in string literals containing a sentence. + # But cannot be a type expression. + maybe_type_expr.as_type = None + return # Filter out string literals which look like an identifier but # cannot be a type expression, for a few common reasons - if str_value.isidentifier(): + if _IDENTIFIER_RE.fullmatch(str_value): sym = self.lookup(str_value, UnboundType(str_value), suppress_errors=True) if sym is None: # Does not refer to anything in the local symbol table @@ -8064,21 +8077,13 @@ def try_parse_as_type_expression(self, maybe_type_expr: Expression) -> None: return else: # does not look like an identifier if '"' in str_value or "'" in str_value: - # Only valid inside a Literal[...] or Annotated[..., ...] 
type + # Only valid inside a Literal[...] type if "[" not in str_value: - # Cannot be a Literal[...] or Annotated[..., ...] type + # Cannot be a Literal[...] type maybe_type_expr.as_type = None return - elif len(str_value) < 2 or str_value.isspace(): - # Whitespace-only strings cannot be valid types. Very short strings can - # only be valid if they are identifiers, but we already checked for those. - maybe_type_expr.as_type = None - return - # Filter out string literals with common patterns that could not - # possibly be in a type expression - if _MULTIPLE_WORDS_NONTYPE_RE.match(str_value): - # A common pattern in string literals containing a sentence. - # But cannot be a type expression. + elif str_value == "": + # Empty string is not a valid type maybe_type_expr.as_type = None return elif isinstance(maybe_type_expr, IndexExpr): diff --git a/mypy/solve.py b/mypy/solve.py index 4a5eec47ca60d..e3709106996cd 100644 --- a/mypy/solve.py +++ b/mypy/solve.py @@ -17,7 +17,6 @@ AnyType, Instance, NoneType, - Overloaded, ParamSpecType, ProperType, TupleType, @@ -254,9 +253,6 @@ def _join_sorted_key(t: Type) -> int: return -2 if isinstance(t, NoneType): return -1 - - if isinstance(t, Overloaded): - return 1 return 0 diff --git a/mypy/subtypes.py b/mypy/subtypes.py index 5733797326e88..b8e8d5e3b79df 100644 --- a/mypy/subtypes.py +++ b/mypy/subtypes.py @@ -312,8 +312,6 @@ def _is_subtype( # ErasedType as we do for non-proper subtyping. return True - # Cases specific w.r.t. right type are easier to handle before entering the SubtypeVisitor. - # Currently, these include Union types and TypeVarType with values. 
if isinstance(right, UnionType) and not isinstance(left, UnionType): # Normally, when 'left' is not itself a union, the only way # 'left' can be a subtype of the union 'right' is if it is a @@ -362,17 +360,6 @@ def _is_subtype( elif is_subtype_of_item: return True # otherwise, fall through - - if isinstance(right, TypeVarType) and right.values and not isinstance(left, TypeVarType): - if proper_subtype: - if all( - is_proper_subtype(orig_left, v, subtype_context=subtype_context) - for v in right.values - ): - return True - elif all(is_subtype(orig_left, v, subtype_context=subtype_context) for v in right.values): - return True - return left.accept(SubtypeVisitor(orig_right, subtype_context, proper_subtype)) diff --git a/mypy/test/test_config_parser.py b/mypy/test/test_config_parser.py index 597143738f23c..a0aef0a209b7c 100644 --- a/mypy/test/test_config_parser.py +++ b/mypy/test/test_config_parser.py @@ -5,10 +5,12 @@ import tempfile import unittest from collections.abc import Iterator +from io import StringIO from pathlib import Path -from mypy.config_parser import _find_config_file +from mypy.config_parser import _find_config_file, parse_config_file from mypy.defaults import CONFIG_NAMES, SHARED_CONFIG_NAMES +from mypy.options import Options @contextlib.contextmanager @@ -128,3 +130,17 @@ def test_precedence_missing_section(self) -> None: result = _find_config_file() assert result is not None assert Path(result[2]).resolve() == parent_mypy.resolve() + + +class ParseConfigFileSuite(unittest.TestCase): + def test_output_option_with_none_default(self) -> None: + with tempfile.TemporaryDirectory() as _tmpdir: + config = Path(_tmpdir) / "mypy.ini" + write_config(config, content="[mypy]\noutput = json\n") + + options = Options() + stderr = StringIO() + parse_config_file(options, lambda: None, str(config), stderr=stderr) + + assert options.output == "json" + assert stderr.getvalue() == "" diff --git a/mypy/test/testoutput.py b/mypy/test/testoutput.py index 
e18302cbf92a0..41f6881658c8c 100644 --- a/mypy/test/testoutput.py +++ b/mypy/test/testoutput.py @@ -8,7 +8,6 @@ import os import os.path -import re from mypy import api from mypy.defaults import PYTHON3_VERSION @@ -25,12 +24,7 @@ def run_case(self, testcase: DataDrivenTestCase) -> None: def test_output_json(testcase: DataDrivenTestCase) -> None: """Runs Mypy in a subprocess, and ensures that `--output=json` works as intended.""" - program_text = "\n".join(testcase.input) - flags_match = re.search("# flags: (.*)$", program_text, flags=re.MULTILINE) - if flags_match is not None: - mypy_cmdline = flags_match.group(1).split() - else: - mypy_cmdline = ["--output=json"] + mypy_cmdline = ["--output=json"] mypy_cmdline.append(f"--python-version={'.'.join(map(str, PYTHON3_VERSION))}") # Write the program to a file. diff --git a/mypy/typeshed/stdlib/_codecs.pyi b/mypy/typeshed/stdlib/_codecs.pyi index 89cb78c33571d..89f97edb9ba81 100644 --- a/mypy/typeshed/stdlib/_codecs.pyi +++ b/mypy/typeshed/stdlib/_codecs.pyi @@ -77,9 +77,7 @@ def ascii_decode(data: ReadableBuffer, errors: str | None = None, /) -> tuple[st def ascii_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ... def charmap_decode(data: ReadableBuffer, errors: str | None = None, mapping: _CharMap | None = None, /) -> tuple[str, int]: ... def charmap_encode(str: str, errors: str | None = None, mapping: _CharMap | None = None, /) -> tuple[bytes, int]: ... - -# Docs say this accepts a bytes-like object, but in practice it also accepts str. -def escape_decode(data: str | ReadableBuffer, errors: str | None = None, /) -> tuple[bytes, int]: ... +def escape_decode(data: str | ReadableBuffer, errors: str | None = None, /) -> tuple[bytes, int]: ... def escape_encode(data: bytes, errors: str | None = None, /) -> tuple[bytes, int]: ... def latin_1_decode(data: ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def latin_1_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ... diff --git a/mypy/typeshed/stdlib/_io.pyi b/mypy/typeshed/stdlib/_io.pyi index ed8eff2759a98..2d2a60e4dddf1 100644 --- a/mypy/typeshed/stdlib/_io.pyi +++ b/mypy/typeshed/stdlib/_io.pyi @@ -9,7 +9,7 @@ from types import TracebackType from typing import IO, Any, BinaryIO, Final, Generic, Literal, Protocol, TextIO, TypeVar, overload, type_check_only from typing_extensions import Self, disjoint_base -_S = TypeVar("_S", bound=str) +_T = TypeVar("_T") if sys.version_info >= (3, 14): DEFAULT_BUFFER_SIZE: Final = 131072 @@ -298,4 +298,4 @@ if sys.version_info >= (3, 10): @overload def text_encoding(encoding: None, stacklevel: int = 2, /) -> Literal["locale", "utf-8"]: ... @overload - def text_encoding(encoding: _S, stacklevel: int = 2, /) -> _S: ... + def text_encoding(encoding: _T, stacklevel: int = 2, /) -> _T: ... diff --git a/mypy/typeshed/stdlib/_operator.pyi b/mypy/typeshed/stdlib/_operator.pyi index 8c705065bde7d..e1ef5c4bf0678 100644 --- a/mypy/typeshed/stdlib/_operator.pyi +++ b/mypy/typeshed/stdlib/_operator.pyi @@ -97,20 +97,20 @@ def setitem(a: MutableSequence[_T], b: slice[int | None], c: Sequence[_T], /) -> @overload def setitem(a: MutableMapping[_K, _V], b: _K, c: _V, /) -> None: ... def length_hint(obj: object, default: int = 0, /) -> int: ... -def iadd(a, b, /): ... -def iand(a, b, /): ... -def iconcat(a, b, /): ... -def ifloordiv(a, b, /): ... -def ilshift(a, b, /): ... -def imod(a, b, /): ... -def imul(a, b, /): ... -def imatmul(a, b, /): ... -def ior(a, b, /): ... -def ipow(a, b, /): ... -def irshift(a, b, /): ... -def isub(a, b, /): ... -def itruediv(a, b, /): ... -def ixor(a, b, /): ... +def iadd(a: Any, b: Any, /) -> Any: ... +def iand(a: Any, b: Any, /) -> Any: ... +def iconcat(a: Any, b: Any, /) -> Any: ... +def ifloordiv(a: Any, b: Any, /) -> Any: ... +def ilshift(a: Any, b: Any, /) -> Any: ... +def imod(a: Any, b: Any, /) -> Any: ... 
+def imul(a: Any, b: Any, /) -> Any: ... +def imatmul(a: Any, b: Any, /) -> Any: ... +def ior(a: Any, b: Any, /) -> Any: ... +def ipow(a: Any, b: Any, /) -> Any: ... +def irshift(a: Any, b: Any, /) -> Any: ... +def isub(a: Any, b: Any, /) -> Any: ... +def itruediv(a: Any, b: Any, /) -> Any: ... +def ixor(a: Any, b: Any, /) -> Any: ... if sys.version_info >= (3, 11): def call(obj: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs) -> _R: ... diff --git a/mypy/typeshed/stdlib/_socket.pyi b/mypy/typeshed/stdlib/_socket.pyi index 918bffc7f9085..372b35f22f175 100644 --- a/mypy/typeshed/stdlib/_socket.pyi +++ b/mypy/typeshed/stdlib/_socket.pyi @@ -741,7 +741,7 @@ class socket: def proto(self) -> int: ... # F811: "Redefinition of unused `timeout`" @property - def timeout(self) -> float | None: ... + def timeout(self) -> float | None: ... # noqa: F811 if sys.platform == "win32": def __init__( self, family: int = ..., type: int = ..., proto: int = ..., fileno: SupportsIndex | bytes | None = None @@ -838,7 +838,7 @@ def inet_ntop(address_family: int, packed_ip: ReadableBuffer, /) -> str: ... def getdefaulttimeout() -> float | None: ... # F811: "Redefinition of unused `timeout`" -def setdefaulttimeout(timeout: float | None, /) -> None: ... +def setdefaulttimeout(timeout: float | None, /) -> None: ... # noqa: F811 if sys.platform != "win32": def sethostname(name: str, /) -> None: ... 
diff --git a/mypy/typeshed/stdlib/_sqlite3.pyi b/mypy/typeshed/stdlib/_sqlite3.pyi index 5361584d6b184..437a9c9766829 100644 --- a/mypy/typeshed/stdlib/_sqlite3.pyi +++ b/mypy/typeshed/stdlib/_sqlite3.pyi @@ -171,7 +171,7 @@ if sys.version_info >= (3, 11): SQLITE_IOERR_VNODE: Final = 6922 SQLITE_IOERR_WRITE: Final = 778 SQLITE_LIMIT_ATTACHED: Final = 7 - SQLITE_LIMIT_COLUMN: Final = 2 + SQLITE_LIMIT_COLUMN: Final = 2 SQLITE_LIMIT_COMPOUND_SELECT: Final = 4 SQLITE_LIMIT_EXPR_DEPTH: Final = 3 SQLITE_LIMIT_FUNCTION_ARG: Final = 6 diff --git a/mypy/typeshed/stdlib/_ssl.pyi b/mypy/typeshed/stdlib/_ssl.pyi index e84b24e8f4db7..d8cb9d49e7820 100644 --- a/mypy/typeshed/stdlib/_ssl.pyi +++ b/mypy/typeshed/stdlib/_ssl.pyi @@ -183,8 +183,8 @@ CERT_REQUIRED: Final = 2 # verify flags VERIFY_DEFAULT: Final = 0 -VERIFY_CRL_CHECK_LEAF: Final = 0x04 -VERIFY_CRL_CHECK_CHAIN: Final = 0x0C +VERIFY_CRL_CHECK_LEAF: Final = 0x4 +VERIFY_CRL_CHECK_CHAIN: Final = 0xC VERIFY_X509_STRICT: Final = 0x20 VERIFY_X509_TRUSTED_FIRST: Final = 0x8000 if sys.version_info >= (3, 10): @@ -230,7 +230,7 @@ PROTOCOL_TLSv1_1: Final = 4 PROTOCOL_TLSv1_2: Final = 5 # protocol options -OP_ALL: Final[int] +OP_ALL: Final = 0x80000050 OP_NO_SSLv2: Final = 0x0 OP_NO_SSLv3: Final = 0x2000000 OP_NO_TLSv1: Final = 0x4000000 diff --git a/mypy/typeshed/stdlib/_typeshed/__init__.pyi b/mypy/typeshed/stdlib/_typeshed/__init__.pyi index c006322b81451..89e93ab027069 100644 --- a/mypy/typeshed/stdlib/_typeshed/__init__.pyi +++ b/mypy/typeshed/stdlib/_typeshed/__init__.pyi @@ -126,12 +126,6 @@ class SupportsMul(Protocol[_T_contra, _T_co]): class SupportsRMul(Protocol[_T_contra, _T_co]): def __rmul__(self, x: _T_contra, /) -> _T_co: ... -class SupportsMod(Protocol[_T_contra, _T_co]): - def __mod__(self, other: _T_contra, /) -> _T_co: ... - -class SupportsRMod(Protocol[_T_contra, _T_co]): - def __rmod__(self, other: _T_contra, /) -> _T_co: ...
- class SupportsDivMod(Protocol[_T_contra, _T_co]): def __divmod__(self, other: _T_contra, /) -> _T_co: ... diff --git a/mypy/typeshed/stdlib/argparse.pyi b/mypy/typeshed/stdlib/argparse.pyi index 7d4bd1a3a8418..c2a6f36911968 100644 --- a/mypy/typeshed/stdlib/argparse.pyi +++ b/mypy/typeshed/stdlib/argparse.pyi @@ -85,7 +85,7 @@ class _ActionsContainer: const: Any = ..., default: Any = ..., type: _ActionType = ..., - choices: Iterable[Any] | None = ..., # choices must match the type specified + choices: Iterable[_T] | None = ..., required: bool = ..., help: str | None = ..., metavar: str | tuple[str, ...] | None = ..., @@ -170,7 +170,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): usage: str | None = None, description: str | None = None, epilog: str | None = None, - parents: Iterable[ArgumentParser] = [], + parents: Sequence[ArgumentParser] = [], formatter_class: _FormatterClass = ..., prefix_chars: str = "-", fromfile_prefix_chars: str | None = None, @@ -190,7 +190,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): usage: str | None = None, description: str | None = None, epilog: str | None = None, - parents: Iterable[ArgumentParser] = [], + parents: Sequence[ArgumentParser] = [], formatter_class: _FormatterClass = ..., prefix_chars: str = "-", fromfile_prefix_chars: str | None = None, @@ -202,9 +202,9 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): ) -> None: ... @overload - def parse_args(self, args: Iterable[str] | None = None, namespace: None = None) -> Namespace: ... + def parse_args(self, args: Sequence[str] | None = None, namespace: None = None) -> Namespace: ... @overload - def parse_args(self, args: Iterable[str] | None, namespace: _N) -> _N: ... + def parse_args(self, args: Sequence[str] | None, namespace: _N) -> _N: ... @overload def parse_args(self, *, namespace: _N) -> _N: ... @overload @@ -241,26 +241,26 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): def format_usage(self) -> str: ... 
def format_help(self) -> str: ... @overload - def parse_known_args(self, args: Iterable[str] | None = None, namespace: None = None) -> tuple[Namespace, list[str]]: ... + def parse_known_args(self, args: Sequence[str] | None = None, namespace: None = None) -> tuple[Namespace, list[str]]: ... @overload - def parse_known_args(self, args: Iterable[str] | None, namespace: _N) -> tuple[_N, list[str]]: ... + def parse_known_args(self, args: Sequence[str] | None, namespace: _N) -> tuple[_N, list[str]]: ... @overload def parse_known_args(self, *, namespace: _N) -> tuple[_N, list[str]]: ... def convert_arg_line_to_args(self, arg_line: str) -> list[str]: ... def exit(self, status: int = 0, message: str | None = None) -> NoReturn: ... def error(self, message: str) -> NoReturn: ... @overload - def parse_intermixed_args(self, args: Iterable[str] | None = None, namespace: None = None) -> Namespace: ... + def parse_intermixed_args(self, args: Sequence[str] | None = None, namespace: None = None) -> Namespace: ... @overload - def parse_intermixed_args(self, args: Iterable[str] | None, namespace: _N) -> _N: ... + def parse_intermixed_args(self, args: Sequence[str] | None, namespace: _N) -> _N: ... @overload def parse_intermixed_args(self, *, namespace: _N) -> _N: ... @overload def parse_known_intermixed_args( - self, args: Iterable[str] | None = None, namespace: None = None + self, args: Sequence[str] | None = None, namespace: None = None ) -> tuple[Namespace, list[str]]: ... @overload - def parse_known_intermixed_args(self, args: Iterable[str] | None, namespace: _N) -> tuple[_N, list[str]]: ... + def parse_known_intermixed_args(self, args: Sequence[str] | None, namespace: _N) -> tuple[_N, list[str]]: ... @overload def parse_known_intermixed_args(self, *, namespace: _N) -> tuple[_N, list[str]]: ... # undocumented @@ -346,7 +346,7 @@ class HelpFormatter: def _metavar_formatter(self, action: Action, default_metavar: str) -> Callable[[int], tuple[str, ...]]: ... 
def _format_args(self, action: Action, default_metavar: str) -> str: ... def _expand_help(self, action: Action) -> str: ... - def _iter_indented_subactions(self, action: Action) -> Generator[Action]: ... + def _iter_indented_subactions(self, action: Action) -> Generator[Action, None, None]: ... def _split_lines(self, text: str, width: int) -> list[str]: ... def _fill_text(self, text: str, width: int, indent: str) -> str: ... def _get_help_string(self, action: Action) -> str | None: ... @@ -785,13 +785,13 @@ class _SubParsersAction(Action, Generic[_ArgumentParserT]): *, deprecated: bool = False, help: str | None = ..., - aliases: Iterable[str] = ..., + aliases: Sequence[str] = ..., # Kwargs from ArgumentParser constructor prog: str | None = ..., usage: str | None = ..., description: str | None = ..., epilog: str | None = ..., - parents: Iterable[_ArgumentParserT] = ..., + parents: Sequence[_ArgumentParserT] = ..., formatter_class: _FormatterClass = ..., prefix_chars: str = ..., fromfile_prefix_chars: str | None = ..., @@ -811,13 +811,13 @@ class _SubParsersAction(Action, Generic[_ArgumentParserT]): *, deprecated: bool = False, help: str | None = ..., - aliases: Iterable[str] = ..., + aliases: Sequence[str] = ..., # Kwargs from ArgumentParser constructor prog: str | None = ..., usage: str | None = ..., description: str | None = ..., epilog: str | None = ..., - parents: Iterable[_ArgumentParserT] = ..., + parents: Sequence[_ArgumentParserT] = ..., formatter_class: _FormatterClass = ..., prefix_chars: str = ..., fromfile_prefix_chars: str | None = ..., @@ -834,13 +834,13 @@ class _SubParsersAction(Action, Generic[_ArgumentParserT]): name: str, *, help: str | None = ..., - aliases: Iterable[str] = ..., + aliases: Sequence[str] = ..., # Kwargs from ArgumentParser constructor prog: str | None = ..., usage: str | None = ..., description: str | None = ..., epilog: str | None = ..., - parents: Iterable[_ArgumentParserT] = ..., + parents: Sequence[_ArgumentParserT] = ..., 
formatter_class: _FormatterClass = ..., prefix_chars: str = ..., fromfile_prefix_chars: str | None = ..., diff --git a/mypy/typeshed/stdlib/codecs.pyi b/mypy/typeshed/stdlib/codecs.pyi index 9164a4a626d4f..4dfe3fd9e8510 100644 --- a/mypy/typeshed/stdlib/codecs.pyi +++ b/mypy/typeshed/stdlib/codecs.pyi @@ -196,8 +196,8 @@ def open( filename: str, mode: str = "r", encoding: str | None = None, errors: str = "strict", buffering: int = -1 ) -> StreamReaderWriter: ... def EncodedFile(file: _Stream, data_encoding: str, file_encoding: str | None = None, errors: str = "strict") -> StreamRecoder: ... -def iterencode(iterator: Iterable[str], encoding: str, errors: str = "strict") -> Generator[bytes]: ... -def iterdecode(iterator: Iterable[bytes], encoding: str, errors: str = "strict") -> Generator[str]: ... +def iterencode(iterator: Iterable[str], encoding: str, errors: str = "strict") -> Generator[bytes, None, None]: ... +def iterdecode(iterator: Iterable[bytes], encoding: str, errors: str = "strict") -> Generator[str, None, None]: ... BOM: Final[Literal[b"\xff\xfe", b"\xfe\xff"]] # depends on `sys.byteorder` BOM_BE: Final = b"\xfe\xff" diff --git a/mypy/typeshed/stdlib/concurrent/futures/process.pyi b/mypy/typeshed/stdlib/concurrent/futures/process.pyi index 282bafa0b611c..071b3aba5d330 100644 --- a/mypy/typeshed/stdlib/concurrent/futures/process.pyi +++ b/mypy/typeshed/stdlib/concurrent/futures/process.pyi @@ -97,7 +97,7 @@ class _SafeQueue(Queue[Future[Any]]): def _on_queue_feeder_error(self, e: Exception, obj: _CallItem) -> None: ... -def _get_chunks(*iterables: Any, chunksize: int) -> Generator[tuple[Any, ...]]: ... +def _get_chunks(*iterables: Any, chunksize: int) -> Generator[tuple[Any, ...], None, None]: ... def _process_chunk(fn: Callable[..., _T], chunk: Iterable[tuple[Any, ...]]) -> list[_T]: ... 
if sys.version_info >= (3, 11): diff --git a/mypy/typeshed/stdlib/concurrent/interpreters/__init__.pyi b/mypy/typeshed/stdlib/concurrent/interpreters/__init__.pyi index 171fadb2202be..3839e6bef09b6 100644 --- a/mypy/typeshed/stdlib/concurrent/interpreters/__init__.pyi +++ b/mypy/typeshed/stdlib/concurrent/interpreters/__init__.pyi @@ -5,7 +5,7 @@ from collections.abc import Callable from typing import Any, Literal, TypeVar from typing_extensions import ParamSpec, Self -if sys.version_info >= (3, 14): # needed to satisfy pyright checks for Python <= 3.13 +if sys.version_info >= (3, 13): # needed to satisfy pyright checks for Python <3.13 from _interpreters import ( InterpreterError as InterpreterError, InterpreterNotFoundError as InterpreterNotFoundError, diff --git a/mypy/typeshed/stdlib/concurrent/interpreters/_crossinterp.pyi b/mypy/typeshed/stdlib/concurrent/interpreters/_crossinterp.pyi index 50fe7cf0b4ba4..7cf1ea34786ed 100644 --- a/mypy/typeshed/stdlib/concurrent/interpreters/_crossinterp.pyi +++ b/mypy/typeshed/stdlib/concurrent/interpreters/_crossinterp.pyi @@ -3,7 +3,7 @@ from collections.abc import Callable from typing import Final, NewType from typing_extensions import Never, Self, TypeAlias -if sys.version_info >= (3, 14): # needed to satisfy pyright checks for Python <= 3.13 +if sys.version_info >= (3, 13): # needed to satisfy pyright checks for Python <3.13 from _interpqueues import _UnboundOp class ItemInterpreterDestroyed(Exception): ... 
diff --git a/mypy/typeshed/stdlib/concurrent/interpreters/_queues.pyi b/mypy/typeshed/stdlib/concurrent/interpreters/_queues.pyi index b4a4fd56dd45d..bdf08d93d1e00 100644 --- a/mypy/typeshed/stdlib/concurrent/interpreters/_queues.pyi +++ b/mypy/typeshed/stdlib/concurrent/interpreters/_queues.pyi @@ -3,7 +3,7 @@ import sys from typing import Final, SupportsIndex from typing_extensions import Self -if sys.version_info >= (3, 14): # needed to satisfy pyright checks for Python <= 3.13 +if sys.version_info >= (3, 13): # needed to satisfy pyright checks for Python <3.13 from _interpqueues import QueueError as QueueError, QueueNotFoundError as QueueNotFoundError from . import _crossinterp diff --git a/mypy/typeshed/stdlib/configparser.pyi b/mypy/typeshed/stdlib/configparser.pyi index 9b3f02324b7fd..1e11088c3ae7a 100644 --- a/mypy/typeshed/stdlib/configparser.pyi +++ b/mypy/typeshed/stdlib/configparser.pyi @@ -289,19 +289,19 @@ class RawConfigParser(_Parser): def getint(self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None) -> int: ... @overload def getint( - self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None, fallback: _T + self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None, fallback: _T = ... ) -> int | _T: ... @overload def getfloat(self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None) -> float: ... @overload def getfloat( - self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None, fallback: _T + self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None, fallback: _T = ... ) -> float | _T: ... @overload def getboolean(self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None) -> bool: ... 
@overload def getboolean( - self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None, fallback: _T + self, section: _SectionName, option: str, *, raw: bool = False, vars: _Section | None = None, fallback: _T = ... ) -> bool | _T: ... def _get_conv( self, diff --git a/mypy/typeshed/stdlib/contextlib.pyi b/mypy/typeshed/stdlib/contextlib.pyi index 0670787a5db1b..cf831e5dcae81 100644 --- a/mypy/typeshed/stdlib/contextlib.pyi +++ b/mypy/typeshed/stdlib/contextlib.pyi @@ -165,15 +165,9 @@ class _BaseExitStack(Generic[_ExitT_co]): def callback(self, callback: Callable[_P, _T], /, *args: _P.args, **kwds: _P.kwargs) -> Callable[_P, _T]: ... def pop_all(self) -> Self: ... -# this class is to avoid putting `metaclass=abc.ABCMeta` on the implementations directly, as this would make them -# appear explicitly abstract to some tools. this is due to the implementations not subclassing `AbstractContextManager` -# see note on the subclasses -@type_check_only -class _BaseExitStackAbstract(_BaseExitStack[_ExitT_co], metaclass=abc.ABCMeta): ... - -# In reality this is a subclass of `AbstractContextManager`, but we can't provide `Self` as the argument for `__enter__` -# https://discuss.python.org/t/self-as-typevar-default/90939 -class ExitStack(_BaseExitStackAbstract[_ExitT_co]): +# In reality this is a subclass of `AbstractContextManager`; +# see #7961 for why we don't do that in the stub +class ExitStack(_BaseExitStack[_ExitT_co], metaclass=abc.ABCMeta): def close(self) -> None: ... def __enter__(self) -> Self: ... 
def __exit__( @@ -185,9 +179,9 @@ _ExitCoroFunc: TypeAlias = Callable[ ] _ACM_EF = TypeVar("_ACM_EF", bound=AbstractAsyncContextManager[Any, Any] | _ExitCoroFunc) -# In reality this is a subclass of `AbstractContextManager`, but we can't provide `Self` as the argument for `__enter__` -# https://discuss.python.org/t/self-as-typevar-default/90939 -class AsyncExitStack(_BaseExitStackAbstract[_ExitT_co]): +# In reality this is a subclass of `AbstractAsyncContextManager`; +# see #7961 for why we don't do that in the stub +class AsyncExitStack(_BaseExitStack[_ExitT_co], metaclass=abc.ABCMeta): async def enter_async_context(self, cm: AbstractAsyncContextManager[_T, _ExitT_co]) -> _T: ... def push_async_exit(self, exit: _ACM_EF) -> _ACM_EF: ... def push_async_callback( diff --git a/mypy/typeshed/stdlib/email/message.pyi b/mypy/typeshed/stdlib/email/message.pyi index 08ba88b4ee6da..794882b140e61 100644 --- a/mypy/typeshed/stdlib/email/message.pyi +++ b/mypy/typeshed/stdlib/email/message.pyi @@ -130,7 +130,7 @@ class Message(Generic[_HeaderT_co, _HeaderParamT_contra]): def get_charsets(self, failobj: None = None) -> list[str | None]: ... @overload def get_charsets(self, failobj: _T) -> list[str | _T]: ... - def walk(self) -> Generator[Self]: ... + def walk(self) -> Generator[Self, None, None]: ... def get_content_disposition(self) -> str | None: ... def as_string(self, unixfrom: bool = False, maxheaderlen: int = 0, policy: Policy[Any] | None = None) -> str: ... def as_bytes(self, unixfrom: bool = False, policy: Policy[Any] | None = None) -> bytes: ... @@ -151,15 +151,13 @@ class Message(Generic[_HeaderT_co, _HeaderParamT_contra]): class MIMEPart(Message[_HeaderRegistryT_co, _HeaderRegistryParamT_contra]): def __init__(self, policy: Policy[Any] | None = None) -> None: ... - def get_body( - self, preferencelist: Sequence[str] = ("related", "html", "plain") - ) -> MIMEPart[_HeaderRegistryT_co, _HeaderRegistryParamT_contra] | None: ... 
+ def get_body(self, preferencelist: Sequence[str] = ("related", "html", "plain")) -> MIMEPart[_HeaderRegistryT_co] | None: ... def attach(self, payload: Self) -> None: ... # type: ignore[override] # The attachments are created via type(self) in the attach method. It's theoretically # possible to sneak other attachment types into a MIMEPart instance, but could cause # cause unforseen consequences. def iter_attachments(self) -> Iterator[Self]: ... - def iter_parts(self) -> Iterator[MIMEPart[_HeaderRegistryT_co, _HeaderRegistryParamT_contra]]: ... + def iter_parts(self) -> Iterator[MIMEPart[_HeaderRegistryT_co]]: ... def get_content(self, *args: Any, content_manager: ContentManager | None = None, **kw: Any) -> Any: ... def set_content(self, *args: Any, content_manager: ContentManager | None = None, **kw: Any) -> None: ... def make_related(self, boundary: str | None = None) -> None: ... @@ -173,4 +171,4 @@ class MIMEPart(Message[_HeaderRegistryT_co, _HeaderRegistryParamT_contra]): def as_string(self, unixfrom: bool = False, maxheaderlen: int | None = None, policy: Policy[Any] | None = None) -> str: ... def is_attachment(self) -> bool: ... -class EmailMessage(MIMEPart[_HeaderRegistryT_co, _HeaderRegistryParamT_contra]): ... +class EmailMessage(MIMEPart): ... diff --git a/mypy/typeshed/stdlib/json/__init__.pyi b/mypy/typeshed/stdlib/json/__init__.pyi index 454a235ecf703..63e9718ee1512 100644 --- a/mypy/typeshed/stdlib/json/__init__.pyi +++ b/mypy/typeshed/stdlib/json/__init__.pyi @@ -1,6 +1,6 @@ from _typeshed import SupportsRead, SupportsWrite from collections.abc import Callable -from typing import Any, Literal +from typing import Any from .decoder import JSONDecodeError as JSONDecodeError, JSONDecoder as JSONDecoder from .encoder import JSONEncoder as JSONEncoder @@ -58,6 +58,4 @@ def load( object_pairs_hook: Callable[[list[tuple[Any, Any]]], Any] | None = None, **kwds: Any, ) -> Any: ... 
-def detect_encoding( - b: bytes | bytearray, -) -> Literal["utf-8", "utf-8-sig", "utf-16", "utf-16-be", "utf-16-le", "utf-32", "utf-32-be", "utf-32-le"]: ... # undocumented +def detect_encoding(b: bytes | bytearray) -> str: ... # undocumented diff --git a/mypy/typeshed/stdlib/lib2to3/fixes/fix_except.pyi b/mypy/typeshed/stdlib/lib2to3/fixes/fix_except.pyi index 0d856e6b0b7d2..30930a2c381e9 100644 --- a/mypy/typeshed/stdlib/lib2to3/fixes/fix_except.pyi +++ b/mypy/typeshed/stdlib/lib2to3/fixes/fix_except.pyi @@ -6,7 +6,7 @@ from ..pytree import Base _N = TypeVar("_N", bound=Base) -def find_excepts(nodes: Iterable[_N]) -> Generator[tuple[_N, _N]]: ... +def find_excepts(nodes: Iterable[_N]) -> Generator[tuple[_N, _N], None, None]: ... class FixExcept(fixer_base.BaseFix): BM_compatible: ClassVar[Literal[True]] diff --git a/mypy/typeshed/stdlib/lib2to3/fixes/fix_import.pyi b/mypy/typeshed/stdlib/lib2to3/fixes/fix_import.pyi index 2daa18327ec09..bf4b2d00925eb 100644 --- a/mypy/typeshed/stdlib/lib2to3/fixes/fix_import.pyi +++ b/mypy/typeshed/stdlib/lib2to3/fixes/fix_import.pyi @@ -5,7 +5,7 @@ from typing import ClassVar, Literal from .. import fixer_base from ..pytree import Node -def traverse_imports(names) -> Generator[str]: ... +def traverse_imports(names) -> Generator[str, None, None]: ... class FixImport(fixer_base.BaseFix): BM_compatible: ClassVar[Literal[True]] diff --git a/mypy/typeshed/stdlib/lib2to3/fixes/fix_imports.pyi b/mypy/typeshed/stdlib/lib2to3/fixes/fix_imports.pyi index d86ebbe215a14..c747af529f440 100644 --- a/mypy/typeshed/stdlib/lib2to3/fixes/fix_imports.pyi +++ b/mypy/typeshed/stdlib/lib2to3/fixes/fix_imports.pyi @@ -8,7 +8,7 @@ from ..pytree import Node MAPPING: Final[dict[str, str]] def alternates(members): ... -def build_pattern(mapping=...) -> Generator[str]: ... +def build_pattern(mapping=...) -> Generator[str, None, None]: ... 
class FixImports(fixer_base.BaseFix): BM_compatible: ClassVar[Literal[True]] diff --git a/mypy/typeshed/stdlib/lib2to3/fixes/fix_metaclass.pyi b/mypy/typeshed/stdlib/lib2to3/fixes/fix_metaclass.pyi index 6ad25e9aac368..1b1ec82032b4f 100644 --- a/mypy/typeshed/stdlib/lib2to3/fixes/fix_metaclass.pyi +++ b/mypy/typeshed/stdlib/lib2to3/fixes/fix_metaclass.pyi @@ -8,7 +8,7 @@ def has_metaclass(parent): ... def fixup_parse_tree(cls_node) -> None: ... def fixup_simple_stmt(parent, i, stmt_node) -> None: ... def remove_trailing_newline(node) -> None: ... -def find_metas(cls_node) -> Generator[tuple[Base, int, Base]]: ... +def find_metas(cls_node) -> Generator[tuple[Base, int, Base], None, None]: ... def fixup_indent(suite) -> None: ... class FixMetaclass(fixer_base.BaseFix): diff --git a/mypy/typeshed/stdlib/lib2to3/fixes/fix_renames.pyi b/mypy/typeshed/stdlib/lib2to3/fixes/fix_renames.pyi index f095b3083ba8b..652d8f15ea1a9 100644 --- a/mypy/typeshed/stdlib/lib2to3/fixes/fix_renames.pyi +++ b/mypy/typeshed/stdlib/lib2to3/fixes/fix_renames.pyi @@ -7,7 +7,7 @@ MAPPING: Final[dict[str, dict[str, str]]] LOOKUP: Final[dict[tuple[str, str], str]] def alternates(members): ... -def build_pattern() -> Generator[str]: ... +def build_pattern() -> Generator[str, None, None]: ... class FixRenames(fixer_base.BaseFix): BM_compatible: ClassVar[Literal[True]] diff --git a/mypy/typeshed/stdlib/lib2to3/fixes/fix_urllib.pyi b/mypy/typeshed/stdlib/lib2to3/fixes/fix_urllib.pyi index ab84114f90ea3..abdcc0f62970f 100644 --- a/mypy/typeshed/stdlib/lib2to3/fixes/fix_urllib.pyi +++ b/mypy/typeshed/stdlib/lib2to3/fixes/fix_urllib.pyi @@ -5,7 +5,7 @@ from .fix_imports import FixImports MAPPING: Final[dict[str, list[tuple[Literal["urllib.request", "urllib.parse", "urllib.error"], list[str]]]]] -def build_pattern() -> Generator[str]: ... +def build_pattern() -> Generator[str, None, None]: ... class FixUrllib(FixImports): def build_pattern(self): ... 
diff --git a/mypy/typeshed/stdlib/lib2to3/refactor.pyi b/mypy/typeshed/stdlib/lib2to3/refactor.pyi index c33347ede38fd..a7f3825406488 100644 --- a/mypy/typeshed/stdlib/lib2to3/refactor.pyi +++ b/mypy/typeshed/stdlib/lib2to3/refactor.pyi @@ -69,8 +69,8 @@ class RefactoringTool: def parse_block(self, block: Iterable[str], lineno: int, indent: int) -> Node: ... def wrap_toks( self, block: Iterable[str], lineno: int, indent: int - ) -> Generator[tuple[int, str, tuple[int, int], tuple[int, int], str]]: ... - def gen_lines(self, block: Iterable[str], indent: int) -> Generator[str]: ... + ) -> Generator[tuple[int, str, tuple[int, int], tuple[int, int], str], None, None]: ... + def gen_lines(self, block: Iterable[str], indent: int) -> Generator[str, None, None]: ... class MultiprocessingUnsupported(Exception): ... diff --git a/mypy/typeshed/stdlib/os/__init__.pyi b/mypy/typeshed/stdlib/os/__init__.pyi index 66a9d1dd3bc6d..9e02fbf9e1066 100644 --- a/mypy/typeshed/stdlib/os/__init__.pyi +++ b/mypy/typeshed/stdlib/os/__init__.pyi @@ -614,12 +614,9 @@ if sys.platform == "darwin" and sys.version_info >= (3, 12): SEEK_SET: Final = 0 SEEK_CUR: Final = 1 SEEK_END: Final = 2 -if sys.platform == "linux": +if sys.platform != "win32": SEEK_DATA: Final = 3 SEEK_HOLE: Final = 4 -elif sys.platform == "darwin": - SEEK_HOLE: Final = 3 - SEEK_DATA: Final = 4 O_RDONLY: Final[int] O_WRONLY: Final[int] @@ -829,9 +826,11 @@ class stat_result(structseq[float], tuple[int, int, int, int, int, int, int, flo # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows) if sys.version_info >= (3, 12) and sys.platform == "win32": @property - @deprecated("""\ + @deprecated( + """\ Use st_birthtime instead to retrieve the file creation time. \ -In the future, this property will contain the last metadata change time.""") +In the future, this property will contain the last metadata change time.""" + ) def st_ctime(self) -> float: ... 
else: @property @@ -1532,36 +1531,7 @@ else: def WSTOPSIG(status: int) -> int: ... def WTERMSIG(status: int) -> int: ... - if sys.version_info >= (3, 15): - def posix_spawn( - path: StrOrBytesPath, - argv: _ExecVArgs, - env: _ExecEnv | None, - /, - *, - file_actions: Sequence[tuple[Any, ...]] | None = (), - setpgroup: int | None = None, # None allowed starting in 3.15 - resetids: bool = False, - setsid: bool = False, - setsigmask: Iterable[int] = (), - setsigdef: Iterable[int] = (), - scheduler: tuple[Any, sched_param] | None = None, # None allowed starting in 3.15 - ) -> int: ... - def posix_spawnp( - path: StrOrBytesPath, - argv: _ExecVArgs, - env: _ExecEnv | None, - /, - *, - file_actions: Sequence[tuple[Any, ...]] | None = (), - setpgroup: int | None = None, # None allowed starting in 3.15 - resetids: bool = False, - setsid: bool = False, - setsigmask: Iterable[int] = (), - setsigdef: Iterable[int] = (), - scheduler: tuple[Any, sched_param] | None = None, # None allowed starting in 3.15 - ) -> int: ... - elif sys.version_info >= (3, 13): + if sys.version_info >= (3, 13): def posix_spawn( path: StrOrBytesPath, argv: _ExecVArgs, diff --git a/mypy/typeshed/stdlib/pathlib/__init__.pyi b/mypy/typeshed/stdlib/pathlib/__init__.pyi index 4f094130665c8..26dde2accd8dd 100644 --- a/mypy/typeshed/stdlib/pathlib/__init__.pyi +++ b/mypy/typeshed/stdlib/pathlib/__init__.pyi @@ -189,11 +189,11 @@ class Path(PurePath): self, pattern: str, *, case_sensitive: bool | None = None, recurse_symlinks: bool = False ) -> Iterator[Self]: ... elif sys.version_info >= (3, 12): - def glob(self, pattern: str, *, case_sensitive: bool | None = None) -> Generator[Self]: ... - def rglob(self, pattern: str, *, case_sensitive: bool | None = None) -> Generator[Self]: ... + def glob(self, pattern: str, *, case_sensitive: bool | None = None) -> Generator[Self, None, None]: ... + def rglob(self, pattern: str, *, case_sensitive: bool | None = None) -> Generator[Self, None, None]: ... 
else: - def glob(self, pattern: str) -> Generator[Self]: ... - def rglob(self, pattern: str) -> Generator[Self]: ... + def glob(self, pattern: str) -> Generator[Self, None, None]: ... + def rglob(self, pattern: str) -> Generator[Self, None, None]: ... if sys.version_info >= (3, 12): def exists(self, *, follow_symlinks: bool = True) -> bool: ... @@ -208,7 +208,7 @@ class Path(PurePath): if sys.version_info >= (3, 12): def is_junction(self) -> bool: ... - def iterdir(self) -> Generator[Self]: ... + def iterdir(self) -> Generator[Self, None, None]: ... def lchmod(self, mode: int) -> None: ... def lstat(self) -> stat_result: ... def mkdir(self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False) -> None: ... diff --git a/mypy/typeshed/stdlib/sqlite3/__init__.pyi b/mypy/typeshed/stdlib/sqlite3/__init__.pyi index ec37eed8c9277..f12f80a35a975 100644 --- a/mypy/typeshed/stdlib/sqlite3/__init__.pyi +++ b/mypy/typeshed/stdlib/sqlite3/__init__.pyi @@ -365,9 +365,9 @@ class Connection: def executescript(self, sql_script: str, /) -> Cursor: ... def interrupt(self) -> None: ... if sys.version_info >= (3, 13): - def iterdump(self, *, filter: str | None = None) -> Generator[str]: ... + def iterdump(self, *, filter: str | None = None) -> Generator[str, None, None]: ... else: - def iterdump(self) -> Generator[str]: ... + def iterdump(self) -> Generator[str, None, None]: ... def rollback(self) -> None: ... 
def set_authorizer( diff --git a/mypy/typeshed/stdlib/ssl.pyi b/mypy/typeshed/stdlib/ssl.pyi index 57952cf19bbed..2053005f1ba69 100644 --- a/mypy/typeshed/stdlib/ssl.pyi +++ b/mypy/typeshed/stdlib/ssl.pyi @@ -103,14 +103,14 @@ CERT_OPTIONAL: Final = VerifyMode.CERT_OPTIONAL CERT_REQUIRED: Final = VerifyMode.CERT_REQUIRED class VerifyFlags(enum.IntFlag): - VERIFY_DEFAULT = 0x00 - VERIFY_CRL_CHECK_LEAF = 0x04 - VERIFY_CRL_CHECK_CHAIN = 0x0C - VERIFY_X509_STRICT = 0x20 - VERIFY_X509_TRUSTED_FIRST = 0x8000 + VERIFY_DEFAULT = 0 + VERIFY_CRL_CHECK_LEAF = 4 + VERIFY_CRL_CHECK_CHAIN = 12 + VERIFY_X509_STRICT = 32 + VERIFY_X509_TRUSTED_FIRST = 32768 if sys.version_info >= (3, 10): - VERIFY_ALLOW_PROXY_CERTS = 0x40 - VERIFY_X509_PARTIAL_CHAIN = 0x80000 + VERIFY_ALLOW_PROXY_CERTS = 64 + VERIFY_X509_PARTIAL_CHAIN = 524288 VERIFY_DEFAULT: Final = VerifyFlags.VERIFY_DEFAULT VERIFY_CRL_CHECK_LEAF: Final = VerifyFlags.VERIFY_CRL_CHECK_LEAF @@ -144,7 +144,7 @@ PROTOCOL_TLS_CLIENT: Final = _SSLMethod.PROTOCOL_TLS_CLIENT PROTOCOL_TLS_SERVER: Final = _SSLMethod.PROTOCOL_TLS_SERVER class Options(enum.IntFlag): - OP_ALL: int + OP_ALL = 2147483728 OP_NO_SSLv2 = 0 OP_NO_SSLv3 = 33554432 OP_NO_TLSv1 = 67108864 diff --git a/mypy/typeshed/stdlib/tokenize.pyi b/mypy/typeshed/stdlib/tokenize.pyi index 0df8062d56891..00a24b4eea07d 100644 --- a/mypy/typeshed/stdlib/tokenize.pyi +++ b/mypy/typeshed/stdlib/tokenize.pyi @@ -151,8 +151,8 @@ class Untokenizer: # Returns str, unless the ENCODING token is present, in which case it returns bytes. def untokenize(iterable: Iterable[_Token]) -> str | Any: ... def detect_encoding(readline: Callable[[], bytes | bytearray]) -> tuple[str, Sequence[bytes]]: ... -def tokenize(readline: Callable[[], bytes | bytearray]) -> Generator[TokenInfo]: ... -def generate_tokens(readline: Callable[[], str]) -> Generator[TokenInfo]: ... +def tokenize(readline: Callable[[], bytes | bytearray]) -> Generator[TokenInfo, None, None]: ... 
+def generate_tokens(readline: Callable[[], str]) -> Generator[TokenInfo, None, None]: ... def open(filename: FileDescriptorOrPath) -> TextIO: ... def group(*choices: str) -> str: ... # undocumented def any(*choices: str) -> str: ... # undocumented diff --git a/mypy/typeshed/stdlib/traceback.pyi b/mypy/typeshed/stdlib/traceback.pyi index f9d88f25afd97..4305706afa20b 100644 --- a/mypy/typeshed/stdlib/traceback.pyi +++ b/mypy/typeshed/stdlib/traceback.pyi @@ -111,7 +111,7 @@ def walk_tb(tb: TracebackType | None) -> Iterator[tuple[FrameType, int]]: ... if sys.version_info >= (3, 11): class _ExceptionPrintContext: def indent(self) -> str: ... - def emit(self, text_gen: str | Iterable[str], margin_char: str | None = None) -> Generator[str]: ... + def emit(self, text_gen: str | Iterable[str], margin_char: str | None = None) -> Generator[str, None, None]: ... class TracebackException: __cause__: TracebackException | None @@ -232,14 +232,14 @@ class TracebackException: def __eq__(self, other: object) -> bool: ... __hash__: ClassVar[None] # type: ignore[assignment] if sys.version_info >= (3, 11): - def format(self, *, chain: bool = True, _ctx: _ExceptionPrintContext | None = None) -> Generator[str]: ... + def format(self, *, chain: bool = True, _ctx: _ExceptionPrintContext | None = None) -> Generator[str, None, None]: ... else: - def format(self, *, chain: bool = True) -> Generator[str]: ... + def format(self, *, chain: bool = True) -> Generator[str, None, None]: ... if sys.version_info >= (3, 13): - def format_exception_only(self, *, show_group: bool = False, _depth: int = 0) -> Generator[str]: ... + def format_exception_only(self, *, show_group: bool = False, _depth: int = 0) -> Generator[str, None, None]: ... else: - def format_exception_only(self) -> Generator[str]: ... + def format_exception_only(self) -> Generator[str, None, None]: ... if sys.version_info >= (3, 11): def print(self, *, file: SupportsWrite[str] | None = None, chain: bool = True) -> None: ... 
diff --git a/mypy/typeshed/stdlib/typing_extensions.pyi b/mypy/typeshed/stdlib/typing_extensions.pyi index 406005cc4c561..16cf0611a58ad 100644 --- a/mypy/typeshed/stdlib/typing_extensions.pyi +++ b/mypy/typeshed/stdlib/typing_extensions.pyi @@ -220,7 +220,7 @@ def runtime_checkable(cls: _TC) -> _TC: ... runtime = runtime_checkable Final: _SpecialForm -def final(f: _T) -> _T: ... +def final(f: _F) -> _F: ... def disjoint_base(cls: _TC) -> _TC: ... Literal: _SpecialForm diff --git a/mypy/typeshed/stdlib/xml/etree/ElementPath.pyi b/mypy/typeshed/stdlib/xml/etree/ElementPath.pyi index 5c03dd014b639..80f3c55c14899 100644 --- a/mypy/typeshed/stdlib/xml/etree/ElementPath.pyi +++ b/mypy/typeshed/stdlib/xml/etree/ElementPath.pyi @@ -8,10 +8,10 @@ xpath_tokenizer_re: Final[Pattern[str]] _Token: TypeAlias = tuple[str, str] _Next: TypeAlias = Callable[[], _Token] -_Callback: TypeAlias = Callable[[_SelectorContext, Iterable[Element]], Generator[Element]] +_Callback: TypeAlias = Callable[[_SelectorContext, Iterable[Element]], Generator[Element, None, None]] _T = TypeVar("_T") -def xpath_tokenizer(pattern: str, namespaces: dict[str, str] | None = None) -> Generator[_Token]: ... +def xpath_tokenizer(pattern: str, namespaces: dict[str, str] | None = None) -> Generator[_Token, None, None]: ... def get_parent_map(context: _SelectorContext) -> dict[Element, Element]: ... def prepare_child(next: _Next, token: _Token) -> _Callback: ... def prepare_star(next: _Next, token: _Token) -> _Callback: ... @@ -32,7 +32,7 @@ def iterfind( # type: ignore[overload-overlap] elem: Element[Any], path: Literal[""], namespaces: dict[str, str] | None = None ) -> None: ... @overload -def iterfind(elem: Element[Any], path: str, namespaces: dict[str, str] | None = None) -> Generator[Element]: ... +def iterfind(elem: Element[Any], path: str, namespaces: dict[str, str] | None = None) -> Generator[Element, None, None]: ... 
def find(elem: Element[Any], path: str, namespaces: dict[str, str] | None = None) -> Element | None: ... def findall(elem: Element[Any], path: str, namespaces: dict[str, str] | None = None) -> list[Element]: ... @overload diff --git a/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi b/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi index 6340a44bd51c8..d728fb975bfb9 100644 --- a/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi +++ b/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi @@ -106,12 +106,12 @@ class Element(Generic[_Tag]): def get(self, key: str, default: _T) -> str | _T: ... def insert(self, index: int, subelement: Element[Any], /) -> None: ... def items(self) -> ItemsView[str, str]: ... - def iter(self, tag: str | None = None) -> Generator[Element]: ... + def iter(self, tag: str | None = None) -> Generator[Element, None, None]: ... @overload def iterfind(self, path: Literal[""], namespaces: dict[str, str] | None = None) -> None: ... # type: ignore[overload-overlap] @overload - def iterfind(self, path: str, namespaces: dict[str, str] | None = None) -> Generator[Element]: ... - def itertext(self) -> Generator[str]: ... + def iterfind(self, path: str, namespaces: dict[str, str] | None = None) -> Generator[Element, None, None]: ... + def itertext(self) -> Generator[str, None, None]: ... def keys(self) -> dict_keys[str, str]: ... # makeelement returns the type of self in Python impl, but not in C impl def makeelement(self, tag: _OtherTag, attrib: dict[str, str], /) -> Element[_OtherTag]: ... @@ -159,7 +159,7 @@ class ElementTree(Generic[_Root]): def getroot(self) -> _Root: ... def _setroot(self, element: Element[Any]) -> None: ... def parse(self, source: _FileRead, parser: XMLParser | None = None) -> Element: ... - def iter(self, tag: str | None = None) -> Generator[Element]: ... + def iter(self, tag: str | None = None) -> Generator[Element, None, None]: ... def find(self, path: str, namespaces: dict[str, str] | None = None) -> Element | None: ... 
@overload def findtext(self, path: str, default: None = None, namespaces: dict[str, str] | None = None) -> str | None: ... @@ -169,7 +169,7 @@ class ElementTree(Generic[_Root]): @overload def iterfind(self, path: Literal[""], namespaces: dict[str, str] | None = None) -> None: ... # type: ignore[overload-overlap] @overload - def iterfind(self, path: str, namespaces: dict[str, str] | None = None) -> Generator[Element]: ... + def iterfind(self, path: str, namespaces: dict[str, str] | None = None) -> Generator[Element, None, None]: ... def write( self, file_or_filename: _FileWrite, diff --git a/mypy/typeshed/stubs/librt/librt/random.pyi b/mypy/typeshed/stubs/librt/librt/random.pyi deleted file mode 100644 index d1330aa56faf1..0000000000000 --- a/mypy/typeshed/stubs/librt/librt/random.pyi +++ /dev/null @@ -1,22 +0,0 @@ -from typing import final, overload - -from mypy_extensions import i64 - -def random() -> float: ... -def randint(a: i64, b: i64) -> i64: ... -@overload -def randrange(stop: i64, /) -> i64: ... -@overload -def randrange(start: i64, stop: i64, /) -> i64: ... -def seed(n: i64, /) -> None: ... - -@final -class Random: - def __init__(self, seed: i64 | None = None) -> None: ... - def randint(self, a: i64, b: i64) -> i64: ... - @overload - def randrange(self, stop: i64, /) -> i64: ... - @overload - def randrange(self, start: i64, stop: i64, /) -> i64: ... - def random(self) -> float: ... - def seed(self, n: i64, /) -> None: ... diff --git a/mypy/version.py b/mypy/version.py index a33ca938708f4..82a0d52db14f7 100644 --- a/mypy/version.py +++ b/mypy/version.py @@ -8,7 +8,7 @@ # - Release versions have the form "1.2.3". # - Dev versions have the form "1.2.3+dev" (PLUS sign to conform to PEP 440). # - Before 1.0 we had the form "0.NNN". 
-__version__ = "2.2.0+dev" +__version__ = "2.1.0+dev" base_version = __version__ mypy_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) diff --git a/mypyc/build.py b/mypyc/build.py index 84633086d2724..439734e39b9ec 100644 --- a/mypyc/build.py +++ b/mypyc/build.py @@ -121,7 +121,6 @@ class ModDesc(NamedTuple): ["vecs"], ), ModDesc("librt.time", ["time/librt_time.c"], ["time/librt_time.h"], []), - ModDesc("librt.random", ["random/librt_random.c"], ["random/librt_random.h"], ["random"]), ] try: @@ -450,70 +449,6 @@ def write_file(path: str, contents: str) -> None: os.utime(path, times=(new_mtime, new_mtime)) -_MYPYC_EXTENSION_MARKER = "_mypyc_skip_redundant_inplace_copy" -_setuptools_patch_applied = False - - -def _patch_setuptools_copy_extensions_to_source() -> None: - """Skip redundant `.so` copies for extensions we generated. - - setuptools' copy_extensions_to_source rewrites every `.so` in the - source tree on every build_ext, even when nothing changed. On macOS - this invalidates AMFI's signature cache (~100 ms re-verification per - `.so` on the next import), eating most of the separate=True - incremental speedup. - - The patch is global because copy_extensions_to_source runs during - setup()'s build_ext command, after mypycify() has already returned; - we can't scope a context manager around it. Instead the skip only - fires for extensions tagged by mypycify (via the marker attribute), - so other setuptools users in the same setup.py see the unmodified - upstream behavior, including stub writes. - """ - global _setuptools_patch_applied - if _setuptools_patch_applied: - return - _setuptools_patch_applied = True - - from setuptools.command.build_ext import build_ext as _build_ext - - original = _build_ext.copy_extensions_to_source - - def _files_match(a: str, b: str) -> bool: - try: - sa = os.stat(a) - sb = os.stat(b) - except OSError: - return False - # Compare size + whole-second mtime. 
distutils' copy_file - # propagates the source mtime, but macOS drops sub-second - # precision on write so the float values never match verbatim. - return sa.st_size == sb.st_size and int(sa.st_mtime) == int(sb.st_mtime) - - def patched(self: Any) -> None: - build_py = self.get_finalized_command("build_py") - - def is_redundant(ext: Any) -> bool: - if not getattr(ext, _MYPYC_EXTENSION_MARKER, False): - return False - inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) - return _files_match(regular_file, inplace_file) - - # Hide our already-fresh extensions from setuptools' loop and - # let it handle whatever's left. Delegating instead of - # reimplementing the body means future setuptools changes carry - # over for free. self.extensions is restored before we return - # so anything that inspects it later sees the original list. - saved = self.extensions - self.extensions = [ext for ext in saved if not is_redundant(ext)] - try: - original(self) - finally: - self.extensions = saved - - _build_ext.copy_extensions_to_source = patched # type: ignore[method-assign] - - def construct_groups( sources: list[BuildSource], separate: bool | list[tuple[list[str], str | None]], @@ -550,11 +485,8 @@ def construct_groups( else: groups = [(sources, None)] - # Generate missing names. - # Sort the modules to make the compilation results consistent regardless of - # the source file order passed to mypycify. 
+ # Generate missing names for i, (group, name) in enumerate(groups): - group = sorted(group, key=lambda source: source.module) if use_shared_lib and not name: if group_name_override is not None: name = group_name_override @@ -562,7 +494,6 @@ def construct_groups( name = group_name([source.module for source in group]) groups[i] = (group, name) - groups = sorted(groups, key=lambda g: (g[1] or "", [s.module for s in g[0]])) return groups @@ -577,7 +508,7 @@ def get_header_deps(cfiles: list[tuple[str, str]]) -> list[str]: """ headers: set[str] = set() for _, contents in cfiles: - headers.update(re.findall(r'#include [<"]([^>"]+)[>"]', contents)) + headers.update(re.findall(r'#include "(.*)"', contents)) return sorted(headers) @@ -637,21 +568,12 @@ def mypyc_build( cfilenames = [] for cfile, ctext in cfiles: cfile = os.path.join(compiler_options.target_dir, cfile) - # Empty contents marks a file the previous run already wrote - # (fully-cached group): skip the rewrite and just reuse it. - if ctext and not options.mypyc_skip_c_generation: + if not options.mypyc_skip_c_generation: write_file(cfile, ctext) if os.path.splitext(cfile)[1] == ".c": cfilenames.append(cfile) - # The header regex matches both quote styles, so the result can - # include system headers like `` that don't live under - # target_dir. Joining those produces non-existent paths which - # would force a full rebuild on every run via Extension.depends. 
- candidate_deps = ( - os.path.join(compiler_options.target_dir, dep) for dep in get_header_deps(cfiles) - ) - deps = [d for d in candidate_deps if os.path.exists(d)] + deps = [os.path.join(compiler_options.target_dir, dep) for dep in get_header_deps(cfiles)] group_cfilenames.append((cfilenames, deps)) return groups, group_cfilenames, source_deps @@ -705,9 +627,6 @@ def get_cflags( # Disables C Preprocessor (cpp) warnings # See https://github.com/mypyc/mypyc/issues/956 "-Wno-cpp", - "-Wno-array-bounds", - "-Wno-stringop-overread", - "-Wno-stringop-overflow", ] if log_trace: cflags.append("-DMYPYC_LOG_TRACE") @@ -828,9 +747,6 @@ def mypycify( have no backward compatibility guarantees! """ - # Skip redundant inplace .so copies on every build_ext invocation. - _patch_setuptools_copy_extensions_to_source() - # Figure out our configuration compiler_options = CompilerOptions( strip_asserts=strip_asserts, @@ -945,9 +861,4 @@ def mypycify( ) ) - # Tag every extension we own so the build_ext patch knows it's - # safe to skip the redundant inplace copy for these specifically. - for ext in extensions: - setattr(ext, _MYPYC_EXTENSION_MARKER, True) - return extensions diff --git a/mypyc/codegen/emit.py b/mypyc/codegen/emit.py index b89c91343e66c..54e77836a76ca 100644 --- a/mypyc/codegen/emit.py +++ b/mypyc/codegen/emit.py @@ -83,18 +83,6 @@ NAMESPACE_TYPE_VAR: TYPE_VAR_PREFIX, } -# Map from RVec._ctype to C macro prefix for VEC_*_INCREF/DECREF/BUF macros -VEC_MACRO_PREFIX: Final = { - "VecI64": "VEC_I64", - "VecI32": "VEC_I32", - "VecI16": "VEC_I16", - "VecU8": "VEC_U8", - "VecFloat": "VEC_FLOAT", - "VecBool": "VEC_BOOL", - "VecT": "VEC_T", - "VecNested": "VEC_NESTED", -} - class HeaderDeclaration: """A representation of a declaration in C. 
@@ -245,7 +233,7 @@ def object_annotation(self, obj: object, line: str) -> str: If it contains illegal characters, an empty string is returned.""" line_width = self._indent + len(line) - formatted = pprint.pformat(obj, compact=True, indent=1, width=max(90 - line_width, 20)) + formatted = pprint.pformat(obj, compact=True, width=max(90 - line_width, 20)) if any(x in formatted for x in ("/*", "*/", "\0")): return "" @@ -326,18 +314,6 @@ def get_group_prefix(self, obj: ClassIR | FuncDecl) -> str: # See docs above return self.get_module_group_prefix(obj.module_name) - def register_group_dep(self, cl: ClassIR) -> None: - """Record `cl`'s defining group as a cross-group dep, if any. - - Call this when emitting code that refers to `cl`'s struct - layout: the .c file consuming that layout needs the defining - group's `__native_*.h` included, and group_deps drives which - headers get pulled in. - """ - target_group = self.context.group_map.get(cl.module_name) - if target_group and target_group != self.context.group_name: - self.context.group_deps.add(target_group) - def static_name(self, id: str, module: str | None, prefix: str = STATIC_PREFIX) -> str: """Create name of a C static variable. 
@@ -372,7 +348,7 @@ def ctype_spaced(self, rtype: RType) -> str: def set_undefined_value(self, target: str, rtype: RType) -> None: if isinstance(rtype, RVec): self.emit_line(f"{target}.len = -1;") - self.emit_line(f"{target}.items = NULL;") + self.emit_line(f"{target}.buf = NULL;") else: self.emit_line(f"{target} = {self.c_undefined_value(rtype)};") @@ -598,8 +574,8 @@ def emit_inc_ref(self, dest: str, rtype: RType, *, rare: bool = False) -> None: for i, item_type in enumerate(rtype.types): self.emit_inc_ref(f"{dest}.f{i}", item_type) elif isinstance(rtype, RVec): - prefix = VEC_MACRO_PREFIX[rtype._ctype] - self.emit_line(f"{prefix}_INCREF({dest});") + # TODO: Only use the X variant if buf can be NULL + self.emit_line(f"Py_XINCREF({dest}.buf);") elif not rtype.is_unboxed: # Always inline, since this is a simple but very hot op if rtype.may_be_immortal or not HAVE_IMMORTAL: @@ -629,8 +605,11 @@ def emit_dec_ref( for i, item_type in enumerate(rtype.types): self.emit_dec_ref(f"{dest}.f{i}", item_type, is_xdec=is_xdec, rare=rare) elif isinstance(rtype, RVec): - prefix = VEC_MACRO_PREFIX[rtype._ctype] - self.emit_line(f"{prefix}_DECREF({dest});") + # TODO: Only use the X variant if buf can be NULL + if rare: + self.emit_line(f"CPy_XDecRef({dest}.buf);") + else: + self.emit_line(f"CPy_XDECREF({dest}.buf);") elif not rtype.is_unboxed: if rare: self.emit_line(f"CPy_{x}DecRef({dest});") @@ -852,7 +831,7 @@ def emit_cast( item_type_c = self.vec_item_type_c(typ) check = ( f"(Py_TYPE({src}) == VecTApi.boxed_type && " - f"VEC_T_BUF(((VecTObject *){src})->vec)->item_type == {item_type_c})" + f"((VecTObject *){src})->vec.buf->item_type == {item_type_c})" ) else: # Nested vec types (vec[vec[...]]). Check boxed type, item type, and depth. 
@@ -863,8 +842,8 @@ def emit_cast( type_value = self.vec_item_type_c(typ) check = ( f"(Py_TYPE({src}) == VecNestedApi.boxed_type && " - f"VEC_NESTED_BUF(((VecNestedObject *){src})->vec)->item_type == {type_value} && " - f"VEC_NESTED_BUF(((VecNestedObject *){src})->vec)->depth == {depth})" + f"((VecNestedObject *){src})->vec.buf->item_type == {type_value} && " + f"((VecNestedObject *){src})->vec.buf->depth == {depth})" ) if likely: check = f"(likely{check})" @@ -1305,8 +1284,7 @@ def emit_gc_visit(self, target: str, rtype: RType) -> None: for i, item_type in enumerate(rtype.types): self.emit_gc_visit(f"{target}.f{i}", item_type) elif isinstance(rtype, RVec): - prefix = VEC_MACRO_PREFIX[rtype._ctype] - self.emit_line(f"if ({target}.items) {{ Py_VISIT({prefix}_BUF({target})); }}") + self.emit_line(f"Py_VISIT({target}.buf);") elif self.ctype(rtype) == "PyObject *": # The simplest case. self.emit_line(f"Py_VISIT({target});") @@ -1332,11 +1310,7 @@ def emit_gc_clear(self, target: str, rtype: RType) -> None: for i, item_type in enumerate(rtype.types): self.emit_gc_clear(f"{target}.f{i}", item_type) elif isinstance(rtype, RVec): - prefix = VEC_MACRO_PREFIX[rtype._ctype] - self.emit_line(f"if ({target}.items) {{") - self.emit_line(f" Py_DECREF({prefix}_BUF({target}));") - self.emit_line(f" {target}.items = NULL;") - self.emit_line("}") + self.emit_line(f"Py_CLEAR({target}.buf);") elif self.ctype(rtype) == "PyObject *" and self.c_undefined_value(rtype) == "NULL": # The simplest case. 
self.emit_line(f"Py_CLEAR({target});") diff --git a/mypyc/codegen/emitfunc.py b/mypyc/codegen/emitfunc.py index dcb606f6ab51b..737b002b4201b 100644 --- a/mypyc/codegen/emitfunc.py +++ b/mypyc/codegen/emitfunc.py @@ -86,18 +86,6 @@ is_tagged, ) -VEC_ITEMS_C_TYPE: Final = { - "VecI64": "int64_t *", - "VecI32": "int32_t *", - "VecI16": "int16_t *", - "VecU8": "uint8_t *", - "VecFloat": "double *", - "VecBool": "char *", - "VecT": "PyObject **", - "VecNested": "VecNestedBufItem *", - "VecNestedBufItem": "void *", -} - def native_function_type(fn: FuncIR, emitter: Emitter) -> str: return native_function_type_from_decl(fn.decl, emitter) @@ -360,11 +348,6 @@ def get_attr_expr(self, obj: str, op: GetAttr | SetAttr, decl_cl: ClassIR) -> st classes, and *(obj + attr_offset) for attributes defined by traits. We also insert all necessary C casts here. """ - # The struct cast below needs the defining group's __native.h - # included by the consuming .c file. Record both the receiver - # and declaring classes as cross-group deps. 
- self.emitter.register_group_dep(op.class_type.class_ir) - self.emitter.register_group_dep(decl_cl) cast = f"({op.class_type.struct_name(self.emitter.names)} *)" if decl_cl.is_trait and op.class_type.class_ir.is_trait: # For pure trait access find the offset first, offsets @@ -574,7 +557,7 @@ def visit_tuple_get(self, op: TupleGet) -> None: dest = self.reg(op) src = self.reg(op.src) self.emit_line(f"{dest} = {src}.f{op.index};") - if not op.is_borrowed and op.type.is_refcounted: + if not op.is_borrowed: self.emit_inc_ref(dest, op.type) def get_dest_assign(self, dest: Value) -> str: @@ -811,7 +794,7 @@ def visit_load_mem(self, op: LoadMem) -> None: # TODO: we shouldn't dereference to type that are pointer type so far type = self.ctype(op.type) self.emit_line(f"{dest} = *({type} *){src};") - if not op.is_borrowed and op.type.is_refcounted: + if not op.is_borrowed: self.emit_inc_ref(dest, op.type) def visit_set_mem(self, op: SetMem) -> None: @@ -846,8 +829,8 @@ def visit_get_element_ptr(self, op: GetElementPtr) -> None: def visit_set_element(self, op: SetElement) -> None: dest = self.reg(op) + item = self.reg(op.item) field = op.field - item = self.set_element_item(op.src.type, field, self.reg(op.item)) if isinstance(op.src, Undef): # First assignment to an undefined struct is trivial. self.emit_line(f"{dest}.{field} = {item};") @@ -860,7 +843,7 @@ def visit_set_element(self, op: SetElement) -> None: # TODO: Support tuples (or use RStruct for tuples)? 
src = self.reg(op.src) src_type = op.src.type - assert isinstance(src_type, (RStruct, RVec)), src_type + assert isinstance(src_type, RStruct), src_type init_items = [] for n in src_type.names: if n != field: @@ -869,11 +852,6 @@ def visit_set_element(self, op: SetElement) -> None: init_items.append(item) self.emit_line(f"{dest} = ({self.ctype(src_type)}) {{ {', '.join(init_items)} }};") - def set_element_item(self, src_type: RType, field: str, item: str) -> str: - if field == "items" and src_type._ctype in VEC_ITEMS_C_TYPE: - return f"({VEC_ITEMS_C_TYPE[src_type._ctype]}){item}" - return item - def visit_load_address(self, op: LoadAddress) -> None: typ = op.type dest = self.reg(op) diff --git a/mypyc/codegen/emitmodule.py b/mypyc/codegen/emitmodule.py index fa0a4385f4fb5..043a8929cbd92 100644 --- a/mypyc/codegen/emitmodule.py +++ b/mypyc/codegen/emitmodule.py @@ -59,7 +59,6 @@ from mypyc.errors import Errors from mypyc.ir.deps import ( LIBRT_BASE64, - LIBRT_RANDOM, LIBRT_STRINGS, LIBRT_TIME, LIBRT_VECS, @@ -306,7 +305,7 @@ def compile_modules_to_ir( # Process the graph by SCC in topological order, like we do in mypy.build for scc in sorted_components(result.graph): - scc_states = [result.graph[id] for id in sorted(scc.mod_ids)] + scc_states = [result.graph[id] for id in scc.mod_ids] trees = [st.tree for st in scc_states if st.id in mapper.group_map and st.tree] if not trees: @@ -363,12 +362,7 @@ def compile_ir_to_c( if source.module in modules } if not group_modules: - # Fully-cached group (e.g. pip's second setup.py invoke for - # the wheel phase): no fresh IR was produced. Reuse the file - # list recorded in any module's IR cache so the linker still - # sees the previous run's outputs; empty content is a "do - # not rewrite" sentinel for mypyc_build. 
- ctext[group_name] = _load_cached_group_files(group_sources, result) + ctext[group_name] = [] continue generator = GroupGenerator( group_modules, source_paths, group_name, mapper.group_map, names, compiler_options @@ -378,32 +372,6 @@ def compile_ir_to_c( return ctext -def _load_cached_group_files( - group_sources: list[BuildSource], result: BuildResult -) -> list[tuple[str, str]]: - """Read the .c/.h paths recorded for this group on the previous run. - - All modules in a group share the same src_hashes map, so the first - readable IR cache is sufficient. Returns paths paired with empty - content so callers can distinguish "reuse on disk" from "newly - generated". - """ - for source in group_sources: - state = result.graph.get(source.module) - if state is None: - continue - try: - ir_json = result.manager.metastore.read(get_state_ir_cache_name(state)) - except (FileNotFoundError, OSError): - continue - try: - ir_data = json.loads(ir_json) - except json.JSONDecodeError: - continue - return [(path, "") for path in ir_data.get("src_hashes", {})] - return [] - - def get_ir_cache_name(id: str, path: str, options: Options) -> str: meta_path, _, _ = get_cache_names(id, path, options) # Mypyc uses JSON cache even with --fixed-format-cache (for now). @@ -646,19 +614,16 @@ def generate_c_for_modules(self) -> list[tuple[str, str]]: base_emitter = Emitter(self.context) # Optionally just include the runtime library c files to - # reduce the number of compiler invocations needed. - # Use <> form (only -I paths) so a shim file with the same - # basename as a runtime file can't shadow it. Triggered by - # mypyc/lower/int_ops.py vs lib-rt/int_ops.c on mypy self-compile. 
+ # reduce the number of compiler invocations needed if self.compiler_options.include_runtime_files: for name in RUNTIME_C_FILES: - base_emitter.emit_line(f"#include <{name}>") + base_emitter.emit_line(f'#include "{name}"') # Include conditional source files source_deps = collect_source_dependencies(self.modules) for source_dep in sorted(source_deps, key=lambda d: d.path): - base_emitter.emit_line(f"#include <{source_dep.path}>") + base_emitter.emit_line(f'#include "{source_dep.path}"') if self.compiler_options.depends_on_librt_internal: - base_emitter.emit_line("#include ") + base_emitter.emit_line('#include "internal/librt_internal_api.c"') base_emitter.emit_line(f'#include "__native{self.short_group_suffix}.h"') base_emitter.emit_line(f'#include "__native_internal{self.short_group_suffix}.h"') emitter = base_emitter @@ -1259,10 +1224,6 @@ def emit_module_exec_func( emitter.emit_line("if (import_librt_vecs() < 0) {") emitter.emit_line("return -1;") emitter.emit_line("}") - if LIBRT_RANDOM in module.dependencies: - emitter.emit_line("if (import_librt_random() < 0) {") - emitter.emit_line("return -1;") - emitter.emit_line("}") emitter.emit_line("PyObject* modname = NULL;") if self.multi_phase_init: emitter.emit_line(f"{module_static} = module;") @@ -1480,7 +1441,7 @@ def _toposort_visit(name: str) -> None: if decl.mark: return - for child in sorted(decl.declaration.dependencies): + for child in decl.declaration.dependencies: _toposort_visit(child) result.append(decl.declaration) diff --git a/mypyc/doc/index.rst b/mypyc/doc/index.rst index aacf275de9885..fe683c4188f20 100644 --- a/mypyc/doc/index.rst +++ b/mypyc/doc/index.rst @@ -33,10 +33,8 @@ generate fast code. librt librt_base64 - librt_random librt_strings librt_time - librt_vecs .. 
toctree:: :maxdepth: 2 diff --git a/mypyc/doc/librt.rst b/mypyc/doc/librt.rst index 23206f8cfe806..f18cc93c80294 100644 --- a/mypyc/doc/librt.rst +++ b/mypyc/doc/librt.rst @@ -26,14 +26,10 @@ Follow submodule links in the table to a detailed description of each submodule. - Description * - :doc:`librt.base64 ` - Fast Base64 encoding and decoding - * - :doc:`librt.random ` - - Pseudorandom number generation * - :doc:`librt.strings ` - String and bytes utilities * - :doc:`librt.time ` - Time utilities - * - :doc:`librt.vecs ` - - Fast growable array type ``vec`` Installing librt ---------------- diff --git a/mypyc/doc/librt_random.rst b/mypyc/doc/librt_random.rst deleted file mode 100644 index d5543661ce987..0000000000000 --- a/mypyc/doc/librt_random.rst +++ /dev/null @@ -1,97 +0,0 @@ -.. _librt-random: - -librt.random -============ - -The ``librt.random`` module is part of the ``librt`` package on PyPI, and it provides -pseudorandom number generation utilities. It can be used as a significantly faster -alternative to the stdlib :mod:`random` module in compiled code. It can also be faster -than stdlib ``random`` in interpreted code, depending on use case. - -The module uses the `ChaCha8 `__ algorithm with forward -secrecy. It is **not** suitable for cryptographic use, but it provides high-quality, -statistically uniform output. - -Functions ---------- - -The module provides module-level functions that use thread-local state, so they are -safe to call concurrently from multiple threads without external locking, and they -scale well even if used from multiple threads: - -.. function:: random() -> float - - Return a random floating-point number in the range [0.0, 1.0). - -.. function:: randint(a: i64, b: i64) -> i64 - - Return a random integer *n* such that *a* <= *n* <= *b*. - -.. function:: randrange(stop: i64, /) -> i64 - randrange(start: i64, stop: i64, /) -> i64 - - Return a random integer from the range. With one argument, the range is [0, *stop*). 
- With two arguments, the range is [*start*, *stop*). - -.. function:: seed(n: i64, /) -> None - - Seed the thread-local random number generator. This only affects module-level - functions called from the current thread. - -Random class ------------- - -.. class:: Random(seed: i64 | None = None) - - A pseudorandom number generator instance with its own independent state. Use this - when you need reproducible sequences or want to avoid interference with the - thread-local state used by the module-level functions. - - If *seed* is ``None``, the generator is seeded from OS entropy - (via :func:`os.urandom`). - - It's not safe to use the same ``Random`` instance concurrently from multiple - threads without synchronization on free-threaded Python builds. - - .. method:: random() -> float - - Return a random floating-point number in the range [0.0, 1.0). - - .. method:: randint(a: i64, b: i64) -> i64 - - Return a random integer *n* such that *a* <= *n* <= *b*. - - .. method:: randrange(stop: i64, /) -> i64 - randrange(start: i64, stop: i64, /) -> i64 - - Return a random integer from the range. With one argument, the range is [0, *stop*). - With two arguments, the range is [*start*, *stop*). - - .. method:: seed(n: i64, /) -> None - - Reseed the generator. - -Example -------- - -Using module-level functions:: - - from librt.random import randint, seed - - def roll_dice() -> i64: - return randint(1, 6) - -Using a ``Random`` instance for reproducible sequences:: - - from librt.random import Random - - def generate_data() -> list[i64]: - rng = Random(42) - return [rng.randint(0, 100) for _ in range(10)] - -Backward compatibility ----------------------- - -New versions of this module are not guaranteed to generate the same results when -using the same seed. A specific seed only produces predictable random numbers on a -specific version of ``librt``. In the future we might provide stronger guarantees. 
diff --git a/mypyc/doc/librt_vecs.rst b/mypyc/doc/librt_vecs.rst deleted file mode 100644 index dad3be621ff4c..0000000000000 --- a/mypyc/doc/librt_vecs.rst +++ /dev/null @@ -1,253 +0,0 @@ -librt.vecs -========== - -The ``librt.vecs`` module defines the ``vec`` type, a low-level, uniform growable array type. -It's part of the ``librt`` package on PyPI. - -When constructing a ``vec``, the item type ``T`` is always explicitly given via ``vec[T]``:: - - from librt.vecs import append, vec - - v = vec[float]([1.0, 2.5]) # Construct vec[float] with two items - -``vec`` supports many sequence operations, though it's not a full sequence type:: - - len(v) # 2 - v[0] # 1.0 - v[-1] # 2.5 - for x in v: - print(x) - -The length of each ``vec`` value is immutable. Appending an item is still a fast operation, -but it returns a new ``vec`` value:: - - v = append(v, -0.5) - print(v) # vec[float]([1.0, 2.5, -0.5]) - -``vec`` only supports simple, uniform item types. It uses an efficient packed binary encoding -for these *value item types*: - -* ``mypy_extensions.i64`` (signed 64-bit integer) -* ``mypy_extensions.i32`` (signed 32-bit integer) -* ``mypy_extensions.i16`` (signed 16-bit integer) -* ``mypy_extensions.u8`` (unsigned byte) -* ``float`` (64-bit float) -* ``bool`` - -``int`` is not a valid item type, since it has an arbitrary precision, and vec is an -efficiency-focused type. Use one of the fixed-length integer types instead. - -Class item types (e.g. ``str`` or ``MyNativeClass``) are represented as regular object references. -Optional class item types (e.g. ``str | None``) are supported for convenience, but arbitrary -union types are not supported as item types. Nested vecs are supported, e.g. ``vec[vec[i64]]``. - -A vec value is often used as an efficient alternative to ``list`` or ``array.array`` in code -compiled using mypyc. Its primary advantages are an efficient packed memory representation -for value item types and very fast inlined get and set item operations. 
- -Vec instances perform runtime checking of item types. Since values of type variables are -not available at runtime (they are *erased*), type variables can't be used as item types. - -A vec value is effectively an immutable (length, buffer) pair. This means that any operation -that changes the length of a vec, including ``append`` as we saw above, returns a modified -value. - -.. note:: - An immutable length allows more efficient code to be generated by mypyc, and vec values - can be allocated to machine registers effectively. However, vec values must be boxed - if used in a non-native context, such as if added to a list or dict. - -Here are some examples of valid vec types: - -.. list-table:: - :header-rows: 1 - - * - Type - - Item representation - * - ``vec[i32]`` - - Packed 32-bit integers - * - ``vec[float]`` - - Packed 64-bit floats - * - ``vec[str]`` - - Object references - * - ``vec[vec[u8]]`` - - Packed vec values - -The ``vec`` class ------------------ - -.. class:: vec[T](items: Iterable[T] = ..., *, capacity: i64 = ...) - - A generic growable array type. The runtime type parameter ``T`` used when - calling ``vec[T](...)`` determines the element type. - - The ``capacity`` parameter allows defining the minimum initial - capacity of the buffer, some of which may be unused after - construction. Unused capacity allows fast ``append`` and ``extend`` - operations that don't need to reallocate the buffer. Actual capacity - will be larger than ``capacity`` if ``items`` has more than ``capacity`` - items. - - Construction from ``list`` and ``tuple`` objects is optimized. - Also, for value item types, construction from an object that implements - the buffer protocol is optimized (such as ``bytes``), if the format - is compatible with the vec item type. - - Mypyc treats ``vec[T]([x] * n)`` as a special form. For example, - ``vec[u8]([0] * n)`` constructs a zero-initialized vec object - efficiently, without building an intermediate list. 
There are - also other constructor-related special forms -- see `Special - forms`_ below. - - It's an error to construct a ``vec`` object without providing an - item type: ``vec()`` raises an exception. - - .. describe:: len(v) → i64 - - Return the length of ``v``. - - .. describe:: v[i] → T - - Return item at index ``i`` (index may be negative). - - .. describe:: v[i:j] → vec[T] - - Return a slice. This constructs a new ``vec`` object. ``i`` and ``j`` may be negative. - - .. describe:: v[i] = o - - Assign to an item (index may be negative). - - .. describe:: o in v → bool - - Return True if ``v`` contains ``o``. - - .. describe:: for o in v - - Iterate over items. - - .. describe:: memoryview(v) - - ``vec`` implements the buffer protocol, but only for value item types that use a - packed representation. - -Functions ---------- - -Since the following operations return a modified value, they are module-level functions -instead of methods. - -.. function:: append(v: vec[T], o: T) -> vec[T] - - Return ``v`` with item ``o`` appended to it. If ``v`` has unused capacity, reuse - the existing buffer. The time complexity is O(1) on average. Example:: - - v = vec[i32]() - v = append(v, 1) - -.. function:: extend(v: vec[T], it: Iterable[T]) -> vec[T] - - Return ``v`` with all items from iterable ``it`` appended to it. If ``v`` has sufficient - unused capacity, reuse the existing buffer. The time complexity is O(n) on average, - where n is the length of ``it``. Example:: - - v = vec[u8]() - v = extend(v, b"foo") - -.. function:: remove(v: vec[T], o: T) -> vec[T] - - Return ``v`` with the first instance of item ``o`` removed. Reuse the buffer - from ``v``. Raise ``ValueError`` if value doesn't exist. Example:: - - v = vec[i32]([1, 2, 3]) - v = remove(v, 2) - # v has items [1, 3] - -.. function:: pop(v: vec[T], i: i64 = -1) -> tuple[vec[T], T] - - Return ``(new_v, item)``, where ``item`` is the value at index ``i`` and - ``new_v`` is ``v`` with that item removed. 
Reuse the buffer from ``v``. - Example:: - - v = vec[i32]([1, 2, 3]) - v, x = pop(v) - # x is 3; v has items [1, 2] - -Special forms --------------- - -Certain combinations of operations that would be multiple separate operations in -regular Python are guaranteed to be compiled by mypyc to direct operations -with no unnecessary temporary objects. - -.. list-table:: - :header-rows: 1 - - * - Special form - - Description - * - ``vec[T]()`` - - Construct empty vec with no buffer. This doesn't perform any dynamic allocation - (at least for non-nested vecs). - * - ``vec[T]([element1, ...])`` - - Directly construct a vec object with given items, without a temporary list. - * - ``vec[T]([element1] * n)`` - - Directly construct a vec with length n, without any temporary list. - * - ``vec[T]([ for ... in ])`` - - Vec comprehension creates no temporary list. - -Thread safety -------------- - -In free-threaded Python builds, it's unsafe to write or modify an item if other -threads might be concurrently accessing *the same item*. For example, writing ``v[4]`` -is not safe to do if another thread might be reading ``v[4]``. Similarly, two -threads concurrently calling ``append`` or ``remove`` on the same vec object is not safe. - -This is different from list objects, since vec is a lower-level type where implicit -synchronization would have a significant performance cost. However, since vec lengths -are immutable, some race conditions that lists can be susceptible to are not possible -with vecs. - -Implementation details ----------------------- - -In a native context, such as in a local variable or a parameter in a native function, -or in an attribute of a native class, vec values are implemented as value objects with two -fields: length and buffer. The buffer is a normal Python object, but it's not directly -accessible to users. If a vec object is empty, no buffer object is required. This means that -empty vecs are particularly efficient in a native context (usually 16 bytes). 
- -A packed representation is used for buffers with supported value item types, including for -nested vecs. The packed representation is much more efficient than a Python list object, and -it's also significantly more efficient than ``array.array`` for small sequences. - -Multiple vec values can share the same underlying buffer. For example, assigning a vec -to another variable creates an alias that refers to the same buffer:: - - v = vec[i32]([1, 2, 3], capacity=3) - w = v # v and w share the same buffer - - w[0] = 99 - print(v[0]) # 99 -- both see the change - -However, this sharing is not guaranteed to persist if there are operations that change -the length (such as ``append``). These may reallocate the buffer, breaking the sharing -silently:: - - v = append(v, 4) # reallocates the buffer since there is no free capacity - v[0] = 0 - print(w[0]) # still 99 -- v and w no longer share a buffer - -If you need independent copies, use slicing (``v[:]``) to explicitly create a vec with -its own buffer. It's not recommended to rely on the details of buffer reallocation, -as these might change between ``librt`` releases. - -Using vecs outside compiled code --------------------------------- - -``vec`` is fully supported in non-compiled code, but ``vec`` values will be boxed in such -non-native contexts. There will be always two objects, a boxed vec object and a buffer object, -whereas in native contexts usually only the buffer is a dynamically allocated object. -``vec`` is primarily useful in code compiled using mypyc, and it's been heavily optimized -for this use case. There may be no performance benefit in interpreted code over using -``list`` or ``array.array``. 
diff --git a/mypyc/ir/deps.py b/mypyc/ir/deps.py index 751845d3a324c..20b1f102ee383 100644 --- a/mypyc/ir/deps.py +++ b/mypyc/ir/deps.py @@ -109,7 +109,6 @@ def get_header(self) -> str: LIBRT_BASE64: Final = Capsule("librt.base64") LIBRT_VECS: Final = Capsule("librt.vecs") LIBRT_TIME: Final = Capsule("librt.time") -LIBRT_RANDOM: Final = Capsule("librt.random") BYTES_EXTRA_OPS: Final = SourceDep("bytes_extra_ops.c") BYTES_WRITER_EXTRA_OPS: Final = SourceDep("byteswriter_extra_ops.c") diff --git a/mypyc/ir/rtypes.py b/mypyc/ir/rtypes.py index db29f9e304d8d..1429e8a45cf66 100644 --- a/mypyc/ir/rtypes.py +++ b/mypyc/ir/rtypes.py @@ -41,7 +41,7 @@ class to enable the new behavior. In rare cases, adding a new from typing import TYPE_CHECKING, ClassVar, Final, Generic, TypeGuard, TypeVar, Union, final from mypyc.common import HAVE_IMMORTAL, IS_32_BIT_PLATFORM, PLATFORM_SIZE, JsonDict, short_name -from mypyc.ir.deps import LIBRT_RANDOM, LIBRT_STRINGS, LIBRT_VECS, Dependency +from mypyc.ir.deps import LIBRT_STRINGS, LIBRT_VECS, Dependency from mypyc.namegen import NameGenerator if TYPE_CHECKING: @@ -544,15 +544,10 @@ def __hash__(self) -> int: ("librt.strings.BytesWriter", (LIBRT_STRINGS,)), ("librt.strings.StringWriter", (LIBRT_STRINGS,)), ] -} | { - "librt.random.Random": RPrimitive( - "librt.random.Random", is_unboxed=False, is_refcounted=True, dependencies=(LIBRT_RANDOM,) - ) } bytes_writer_rprimitive: Final = KNOWN_NATIVE_TYPES["librt.strings.BytesWriter"] string_writer_rprimitive: Final = KNOWN_NATIVE_TYPES["librt.strings.StringWriter"] -random_rprimitive: Final = KNOWN_NATIVE_TYPES["librt.random.Random"] def is_native_rprimitive(rtype: RType) -> bool: @@ -1022,7 +1017,7 @@ class RVec(RType): def __init__(self, item_type: RType) -> None: self.name = "vec[%s]" % item_type self.item_type = item_type - self.names = ["len", "items"] + self.names = ["len", "buf"] self.dependencies = (LIBRT_VECS,) if isinstance(item_type, RUnion): non_opt = optional_value_type(item_type) @@ 
-1031,14 +1026,14 @@ def __init__(self, item_type: RType) -> None: if item_type in vec_buf_types: self._ctype = vec_c_types[item_type] self.buf_type = vec_buf_types[item_type] - self.types = [c_pyssize_t_rprimitive, pointer_rprimitive] + self.types = [c_pyssize_t_rprimitive, self.buf_type] elif isinstance(non_opt, RVec): self._ctype = "VecNested" - self.types = [c_pyssize_t_rprimitive, pointer_rprimitive] + self.types = [c_pyssize_t_rprimitive, VecTBufObject] self.buf_type = VecNestedBufObject else: self._ctype = "VecT" - self.types = [c_pyssize_t_rprimitive, pointer_rprimitive] + self.types = [c_pyssize_t_rprimitive, VecTBufObject] self.buf_type = VecTBufObject @property @@ -1081,8 +1076,8 @@ def depth(self) -> int: def field_type(self, name: str) -> RType: if name == "len": return c_pyssize_t_rprimitive - elif name == "items": - return pointer_rprimitive + elif name == "buf": + return object_rprimitive assert False, f"RVec has no field '{name}'" def accept(self, visitor: RTypeVisitor[T]) -> T: @@ -1351,7 +1346,7 @@ def check_native_int_range(rtype: RPrimitive, n: int) -> bool: # Struct type for vec[i64] (in most cases use RVec instead). VecI64 = RStruct( - name="VecI64", names=["len", "items"], types=[c_pyssize_t_rprimitive, pointer_rprimitive] + name="VecI64", names=["len", "buf"], types=[c_pyssize_t_rprimitive, object_rprimitive] ) @@ -1364,13 +1359,13 @@ def check_native_int_range(rtype: RPrimitive, n: int) -> bool: # Struct type for vec[t] (in most cases use RVec instead). 
VecT = RStruct( - name="VecT", names=["len", "items"], types=[c_pyssize_t_rprimitive, pointer_rprimitive] + name="VecT", names=["len", "buf"], types=[c_pyssize_t_rprimitive, object_rprimitive] ) VecNestedBufItem = RStruct( name="VecNestedBufItem", - names=["len", "items"], - types=[c_pyssize_t_rprimitive, pointer_rprimitive], + names=["len", "buf"], + types=[c_pyssize_t_rprimitive, object_non_refcounted_rprimitive], ) # Buffer for vec[vec[t]] @@ -1388,7 +1383,7 @@ def check_native_int_range(rtype: RPrimitive, n: int) -> bool: # Struct type for vec[vec[...]] (in most cases use RVec instead). VecNested = RStruct( - name="VecNested", names=["len", "items"], types=[c_pyssize_t_rprimitive, pointer_rprimitive] + name="VecNested", names=["len", "buf"], types=[c_pyssize_t_rprimitive, object_rprimitive] ) VecNestedBufObject_rprimitive = RPrimitive( diff --git a/mypyc/irbuild/builder.py b/mypyc/irbuild/builder.py index 066954e920165..67aa24b3641c8 100644 --- a/mypyc/irbuild/builder.py +++ b/mypyc/irbuild/builder.py @@ -1076,11 +1076,11 @@ def get_sequence_type_from_type(self, target_type: Type) -> RType: items = target_type.items assert items, "This function does not support empty tuples" # Tuple might have elements of different types. 
- rtypes = list(dict.fromkeys(self.mapper.type_to_rtype(item) for item in items)) + rtypes = set(map(self.mapper.type_to_rtype, items)) if len(rtypes) == 1: return rtypes.pop() else: - return RUnion.make_simplified_union(rtypes) + return RUnion.make_simplified_union(list(rtypes)) assert False, target_type def get_dict_base_type(self, expr: Expression) -> list[Instance]: diff --git a/mypyc/irbuild/prepare.py b/mypyc/irbuild/prepare.py index f143ce1b44025..09bfc8339b404 100644 --- a/mypyc/irbuild/prepare.py +++ b/mypyc/irbuild/prepare.py @@ -182,12 +182,7 @@ def load_type_map(mapper: Mapper, modules: list[MypyFile], deser_ctx: DeserMaps) continue mapper.type_to_ir[node.node] = ir mapper.symbol_fullnames.add(node.node.fullname) - # Trait/builtin-base classes have an ir.ctor FuncDecl - # but no emitted CPyDef_, so a cross-group direct - # call would hit an undefined symbol. Mirror the skip - # in prepare_init_method. - if not ir.is_trait and not ir.builtin_base: - mapper.func_to_decl[node.node] = ir.ctor + mapper.func_to_decl[node.node] = ir.ctor for module in modules: for func in get_module_func_defs(module): diff --git a/mypyc/irbuild/vec.py b/mypyc/irbuild/vec.py index bfcfabee45c21..00e6f0adcdd8f 100644 --- a/mypyc/irbuild/vec.py +++ b/mypyc/irbuild/vec.py @@ -2,11 +2,12 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Final +from typing import TYPE_CHECKING, Final, cast from mypyc.common import IS_32_BIT_PLATFORM, PLATFORM_SIZE from mypyc.ir.ops import ( ERR_MAGIC, + ERR_NEVER, Assign, BasicBlock, Branch, @@ -14,6 +15,7 @@ ComparisonOp, DecRef, GetElement, + GetElementPtr, Integer, IntOp, RaiseStandardError, @@ -168,7 +170,7 @@ def vec_create_initialized( for_loop = builder.begin_for( items_start, items_end, Integer(step, c_pyssize_t_rprimitive), signed=False ) - vec_set_mem_item(builder, for_loop.index, item_type, init) + builder.set_mem(for_loop.index, item_type, init) for_loop.finish() builder.keep_alive([vec], line) @@ -188,7 +190,7 @@ 
def vec_create_from_values( item_type = vtype.item_type step = step_size(item_type) for value in values: - vec_set_mem_item(builder, ptr, item_type, value) + builder.set_mem(ptr, item_type, value) ptr = builder.int_add(ptr, step) builder.keep_alive([vec], line) return vec @@ -242,11 +244,14 @@ def vec_len_native(builder: LowLevelIRBuilder, val: Value) -> Value: def vec_items(builder: LowLevelIRBuilder, vecobj: Value) -> Value: - """Return pointer to first item in vec. + """Return pointer to first item in vec's buf. - The items field points directly to the first element in the buffer. + Safe to call even when buf is NULL (empty vec), since GetElementPtr + uses offsetof-based arithmetic instead of &((T*)p)->field. """ - return builder.get_element(vecobj, "items") + vtype = cast(RVec, vecobj.type) + buf = builder.get_element(vecobj, "buf") + return builder.add(GetElementPtr(buf, vtype.buf_type, "items")) def vec_item_ptr(builder: LowLevelIRBuilder, vecobj: Value, index: Value) -> Value: @@ -264,20 +269,6 @@ def vec_item_ptr(builder: LowLevelIRBuilder, vecobj: Value, index: Value) -> Val return builder.int_add(items_addr, delta) -def vec_load_mem_item( - builder: LowLevelIRBuilder, ptr: Value, item_type: RType, *, can_borrow: bool = False -) -> Value: - """Load a vec item from storage, converting nested vec slots to RVec values.""" - return builder.load_mem(ptr, item_type, borrow=can_borrow) - - -def vec_set_mem_item( - builder: LowLevelIRBuilder, ptr: Value, item_type: RType, item: Value -) -> None: - """Store a vec item, converting RVec values to nested storage items.""" - builder.set_mem(ptr, item_type, item) - - def vec_check_and_adjust_index( builder: LowLevelIRBuilder, lenv: Value, index: Value, line: int ) -> Value: @@ -333,7 +324,7 @@ def vec_get_item_unsafe( index = as_platform_int(builder, index, line) vtype = base.type item_addr = vec_item_ptr(builder, base, index) - result = vec_load_mem_item(builder, item_addr, vtype.item_type, can_borrow=can_borrow) + 
result = builder.load_mem(item_addr, vtype.item_type, borrow=can_borrow) builder.keep_alives.append(base) return result @@ -352,9 +343,9 @@ def vec_set_item( if item_type.is_refcounted: # Read an unborrowed reference to cause a decref to be # generated for the old item. - old_item = vec_load_mem_item(builder, item_addr, item_type, can_borrow=True) + old_item = builder.load_mem(item_addr, item_type, borrow=True) builder.add(DecRef(old_item)) - vec_set_mem_item(builder, item_addr, item_type, item) + builder.set_mem(item_addr, item_type, item) builder.keep_alive([base], line) @@ -367,23 +358,32 @@ def vec_init_item_unsafe( item_addr = vec_item_ptr(builder, base, index) item_type = vtype.item_type item = builder.coerce(item, item_type, line) - vec_set_mem_item(builder, item_addr, item_type, item) + builder.set_mem(item_addr, item_type, item) builder.keep_alive([base], line) def convert_to_t_ext_item(builder: LowLevelIRBuilder, item: Value) -> Value: vec_len = builder.add(GetElement(item, "len")) - vec_items = builder.add(GetElement(item, "items")) + vec_buf = builder.add(GetElement(item, "buf")) temp = builder.add(SetElement(Undef(VecNestedBufItem), "len", vec_len)) - return builder.add(SetElement(temp, "items", vec_items)) + return builder.add(SetElement(temp, "buf", vec_buf)) def convert_from_t_ext_item(builder: LowLevelIRBuilder, item: Value, vec_type: RVec) -> Value: - """Convert an owned VecNestedBufItem to the corresponding RVec value.""" - vec_len = builder.add(GetElement(item, "len")) - vec_items = builder.add(GetElement(item, "items")) - temp = builder.add(SetElement(Undef(vec_type), "len", vec_len)) - return builder.add(SetElement(temp, "items", vec_items)) + """Convert a value of type VecNestedBufItem to the corresponding RVec value.""" + api_name = vec_api_by_item_type.get(vec_type.item_type) + if api_name is not None: + name = f"{api_name}.convert_from_nested" + elif isinstance(vec_type.item_type, RVec): + name = "VecNestedApi.convert_from_nested" + else: 
+ name = "VecTApi.convert_from_nested" + + return builder.add( + CallC( + name, [item], vec_type, steals=[True], is_borrowed=False, error_kind=ERR_NEVER, line=-1 + ) + ) def vec_item_type(builder: LowLevelIRBuilder, item_type: RType, line: int) -> Value: @@ -551,7 +551,7 @@ def vec_contains(builder: LowLevelIRBuilder, vec: Value, target: Value, line: in for_loop = builder.begin_for( items_start, items_end, Integer(step, c_pyssize_t_rprimitive), signed=False ) - item = vec_load_mem_item(builder, for_loop.index, item_type, can_borrow=True) + item = builder.load_mem(for_loop.index, item_type, borrow=True) comp = builder.binary_op(item, target, "==", line) false = BasicBlock() builder.add(Branch(comp, true, false, Branch.BOOL)) diff --git a/mypyc/lib-rt/CPy.h b/mypyc/lib-rt/CPy.h index c22c4162669bd..89ef4d0749a45 100644 --- a/mypyc/lib-rt/CPy.h +++ b/mypyc/lib-rt/CPy.h @@ -3,8 +3,8 @@ #ifndef CPY_CPY_H #define CPY_CPY_H -#include #include +#include #include #include #include diff --git a/mypyc/lib-rt/byteswriter_extra_ops.h b/mypyc/lib-rt/byteswriter_extra_ops.h index 4aec322a730cd..dc715600653d5 100644 --- a/mypyc/lib-rt/byteswriter_extra_ops.h +++ b/mypyc/lib-rt/byteswriter_extra_ops.h @@ -1,9 +1,9 @@ #ifndef BYTESWRITER_EXTRA_OPS_H #define BYTESWRITER_EXTRA_OPS_H -#include #include #include +#include #include "mypyc_util.h" #include "strings/librt_strings_api.h" diff --git a/mypyc/lib-rt/function_wrapper.c b/mypyc/lib-rt/function_wrapper.c index 348c3316cd258..ccb1824d24b4a 100644 --- a/mypyc/lib-rt/function_wrapper.c +++ b/mypyc/lib-rt/function_wrapper.c @@ -1,6 +1,6 @@ #define PY_SSIZE_T_CLEAN -#include "CPy.h" #include +#include "CPy.h" #define CPyFunction_weakreflist(f) (((PyCFunctionObject *)f)->m_weakreflist) #define CPyFunction_class(f) ((PyObject*) ((PyCMethodObject *) (f))->mm_class) diff --git a/mypyc/lib-rt/misc_ops.c b/mypyc/lib-rt/misc_ops.c index 392dba0deca4c..2aaadb2ac47d2 100644 --- a/mypyc/lib-rt/misc_ops.c +++ b/mypyc/lib-rt/misc_ops.c @@ 
-1281,17 +1281,12 @@ static int CPyImport_SetModuleFile(PyObject *modobj, PyObject *module_name, Py_DECREF(file); return 0; } - // Derive __file__ from the shared lib's directory, the module - // name, and the extension suffix. Two layouts: - // - // Monolithic: one shared lib above the package tree holds many - // modules, so append the full dotted module path. - // separate=True: each module has its own "__mypyc.so" - // next to the module, so dirname(shared_lib) is already inside - // the parent package. Append only the last segment. - // - // Detect the separate=True case by matching the shared lib's - // basename against "__mypyc". + // Derive __file__ from the shared library's __file__ (for its + // directory), the module name (dots -> path separators), and the + // extension suffix. E.g. for module "a.b.c", shared lib + // "/path/to/group__mypyc.cpython-312-x86_64-linux-gnu.so", + // suffix ".cpython-312-x86_64-linux-gnu.so": + // => "/path/to/a/b/c.cpython-312-x86_64-linux-gnu.so" PyObject *derived_file = NULL; if (shared_lib_file != NULL && shared_lib_file != Py_None && PyUnicode_Check(shared_lib_file)) { @@ -1319,65 +1314,30 @@ static int CPyImport_SetModuleFile(PyObject *modobj, PyObject *module_name, if (module_path == NULL) { return -1; } - - // Compute the module's last dotted segment for the separate=True check. - Py_ssize_t name_len = PyUnicode_GetLength(module_name); - Py_ssize_t last_dot = PyUnicode_FindChar(module_name, '.', 0, name_len, -1); - PyObject *last_segment; - if (last_dot >= 0) { - last_segment = PyUnicode_Substring(module_name, last_dot + 1, name_len); - } else { - last_segment = module_name; - Py_INCREF(last_segment); - } - if (last_segment == NULL) { - Py_DECREF(module_path); - return -1; - } - // Compare shared_lib_file basename against "__mypyc". 
- PyObject *expected_basename = PyUnicode_FromFormat( - "%U__mypyc%U", last_segment, ext_suffix); - PyObject *actual_basename; - if (sep >= 0) { - actual_basename = PyUnicode_Substring(shared_lib_file, sep + 1, sf_len); - } else { - actual_basename = shared_lib_file; - Py_INCREF(actual_basename); - } - int is_per_module_lib = 0; - if (expected_basename != NULL && actual_basename != NULL) { - is_per_module_lib = - (PyUnicode_Compare(expected_basename, actual_basename) == 0); - } - Py_XDECREF(expected_basename); - Py_XDECREF(actual_basename); - // For packages, __file__ should point to __init__, // e.g. "a/b/__init__.cpython-312-x86_64-linux-gnu.so". - PyObject *file_path = is_per_module_lib ? last_segment : module_path; if (sep >= 0) { PyObject *dir = PyUnicode_Substring(shared_lib_file, 0, sep); if (dir != NULL) { if (is_package) { derived_file = PyUnicode_FromFormat( "%U%c%U%c__init__%U", dir, (int)sep_char, - file_path, (int)sep_char, ext_suffix); + module_path, (int)sep_char, ext_suffix); } else { derived_file = PyUnicode_FromFormat( "%U%c%U%U", dir, (int)sep_char, - file_path, ext_suffix); + module_path, ext_suffix); } Py_DECREF(dir); } } else { if (is_package) { derived_file = PyUnicode_FromFormat( - "%U%c__init__%U", file_path, (int)SEP[0], ext_suffix); + "%U%c__init__%U", module_path, (int)SEP[0], ext_suffix); } else { - derived_file = PyUnicode_FromFormat("%U%U", file_path, ext_suffix); + derived_file = PyUnicode_FromFormat("%U%U", module_path, ext_suffix); } } - Py_DECREF(last_segment); Py_DECREF(module_path); } if (derived_file == NULL && !PyErr_Occurred()) { diff --git a/mypyc/lib-rt/pythonsupport.h b/mypyc/lib-rt/pythonsupport.h index 1b0583543fe48..4c82ff6a3c037 100644 --- a/mypyc/lib-rt/pythonsupport.h +++ b/mypyc/lib-rt/pythonsupport.h @@ -6,8 +6,8 @@ #ifndef CPY_PYTHONSUPPORT_H #define CPY_PYTHONSUPPORT_H -#include #include +#include #include "pythoncapi_compat.h" #include #include diff --git a/mypyc/lib-rt/random/librt_random.c 
b/mypyc/lib-rt/random/librt_random.c deleted file mode 100644 index 7dc590eaa5946..0000000000000 --- a/mypyc/lib-rt/random/librt_random.c +++ /dev/null @@ -1,762 +0,0 @@ -#include "pythoncapi_compat.h" - -#define PY_SSIZE_T_CLEAN -#include -#include -#include - -#ifdef _WIN32 -#include -#else -#include -#endif - -#include "mypyc_util.h" -#include "CPy.h" -#include "librt_random.h" - -// -// ChaCha8 PRNG with forward secrecy -// - -#define CHACHA8_RESEED_INTERVAL 16 - -typedef struct { - uint32_t seed[8]; // 256-bit key - uint32_t buf[16]; // output buffer: one ChaCha8 block - uint32_t counter; // block counter - uint8_t used; // index into buf - uint8_t n; // usable values in buf (8 or 16) - uint8_t blocks_left; // blocks until next reseed -} chacha8_rng; - -static inline uint32_t -rotl32(uint32_t x, int n) { - return (x << n) | (x >> (32 - n)); -} - -#define QUARTERROUND(a, b, c, d) \ - do { \ - a += b; d ^= a; d = rotl32(d, 16); \ - c += d; b ^= c; b = rotl32(b, 12); \ - a += b; d ^= a; d = rotl32(d, 8); \ - c += d; b ^= c; b = rotl32(b, 7); \ - } while (0) - -static void -chacha8_block(const uint32_t seed[8], uint32_t counter, uint32_t out[16]) -{ - // "expand 32-byte k" - uint32_t s[16] = { - 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574, - seed[0], seed[1], seed[2], seed[3], - seed[4], seed[5], seed[6], seed[7], - counter, 0, 0, 0 // counter (low 32), counter (high 32), nonce - }; - - memcpy(out, s, sizeof(uint32_t) * 16); - - // 4 double-rounds = 8 rounds - for (int i = 0; i < 4; i++) { - // Column rounds - QUARTERROUND(out[0], out[4], out[ 8], out[12]); - QUARTERROUND(out[1], out[5], out[ 9], out[13]); - QUARTERROUND(out[2], out[6], out[10], out[14]); - QUARTERROUND(out[3], out[7], out[11], out[15]); - // Diagonal rounds - QUARTERROUND(out[0], out[5], out[10], out[15]); - QUARTERROUND(out[1], out[6], out[11], out[12]); - QUARTERROUND(out[2], out[7], out[ 8], out[13]); - QUARTERROUND(out[3], out[4], out[ 9], out[14]); - } - - // Add original state back 
(standard ChaCha finalization) - for (int i = 0; i < 16; i++) - out[i] += s[i]; -} - -// Fill entropy from OS via os.urandom(), which handles short reads, -// EINTR, and platform differences internally. -// Returns 0 on success, -1 on failure (with Python exception set). -static int -fill_os_entropy(void *buf, size_t len) -{ - PyObject *os_mod = PyImport_ImportModule("os"); - if (os_mod == NULL) - return -1; - PyObject *bytes = PyObject_CallMethod(os_mod, "urandom", "n", (Py_ssize_t)len); - Py_DECREF(os_mod); - if (bytes == NULL) - return -1; - memcpy(buf, PyBytes_AS_STRING(bytes), len); - Py_DECREF(bytes); - return 0; -} - -static void -chacha8_refill(chacha8_rng *rng) -{ - chacha8_block(rng->seed, rng->counter, rng->buf); - rng->counter++; - rng->used = 0; - rng->blocks_left--; - - if (unlikely(rng->blocks_left == 0)) { - // Forward secrecy reseed: steal last 8 words as new key - memcpy(rng->seed, rng->buf + 8, sizeof(uint32_t) * 8); - rng->n = 8; // only 8 words usable this block - rng->counter = 0; - rng->blocks_left = CHACHA8_RESEED_INTERVAL; - } else { - rng->n = 16; - } -} - -static inline uint32_t -chacha8_next(chacha8_rng *rng) -{ - if (unlikely(rng->used >= rng->n)) - chacha8_refill(rng); - return rng->buf[rng->used++]; -} - -// Return 64 bits of randomness (two consecutive 32-bit words, single bounds check). -static inline uint64_t -chacha8_next64(chacha8_rng *rng) -{ - // Need 2 words available; if fewer than 2, refill first. - if (unlikely(rng->used + 1 >= rng->n)) - // Use two separate calls to handle block boundary correctly. - return ((uint64_t)chacha8_next(rng) << 32) | chacha8_next(rng); - uint32_t hi = rng->buf[rng->used++]; - uint32_t lo = rng->buf[rng->used++]; - return ((uint64_t)hi << 32) | lo; -} - -// Return a uniformly distributed random value in [0, range). -// Use Lemire's nearly divisionless method for small ranges, and a portable -// rejection sampler for larger ranges to avoid non-standard 128-bit arithmetic. 
-static inline uint64_t -chacha8_next_ranged(chacha8_rng *rng, uint64_t range) -{ - assert(range != 0); - if (likely(range <= UINT32_MAX)) { - // 32-bit Lemire: multiply r * range to get 64-bit product, - // upper 32 bits are the result in [0, range). - uint64_t m = (uint64_t)chacha8_next(rng) * range; - uint32_t lo = (uint32_t)m; - if (unlikely(lo < range)) { - uint32_t thresh = (uint32_t)(-(uint32_t)range) % (uint32_t)range; - while (lo < thresh) { - m = (uint64_t)chacha8_next(rng) * range; - lo = (uint32_t)m; - } - } - return m >> 32; - } - // If range is a power of two, masking produces an unbiased result. - if ((range & (range - 1)) == 0) { - return chacha8_next64(rng) & (range - 1); - } - uint64_t r; - // In unsigned arithmetic, -range is 2**64 - range, so this computes - // 2**64 % range. Rejecting values below this threshold leaves exactly - // floor(2**64 / range) full buckets of size range, avoiding modulo bias. - uint64_t thresh = -range % range; - do { - r = chacha8_next64(rng); - } while (unlikely(r < thresh)); - return r % range; -} - -// Return a random i64 starting at 'start', with 'range' possible values. -// A zero range represents the full 2**64 i64 domain. -static inline int64_t -random_i64_from_range(chacha8_rng *rng, int64_t start, uint64_t range) -{ - uint64_t offset = range == 0 ? chacha8_next64(rng) : chacha8_next_ranged(rng, range); - return (int64_t)((uint64_t)start + offset); -} - -static void -chacha8_reset(chacha8_rng *rng) -{ - rng->counter = 0; - rng->used = 16; // force immediate refill on first call - rng->n = 16; - rng->blocks_left = CHACHA8_RESEED_INTERVAL; -} - -static int -chacha8_init(chacha8_rng *rng) -{ - if (fill_os_entropy(rng->seed, sizeof(rng->seed)) < 0) - return -1; - chacha8_reset(rng); - return 0; -} - -// Seed from an integer by hashing it through ChaCha8 to fill the 256-bit key. 
-static void -chacha8_seed_int(chacha8_rng *rng, int64_t seed_val) -{ - // Use the integer to construct a simple initial key, then run one - // ChaCha8 block to diffuse it across all 256 bits. - memset(rng->seed, 0, sizeof(rng->seed)); - rng->seed[0] = (uint32_t)(seed_val & 0xFFFFFFFF); - rng->seed[1] = (uint32_t)((uint64_t)seed_val >> 32); - - uint32_t out[16]; - chacha8_block(rng->seed, 0, out); - memcpy(rng->seed, out, sizeof(rng->seed)); - chacha8_reset(rng); -} - -// -// Thread-local global RNG for module-level random()/randint() -// -// thread_local pointer for fast access (direct %fs/%gs-relative load), -// platform TLS key with destructor for cleanup on thread exit. -// - -#ifdef _WIN32 -static __declspec(thread) chacha8_rng *tls_rng = NULL; -#else -static __thread chacha8_rng *tls_rng = NULL; -#endif - -#ifdef _WIN32 -static DWORD tls_key = FLS_OUT_OF_INDEXES; - -static void NTAPI -tls_rng_destructor(void *ptr) -{ - if (ptr != NULL) { - memset(ptr, 0, sizeof(chacha8_rng)); - PyMem_RawFree(ptr); - } -} -#else -static pthread_key_t tls_key; - -static void -tls_rng_destructor(void *ptr) -{ - if (ptr != NULL) { - memset(ptr, 0, sizeof(chacha8_rng)); - PyMem_RawFree(ptr); - } -} -#endif - -static int tls_key_created = 0; - -static int -ensure_tls_key(void) -{ - if (likely(tls_key_created)) - return 0; -#ifdef _WIN32 - tls_key = FlsAlloc(tls_rng_destructor); - if (tls_key == FLS_OUT_OF_INDEXES) { - PyErr_SetString(PyExc_OSError, "FlsAlloc failed"); - return -1; - } -#else - if (pthread_key_create(&tls_key, tls_rng_destructor) != 0) { - PyErr_SetString(PyExc_OSError, "pthread_key_create failed"); - return -1; - } -#endif - tls_key_created = 1; - return 0; -} - -// Get the thread-local RNG, initializing on first use. -// Returns NULL with Python exception set on failure. 
-static inline chacha8_rng * -get_thread_rng(void) -{ - chacha8_rng *rng = tls_rng; - if (likely(rng != NULL)) - return rng; - - // First use on this thread — allocate and seed - rng = PyMem_RawMalloc(sizeof(chacha8_rng)); - if (rng == NULL) { - PyErr_NoMemory(); - return NULL; - } - if (chacha8_init(rng) < 0) { - PyMem_RawFree(rng); - return NULL; - } - - // Register with platform TLS for destructor -#ifdef _WIN32 - FlsSetValue(tls_key, rng); -#else - pthread_setspecific(tls_key, rng); -#endif - - tls_rng = rng; - return rng; -} - -// Return a random double in [0.0, 1.0) with 53 bits of mantissa precision. -static inline double -random_double_impl(chacha8_rng *rng) -{ - uint64_t r = chacha8_next64(rng); - return (double)(r >> 11) * (1.0 / 9007199254740992.0); // 1/2^53 -} - -// -// Module-level random() and randint() -// - -static PyObject* -module_random(PyObject *module, PyObject *Py_UNUSED(ignored)) -{ - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return NULL; - return PyFloat_FromDouble(random_double_impl(rng)); -} - -// Generate random integer in [a, b] using the given RNG. 
-static inline PyObject* -randint_impl(chacha8_rng *rng, int64_t a, int64_t b) -{ - uint64_t range = (uint64_t)b - (uint64_t)a + 1; - return PyLong_FromLongLong(random_i64_from_range(rng, a, range)); -} - -static PyObject* -module_randint(PyObject *module, PyObject *const *args, Py_ssize_t nargs) -{ - if (nargs != 2) { - PyErr_Format(PyExc_TypeError, - "randint() takes exactly 2 arguments (%zd given)", nargs); - return NULL; - } - - int64_t a = CPyLong_AsInt64(args[0]); - if (unlikely(a == CPY_LL_INT_ERROR && PyErr_Occurred())) - return NULL; - - int64_t b = CPyLong_AsInt64(args[1]); - if (unlikely(b == CPY_LL_INT_ERROR && PyErr_Occurred())) - return NULL; - - if (a > b) { - PyErr_SetString(PyExc_ValueError, - "empty range for randint()"); - return NULL; - } - - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return NULL; - - return randint_impl(rng, a, b); -} - -// Parse 1 or 2 int args for randrange([start,] stop). -// Sets *a to start (default 0), *b to stop-1. -// Returns 0 on success, -1 on error (with exception set). 
-static int -parse_randrange_args(PyObject *const *args, Py_ssize_t nargs, - int64_t *a, int64_t *b) -{ - if (nargs == 1) { - *a = 0; - int64_t stop = CPyLong_AsInt64(args[0]); - if (unlikely(stop == CPY_LL_INT_ERROR && PyErr_Occurred())) - return -1; - if (stop <= 0) { - PyErr_SetString(PyExc_ValueError, "empty range for randrange()"); - return -1; - } - *b = stop - 1; - } else if (nargs == 2) { - *a = CPyLong_AsInt64(args[0]); - if (unlikely(*a == CPY_LL_INT_ERROR && PyErr_Occurred())) - return -1; - int64_t stop = CPyLong_AsInt64(args[1]); - if (unlikely(stop == CPY_LL_INT_ERROR && PyErr_Occurred())) - return -1; - if (*a >= stop) { - PyErr_SetString(PyExc_ValueError, "empty range for randrange()"); - return -1; - } - *b = stop - 1; - } else { - PyErr_Format(PyExc_TypeError, - "randrange() takes 1 or 2 arguments (%zd given)", nargs); - return -1; - } - return 0; -} - -static PyObject* -module_randrange(PyObject *module, PyObject *const *args, Py_ssize_t nargs) -{ - int64_t a, b; - if (parse_randrange_args(args, nargs, &a, &b) < 0) - return NULL; - - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return NULL; - - return randint_impl(rng, a, b); -} - -static PyObject* -module_seed(PyObject *module, PyObject *const *args, Py_ssize_t nargs) -{ - if (nargs != 1) { - PyErr_Format(PyExc_TypeError, - "seed() takes exactly 1 argument (%zd given)", nargs); - return NULL; - } - int64_t seed_val = CPyLong_AsInt64(args[0]); - if (unlikely(seed_val == CPY_LL_INT_ERROR && PyErr_Occurred())) - return NULL; - - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return NULL; - - chacha8_seed_int(rng, seed_val); - Py_RETURN_NONE; -} - -// -// Random Python type -// - -typedef struct { - PyObject_HEAD - chacha8_rng rng; -} RandomObject; - -static PyTypeObject RandomType; - -static PyObject* -Random_new(PyTypeObject *type, PyObject *args, PyObject *kwds) -{ - if (type != &RandomType) { - PyErr_SetString(PyExc_TypeError, "Random cannot be subclassed"); - return 
NULL; - } - - RandomObject *self = (RandomObject *)type->tp_alloc(type, 0); - // Seeding is done in tp_init - return (PyObject *)self; -} - -static int -Random_init(RandomObject *self, PyObject *args, PyObject *kwds) -{ - PyObject *seed_obj = NULL; - - if (!PyArg_ParseTuple(args, "|O", &seed_obj)) { - return -1; - } - - if (kwds != NULL && PyDict_Size(kwds) > 0) { - PyErr_SetString(PyExc_TypeError, - "Random() takes no keyword arguments"); - return -1; - } - - if (seed_obj == NULL || seed_obj == Py_None) { - if (chacha8_init(&self->rng) < 0) - return -1; - } else { - int64_t seed_val = CPyLong_AsInt64(seed_obj); - if (unlikely(seed_val == CPY_LL_INT_ERROR && PyErr_Occurred())) - return -1; - chacha8_seed_int(&self->rng, seed_val); - } - - return 0; -} - -// Internal constructors for capsule API (bypass tp_new/tp_init) - -static PyObject * -Random_internal(void) { - RandomObject *self = (RandomObject *)RandomType.tp_alloc(&RandomType, 0); - if (self == NULL) - return NULL; - if (chacha8_init(&self->rng) < 0) { - Py_DECREF(self); - return NULL; - } - return (PyObject *)self; -} - -static PyObject * -Random_from_seed_internal(int64_t seed_val) { - RandomObject *self = (RandomObject *)RandomType.tp_alloc(&RandomType, 0); - if (self == NULL) - return NULL; - chacha8_seed_int(&self->rng, seed_val); - return (PyObject *)self; -} - -static PyTypeObject * -Random_type_internal(void) { - return &RandomType; -} - -static int64_t -Random_randrange1_internal(PyObject *self, int64_t stop) { - if (unlikely(stop <= 0)) { - PyErr_SetString(PyExc_ValueError, "empty range for randrange()"); - return CPY_LL_INT_ERROR; - } - return (int64_t)chacha8_next_ranged(&((RandomObject *)self)->rng, (uint64_t)stop); -} - -static int64_t -Random_randrange2_internal(PyObject *self, int64_t start, int64_t stop) { - if (unlikely(start >= stop)) { - PyErr_SetString(PyExc_ValueError, "empty range for randrange()"); - return CPY_LL_INT_ERROR; - } - uint64_t range = (uint64_t)stop - (uint64_t)start; - 
return random_i64_from_range(&((RandomObject *)self)->rng, start, range); -} - -static int64_t -Random_randint_internal(PyObject *self, int64_t a, int64_t b) { - if (unlikely(a > b)) { - PyErr_SetString(PyExc_ValueError, "empty range for randint()"); - return CPY_LL_INT_ERROR; - } - uint64_t range = (uint64_t)b - (uint64_t)a + 1; - return random_i64_from_range(&((RandomObject *)self)->rng, a, range); -} - -static double -Random_random_internal(PyObject *self) { - return random_double_impl(&((RandomObject *)self)->rng); -} - -static PyObject* -Random_randint(RandomObject *self, PyObject *const *args, Py_ssize_t nargs) { - if (nargs != 2) { - PyErr_Format(PyExc_TypeError, - "randint() takes exactly 2 arguments (%zd given)", nargs); - return NULL; - } - - int64_t a = CPyLong_AsInt64(args[0]); - if (unlikely(a == CPY_LL_INT_ERROR && PyErr_Occurred())) - return NULL; - - int64_t b = CPyLong_AsInt64(args[1]); - if (unlikely(b == CPY_LL_INT_ERROR && PyErr_Occurred())) - return NULL; - - if (a > b) { - PyErr_SetString(PyExc_ValueError, - "empty range for randint()"); - return NULL; - } - - return randint_impl(&self->rng, a, b); -} - -static PyObject* -Random_randrange(RandomObject *self, PyObject *const *args, Py_ssize_t nargs) { - int64_t a, b; - if (parse_randrange_args(args, nargs, &a, &b) < 0) - return NULL; - return randint_impl(&self->rng, a, b); -} - -static PyObject* -Random_random(RandomObject *self, PyObject *Py_UNUSED(ignored)) { - return PyFloat_FromDouble(random_double_impl(&self->rng)); -} - -static PyObject* -Random_seed(RandomObject *self, PyObject *const *args, Py_ssize_t nargs) { - if (nargs != 1) { - PyErr_Format(PyExc_TypeError, - "seed() takes exactly 1 argument (%zd given)", nargs); - return NULL; - } - int64_t seed_val = CPyLong_AsInt64(args[0]); - if (unlikely(seed_val == CPY_LL_INT_ERROR && PyErr_Occurred())) - return NULL; - chacha8_seed_int(&self->rng, seed_val); - Py_RETURN_NONE; -} - -static PyMethodDef Random_methods[] = { - {"randint", 
(PyCFunction) Random_randint, METH_FASTCALL, - PyDoc_STR("Return random integer in range [a, b], including both end points.") - }, - {"randrange", (PyCFunction) Random_randrange, METH_FASTCALL, - PyDoc_STR("Return random integer in range [start, stop).") - }, - {"random", (PyCFunction) Random_random, METH_NOARGS, - PyDoc_STR("Return random float in [0.0, 1.0).") - }, - {"seed", (PyCFunction) Random_seed, METH_FASTCALL, - PyDoc_STR("Seed the random number generator with an integer.") - }, - {NULL} /* Sentinel */ -}; - -static PyTypeObject RandomType = { - .ob_base = PyVarObject_HEAD_INIT(NULL, 0) - .tp_name = "Random", - .tp_doc = PyDoc_STR("Fast random number generator using ChaCha8"), - .tp_basicsize = sizeof(RandomObject), - .tp_itemsize = 0, - .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_new = Random_new, - .tp_init = (initproc) Random_init, - .tp_methods = Random_methods, -}; - -// Module definition - -static PyMethodDef librt_random_module_methods[] = { - {"random", (PyCFunction) module_random, METH_NOARGS, - PyDoc_STR("Return random float in [0.0, 1.0) using thread-local RNG.") - }, - {"randint", (PyCFunction) module_randint, METH_FASTCALL, - PyDoc_STR("Return random integer in range [a, b] using thread-local RNG.") - }, - {"randrange", (PyCFunction) module_randrange, METH_FASTCALL, - PyDoc_STR("Return random integer in range [start, stop) using thread-local RNG.") - }, - {"seed", (PyCFunction) module_seed, METH_FASTCALL, - PyDoc_STR("Seed the thread-local RNG with an integer.") - }, - {NULL, NULL, 0, NULL} -}; - -// Module-level internal functions for mypyc primitives (use thread-local RNG) - -static double -module_random_internal(void) { - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return CPY_FLOAT_ERROR; - return random_double_impl(rng); -} - -static int64_t -module_randint_internal(int64_t a, int64_t b) { - if (unlikely(a > b)) { - PyErr_SetString(PyExc_ValueError, "empty range for randint()"); - return CPY_LL_INT_ERROR; - } - chacha8_rng *rng = 
get_thread_rng(); - if (rng == NULL) - return CPY_LL_INT_ERROR; - uint64_t range = (uint64_t)b - (uint64_t)a + 1; - return random_i64_from_range(rng, a, range); -} - -static int64_t -module_randrange1_internal(int64_t stop) { - if (unlikely(stop <= 0)) { - PyErr_SetString(PyExc_ValueError, "empty range for randrange()"); - return CPY_LL_INT_ERROR; - } - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return CPY_LL_INT_ERROR; - return (int64_t)chacha8_next_ranged(rng, (uint64_t)stop); -} - -static int64_t -module_randrange2_internal(int64_t start, int64_t stop) { - if (unlikely(start >= stop)) { - PyErr_SetString(PyExc_ValueError, "empty range for randrange()"); - return CPY_LL_INT_ERROR; - } - chacha8_rng *rng = get_thread_rng(); - if (rng == NULL) - return CPY_LL_INT_ERROR; - uint64_t range = (uint64_t)stop - (uint64_t)start; - return random_i64_from_range(rng, start, range); -} - -static int -random_abi_version(void) { - return LIBRT_RANDOM_ABI_VERSION; -} - -static int -random_api_version(void) { - return LIBRT_RANDOM_API_VERSION; -} - -static int -librt_random_module_exec(PyObject *m) -{ - if (ensure_tls_key() < 0) { - return -1; - } - if (PyType_Ready(&RandomType) < 0) { - return -1; - } - if (PyModule_AddObjectRef(m, "Random", (PyObject *) &RandomType) < 0) { - return -1; - } - // Export mypyc internal C API via capsule - static void *librt_random_api[LIBRT_RANDOM_API_LEN] = { - (void *)random_abi_version, - (void *)random_api_version, - (void *)Random_internal, - (void *)Random_from_seed_internal, - (void *)Random_type_internal, - (void *)Random_random_internal, - (void *)Random_randint_internal, - (void *)Random_randrange1_internal, - (void *)Random_randrange2_internal, - (void *)module_random_internal, - (void *)module_randint_internal, - (void *)module_randrange1_internal, - (void *)module_randrange2_internal, - }; - PyObject *c_api_object = PyCapsule_New((void *)librt_random_api, "librt.random._C_API", NULL); - if (PyModule_Add(m, "_C_API", 
c_api_object) < 0) { - return -1; - } - return 0; -} - -static PyModuleDef_Slot librt_random_module_slots[] = { - {Py_mod_exec, librt_random_module_exec}, -#ifdef Py_MOD_GIL_NOT_USED - {Py_mod_gil, Py_MOD_GIL_NOT_USED}, -#endif - {0, NULL} -}; - -static PyModuleDef librt_random_module = { - .m_base = PyModuleDef_HEAD_INIT, - .m_name = "random", - .m_doc = "Fast random number generation using ChaCha8", - .m_size = 0, - .m_methods = librt_random_module_methods, - .m_slots = librt_random_module_slots, -}; - -PyMODINIT_FUNC -PyInit_random(void) -{ - return PyModuleDef_Init(&librt_random_module); -} diff --git a/mypyc/lib-rt/random/librt_random.h b/mypyc/lib-rt/random/librt_random.h deleted file mode 100644 index 2eabfbd021bc9..0000000000000 --- a/mypyc/lib-rt/random/librt_random.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef LIBRT_RANDOM_H -#define LIBRT_RANDOM_H - -#include - -#define LIBRT_RANDOM_ABI_VERSION 1 -#define LIBRT_RANDOM_API_VERSION 9 -#define LIBRT_RANDOM_API_LEN 13 - -#endif // LIBRT_RANDOM_H diff --git a/mypyc/lib-rt/random/librt_random_api.c b/mypyc/lib-rt/random/librt_random_api.c deleted file mode 100644 index 157fa82b82eb3..0000000000000 --- a/mypyc/lib-rt/random/librt_random_api.c +++ /dev/null @@ -1,45 +0,0 @@ -#include - -#include "librt_random_api.h" - -void *LibRTRandom_API[LIBRT_RANDOM_API_LEN] = {0}; - -int -import_librt_random(void) -{ - PyObject *mod = PyImport_ImportModule("librt.random"); - if (mod == NULL) - return -1; - Py_DECREF(mod); // we import just for the side effect of making the below work. - void **capsule = (void **)PyCapsule_Import("librt.random._C_API", 0); - if (capsule == NULL) - return -1; - - // Only after version validation succeeds can we safely copy the full table. 
- int (*abi_version)(void) = (int (*)(void))capsule[0]; - int (*api_version)(void) = (int (*)(void))capsule[1]; - if (abi_version() != LIBRT_RANDOM_ABI_VERSION) { - char err[128]; - snprintf(err, sizeof(err), "ABI version conflict for librt.random, expected %d, found %d", - LIBRT_RANDOM_ABI_VERSION, - abi_version() - ); - PyErr_SetString(PyExc_ValueError, err); - return -1; - } - if (api_version() < LIBRT_RANDOM_API_VERSION) { - char err[128]; - snprintf(err, sizeof(err), - "API version conflict for librt.random, expected %d or newer, found %d (hint: upgrade librt)", - LIBRT_RANDOM_API_VERSION, - api_version() - ); - PyErr_SetString(PyExc_ValueError, err); - return -1; - } - // Provider API version is >= our expected version, which (by the API - // compatibility contract) means it has at least LIBRT_RANDOM_API_LEN - // entries, so this copy is safe. - memcpy(LibRTRandom_API, capsule, sizeof(LibRTRandom_API)); - return 0; -} diff --git a/mypyc/lib-rt/random/librt_random_api.h b/mypyc/lib-rt/random/librt_random_api.h deleted file mode 100644 index 2794de0dd7e58..0000000000000 --- a/mypyc/lib-rt/random/librt_random_api.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef LIBRT_RANDOM_API_H -#define LIBRT_RANDOM_API_H - -#include -#include -#include -#include "librt_random.h" - -int -import_librt_random(void); - -extern void *LibRTRandom_API[LIBRT_RANDOM_API_LEN]; - -#define LibRTRandom_ABIVersion (*(int (*)(void)) LibRTRandom_API[0]) -#define LibRTRandom_APIVersion (*(int (*)(void)) LibRTRandom_API[1]) -#define LibRTRandom_Random_internal (*(PyObject* (*)(void)) LibRTRandom_API[2]) -#define LibRTRandom_Random_from_seed_internal (*(PyObject* (*)(int64_t)) LibRTRandom_API[3]) -#define LibRTRandom_Random_type_internal (*(PyTypeObject* (*)(void)) LibRTRandom_API[4]) -#define LibRTRandom_Random_random_internal (*(double (*)(PyObject*)) LibRTRandom_API[5]) -#define LibRTRandom_Random_randint_internal (*(int64_t (*)(PyObject*, int64_t, int64_t)) LibRTRandom_API[6]) -#define 
LibRTRandom_Random_randrange1_internal (*(int64_t (*)(PyObject*, int64_t)) LibRTRandom_API[7]) -#define LibRTRandom_Random_randrange2_internal (*(int64_t (*)(PyObject*, int64_t, int64_t)) LibRTRandom_API[8]) -#define LibRTRandom_module_random_internal (*(double (*)(void)) LibRTRandom_API[9]) -#define LibRTRandom_module_randint_internal (*(int64_t (*)(int64_t, int64_t)) LibRTRandom_API[10]) -#define LibRTRandom_module_randrange1_internal (*(int64_t (*)(int64_t)) LibRTRandom_API[11]) -#define LibRTRandom_module_randrange2_internal (*(int64_t (*)(int64_t, int64_t)) LibRTRandom_API[12]) - -static inline bool CPyRandom_Check(PyObject *obj) { - return Py_TYPE(obj) == LibRTRandom_Random_type_internal(); -} - -#endif // LIBRT_RANDOM_API_H diff --git a/mypyc/lib-rt/setup.py b/mypyc/lib-rt/setup.py index 371b322ca18b2..49b6c10201317 100644 --- a/mypyc/lib-rt/setup.py +++ b/mypyc/lib-rt/setup.py @@ -151,18 +151,5 @@ def run(self) -> None: Extension( "librt.time", ["time/librt_time.c"], include_dirs=["."], extra_compile_args=cflags ), - Extension( - "librt.random", - [ - "random/librt_random.c", - "init.c", - "int_ops.c", - "exc_ops.c", - "pythonsupport.c", - "getargsfast.c", - ], - include_dirs=["."], - extra_compile_args=cflags, - ), ] ) diff --git a/mypyc/lib-rt/strings/librt_strings.h b/mypyc/lib-rt/strings/librt_strings.h index e6236f7950929..0bf1587e6f44f 100644 --- a/mypyc/lib-rt/strings/librt_strings.h +++ b/mypyc/lib-rt/strings/librt_strings.h @@ -1,8 +1,8 @@ #ifndef LIBRT_STRINGS_H #define LIBRT_STRINGS_H -#include #include +#include #include "librt_strings_common.h" // ABI version -- only an exact match is compatible. 
This will only be changed in diff --git a/mypyc/lib-rt/strings/librt_strings_api.h b/mypyc/lib-rt/strings/librt_strings_api.h index 536b90ad7f21c..f0bb761bcaa2e 100644 --- a/mypyc/lib-rt/strings/librt_strings_api.h +++ b/mypyc/lib-rt/strings/librt_strings_api.h @@ -4,8 +4,8 @@ int import_librt_strings(void); -#include #include +#include #include "librt_strings.h" extern void *LibRTStrings_API[LIBRT_STRINGS_API_LEN]; diff --git a/mypyc/lib-rt/stringwriter_extra_ops.h b/mypyc/lib-rt/stringwriter_extra_ops.h index bac6dd6b3e95c..0da9a4d9d7f71 100644 --- a/mypyc/lib-rt/stringwriter_extra_ops.h +++ b/mypyc/lib-rt/stringwriter_extra_ops.h @@ -1,9 +1,9 @@ #ifndef STRINGWRITER_EXTRA_OPS_H #define STRINGWRITER_EXTRA_OPS_H -#include #include #include +#include #include "mypyc_util.h" #include "strings/librt_strings_api.h" diff --git a/mypyc/lib-rt/test_capi.cc b/mypyc/lib-rt/test_capi.cc index bf15a47e63dd1..4b183de5743e9 100644 --- a/mypyc/lib-rt/test_capi.cc +++ b/mypyc/lib-rt/test_capi.cc @@ -1,8 +1,8 @@ // Test cases +#include #include #include "CPy.h" -#include static PyObject *moduleDict; diff --git a/mypyc/lib-rt/vecs/librt_vecs.c b/mypyc/lib-rt/vecs/librt_vecs.c index 67c2148005b25..8726b63de7a10 100644 --- a/mypyc/lib-rt/vecs/librt_vecs.c +++ b/mypyc/lib-rt/vecs/librt_vecs.c @@ -74,6 +74,8 @@ #include #include "librt_vecs.h" +#ifdef MYPYC_EXPERIMENTAL + PyTypeObject *LibRTVecs_I64TypeObj; PyTypeObject *LibRTVecs_I32TypeObj; PyTypeObject *LibRTVecs_I16TypeObj; @@ -564,7 +566,7 @@ static PyObject *vec_append(PyObject *self, PyObject *args) return NULL; } VecI64 v = ((VecI64Object *)vec)->vec; - VEC_I64_INCREF(v); + VEC_INCREF(v); v = VecI64_Append(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -575,7 +577,7 @@ static PyObject *vec_append(PyObject *self, PyObject *args) return NULL; } VecU8 v = ((VecU8Object *)vec)->vec; - VEC_U8_INCREF(v); + VEC_INCREF(v); v = VecU8_Append(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -586,7 +588,7 @@ static PyObject *vec_append(PyObject 
*self, PyObject *args) return NULL; } VecFloat v = ((VecFloatObject *)vec)->vec; - VEC_FLOAT_INCREF(v); + VEC_INCREF(v); v = VecFloat_Append(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -597,7 +599,7 @@ static PyObject *vec_append(PyObject *self, PyObject *args) return NULL; } VecI32 v = ((VecI32Object *)vec)->vec; - VEC_I32_INCREF(v); + VEC_INCREF(v); v = VecI32_Append(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -608,7 +610,7 @@ static PyObject *vec_append(PyObject *self, PyObject *args) return NULL; } VecI16 v = ((VecI16Object *)vec)->vec; - VEC_I16_INCREF(v); + VEC_INCREF(v); v = VecI16_Append(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -619,27 +621,27 @@ static PyObject *vec_append(PyObject *self, PyObject *args) return NULL; } VecBool v = ((VecBoolObject *)vec)->vec; - VEC_BOOL_INCREF(v); + VEC_INCREF(v); v = VecBool_Append(v, x); if (VEC_IS_ERROR(v)) return NULL; return VecBool_Box(v); } else if (VecT_Check(vec)) { VecT v = ((VecTObject *)vec)->vec; - if (!VecT_ItemCheck(v, item, VEC_T_BUF(v)->item_type)) { + if (!VecT_ItemCheck(v, item, v.buf->item_type)) { return NULL; } - VEC_T_INCREF(v); - v = VecT_Append(v, item, VEC_T_BUF(v)->item_type); + VEC_INCREF(v); + v = VecT_Append(v, item, v.buf->item_type); if (VEC_IS_ERROR(v)) return NULL; - return VecT_Box(v, VEC_T_BUF(v)->item_type); + return VecT_Box(v, v.buf->item_type); } else if (VecNested_Check(vec)) { VecNested v = ((VecNestedObject *)vec)->vec; VecNestedBufItem vecitem; if (VecNested_UnboxItem(v, item, &vecitem) < 0) return NULL; - VEC_NESTED_INCREF(v); + VEC_INCREF(v); v = VecNested_Append(v, vecitem); if (VEC_IS_ERROR(v)) return NULL; @@ -665,7 +667,7 @@ static PyObject *vec_remove(PyObject *self, PyObject *args) return NULL; } VecI64 v = ((VecI64Object *)vec)->vec; - VEC_I64_INCREF(v); + VEC_INCREF(v); v = VecI64_Remove(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -676,7 +678,7 @@ static PyObject *vec_remove(PyObject *self, PyObject *args) return NULL; } VecU8 v = ((VecU8Object *)vec)->vec; - 
VEC_U8_INCREF(v); + VEC_INCREF(v); v = VecU8_Remove(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -687,7 +689,7 @@ static PyObject *vec_remove(PyObject *self, PyObject *args) return NULL; } VecFloat v = ((VecFloatObject *)vec)->vec; - VEC_FLOAT_INCREF(v); + VEC_INCREF(v); v = VecFloat_Remove(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -698,7 +700,7 @@ static PyObject *vec_remove(PyObject *self, PyObject *args) return NULL; } VecI32 v = ((VecI32Object *)vec)->vec; - VEC_I32_INCREF(v); + VEC_INCREF(v); v = VecI32_Remove(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -709,7 +711,7 @@ static PyObject *vec_remove(PyObject *self, PyObject *args) return NULL; } VecI16 v = ((VecI16Object *)vec)->vec; - VEC_I16_INCREF(v); + VEC_INCREF(v); v = VecI16_Remove(v, x); if (VEC_IS_ERROR(v)) return NULL; @@ -720,27 +722,27 @@ static PyObject *vec_remove(PyObject *self, PyObject *args) return NULL; } VecBool v = ((VecBoolObject *)vec)->vec; - VEC_BOOL_INCREF(v); + VEC_INCREF(v); v = VecBool_Remove(v, x); if (VEC_IS_ERROR(v)) return NULL; return VecBool_Box(v); } else if (VecT_Check(vec)) { VecT v = ((VecTObject *)vec)->vec; - if (!VecT_ItemCheck(v, item, VEC_T_BUF(v)->item_type)) { + if (!VecT_ItemCheck(v, item, v.buf->item_type)) { return NULL; } - VEC_T_INCREF(v); + VEC_INCREF(v); v = VecT_Remove(v, item); if (VEC_IS_ERROR(v)) return NULL; - return VecT_Box(v, VEC_T_BUF(v)->item_type); + return VecT_Box(v, v.buf->item_type); } else if (VecNested_Check(vec)) { VecNested v = ((VecNestedObject *)vec)->vec; VecNestedBufItem vecitem; if (VecNested_UnboxItem(v, item, &vecitem) < 0) return NULL; - VEC_NESTED_INCREF(v); + VEC_INCREF(v); v = VecNested_Remove(v, vecitem); if (VEC_IS_ERROR(v)) return NULL; @@ -765,7 +767,7 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) if (VecI64_Check(vec)) { VecI64 v = ((VecI64Object *)vec)->vec; - VEC_I64_INCREF(v); + VEC_INCREF(v); VecI64PopResult r; r = VecI64_Pop(v, index); if (VEC_IS_ERROR(r.f0)) @@ -775,7 +777,7 @@ static PyObject 
*vec_pop(PyObject *self, PyObject *args) result_item1 = VecI64_BoxItem(r.f1); } else if (VecU8_Check(vec)) { VecU8 v = ((VecU8Object *)vec)->vec; - VEC_U8_INCREF(v); + VEC_INCREF(v); VecU8PopResult r; r = VecU8_Pop(v, index); if (VEC_IS_ERROR(r.f0)) @@ -785,7 +787,7 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) result_item1 = VecU8_BoxItem(r.f1); } else if (VecFloat_Check(vec)) { VecFloat v = ((VecFloatObject *)vec)->vec; - VEC_FLOAT_INCREF(v); + VEC_INCREF(v); VecFloatPopResult r; r = VecFloat_Pop(v, index); if (VEC_IS_ERROR(r.f0)) @@ -795,7 +797,7 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) result_item1 = VecFloat_BoxItem(r.f1); } else if (VecI32_Check(vec)) { VecI32 v = ((VecI32Object *)vec)->vec; - VEC_I32_INCREF(v); + VEC_INCREF(v); VecI32PopResult r; r = VecI32_Pop(v, index); if (VEC_IS_ERROR(r.f0)) @@ -805,7 +807,7 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) result_item1 = VecI32_BoxItem(r.f1); } else if (VecI16_Check(vec)) { VecI16 v = ((VecI16Object *)vec)->vec; - VEC_I16_INCREF(v); + VEC_INCREF(v); VecI16PopResult r; r = VecI16_Pop(v, index); if (VEC_IS_ERROR(r.f0)) @@ -815,7 +817,7 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) result_item1 = VecI16_BoxItem(r.f1); } else if (VecBool_Check(vec)) { VecBool v = ((VecBoolObject *)vec)->vec; - VEC_BOOL_INCREF(v); + VEC_INCREF(v); VecBoolPopResult r; r = VecBool_Pop(v, index); if (VEC_IS_ERROR(r.f0)) @@ -825,12 +827,12 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) result_item1 = VecBool_BoxItem(r.f1); } else if (VecT_Check(vec)) { VecT v = ((VecTObject *)vec)->vec; - VEC_T_INCREF(v); + VEC_INCREF(v); VecTPopResult r; r = VecT_Pop(v, index); if (VEC_IS_ERROR(r.f0)) return NULL; - result_item0 = VecT_Box(r.f0, VEC_T_BUF(v)->item_type); + result_item0 = VecT_Box(r.f0, v.buf->item_type); if (result_item0 == NULL) { Py_DECREF(r.f1); return NULL; @@ -838,24 +840,23 @@ static PyObject *vec_pop(PyObject *self, PyObject *args) result_item1 = r.f1; 
} else if (VecNested_Check(vec)) { VecNested v = ((VecNestedObject *)vec)->vec; - VEC_NESTED_INCREF(v); + VEC_INCREF(v); VecNestedPopResult r; r = VecNested_Pop(v, index); if (VEC_IS_ERROR(r.f0)) return NULL; - PyObject *popped_buf = VecNested_ItemBuf(VEC_NESTED_BUF(r.f0), r.f1); result_item0 = VecNested_Box(r.f0); if (result_item0 == NULL) { - Py_XDECREF(popped_buf); + Py_DECREF(r.f0.buf); + Py_DECREF(r.f1.buf); return NULL; } result_item1 = VecNested_BoxItem(r.f0, r.f1); if (result_item1 == NULL) { Py_DECREF(result_item0); - Py_XDECREF(popped_buf); + Py_DECREF(r.f1.buf); return NULL; } - Py_XDECREF(popped_buf); } else { PyErr_Format(PyExc_TypeError, "vec argument expected, got %.100s", Py_TYPE(vec)->tp_name); @@ -889,57 +890,57 @@ static PyObject *vec_extend(PyObject *self, PyObject *args) if (VecI64_Check(vec)) { VecI64 v = ((VecI64Object *)vec)->vec; - VEC_I64_INCREF(v); + VEC_INCREF(v); v = VecI64_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; return VecI64_Box(v); } else if (VecU8_Check(vec)) { VecU8 v = ((VecU8Object *)vec)->vec; - VEC_U8_INCREF(v); + VEC_INCREF(v); v = VecU8_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; return VecU8_Box(v); } else if (VecFloat_Check(vec)) { VecFloat v = ((VecFloatObject *)vec)->vec; - VEC_FLOAT_INCREF(v); + VEC_INCREF(v); v = VecFloat_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; return VecFloat_Box(v); } else if (VecI32_Check(vec)) { VecI32 v = ((VecI32Object *)vec)->vec; - VEC_I32_INCREF(v); + VEC_INCREF(v); v = VecI32_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; return VecI32_Box(v); } else if (VecI16_Check(vec)) { VecI16 v = ((VecI16Object *)vec)->vec; - VEC_I16_INCREF(v); + VEC_INCREF(v); v = VecI16_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; return VecI16_Box(v); } else if (VecBool_Check(vec)) { VecBool v = ((VecBoolObject *)vec)->vec; - VEC_BOOL_INCREF(v); + VEC_INCREF(v); v = VecBool_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; return VecBool_Box(v); } else if 
(VecT_Check(vec)) { VecT v = ((VecTObject *)vec)->vec; - size_t item_type = VEC_T_BUF(v)->item_type; - VEC_T_INCREF(v); + size_t item_type = v.buf->item_type; + VEC_INCREF(v); v = VecT_Extend(v, iterable, item_type); if (VEC_IS_ERROR(v)) return NULL; return VecT_Box(v, item_type); } else if (VecNested_Check(vec)) { VecNested v = ((VecNestedObject *)vec)->vec; - VEC_NESTED_INCREF(v); + VEC_INCREF(v); v = VecNested_Extend(v, iterable); if (VEC_IS_ERROR(v)) return NULL; @@ -956,19 +957,7 @@ static PyTypeObject *get_vec_type(void) { return &VecType; } -static int -vecs_abi_version(void) { - return LIBRT_VECS_ABI_VERSION; -} - -static int -vecs_api_version(void) { - return LIBRT_VECS_API_VERSION; -} - static VecCapsule Capsule = { - vecs_abi_version, - vecs_api_version, &Vec_TAPI, &Vec_NestedAPI, &Vec_I64API, @@ -980,17 +969,22 @@ static VecCapsule Capsule = { get_vec_type, }; +#endif // MYPYC_EXPERIMENTAL + static PyMethodDef VecsMethods[] = { +#ifdef MYPYC_EXPERIMENTAL {"append", vec_append, METH_VARARGS, "Append a value to the end of a vec"}, {"remove", vec_remove, METH_VARARGS, "Remove first occurrence of value from a vec"}, {"pop", vec_pop, METH_VARARGS, "Remove and return vec item at index (default last)"}, {"extend", vec_extend, METH_VARARGS, "Extend a vec with items from an iterable"}, +#endif {NULL, NULL, 0, NULL} /* Sentinel */ }; static int librt_vecs_module_exec(PyObject *m) { +#ifdef MYPYC_EXPERIMENTAL PyObject *ext = PyImport_ImportModule("mypy_extensions"); if (ext == NULL) { return -1; @@ -1102,6 +1096,7 @@ librt_vecs_module_exec(PyObject *m) } Py_DECREF(ext); +#endif return 0; } diff --git a/mypyc/lib-rt/vecs/librt_vecs.h b/mypyc/lib-rt/vecs/librt_vecs.h index 73a16225aee26..d0bb4760ed579 100644 --- a/mypyc/lib-rt/vecs/librt_vecs.h +++ b/mypyc/lib-rt/vecs/librt_vecs.h @@ -6,19 +6,10 @@ #define PY_SSIZE_T_CLEAN #include -#include #include #include "mypyc_util.h" -// ABI version -- only an exact match is compatible. 
This will only be changed in -// very exceptional cases (likely never) due to strict backward compatibility -// requirements. -#define LIBRT_VECS_ABI_VERSION 1 - -// API version -- more recent versions must maintain backward compatibility, i.e. -// we can add new features but not remove or change existing features (unless -// ABI version is changed, but see the comment above). -#define LIBRT_VECS_API_VERSION 1 +#ifdef MYPYC_EXPERIMENTAL // Magic (native) integer return value on exception. Caller must also // use PyErr_Occurred() since this overlaps with valid integer values. @@ -90,7 +81,7 @@ typedef struct _VecTBufObject { typedef struct _VecNestedBufItem { Py_ssize_t len; - void *items; + PyObject *buf; } VecNestedBufItem; // Nested vec type: vec[vec[...]], vec[vec[...] | None], etc. @@ -106,98 +97,48 @@ typedef struct _VecNestedBufObject { // Unboxed vec objects -// -// The items pointer points to the first element of the items array in the -// corresponding buffer object. Use VEC_*_BUF() to recover the buffer object -// from the items pointer (only needed on cold paths like grow/refcount). -// Items pointer is NULL for empty/uninitialized vecs. typedef struct _VecI64 { Py_ssize_t len; - int64_t *items; + VecI64BufObject *buf; } VecI64; typedef struct _VecI32 { Py_ssize_t len; - int32_t *items; + VecI32BufObject *buf; } VecI32; typedef struct _VecI16 { Py_ssize_t len; - int16_t *items; + VecI16BufObject *buf; } VecI16; typedef struct _VecU8 { Py_ssize_t len; - uint8_t *items; + VecU8BufObject *buf; } VecU8; typedef struct _VecFloat { Py_ssize_t len; - double *items; + VecFloatBufObject *buf; } VecFloat; typedef struct _VecBool { Py_ssize_t len; - char *items; + VecBoolBufObject *buf; } VecBool; typedef struct _VecT { Py_ssize_t len; - PyObject **items; + VecTBufObject *buf; } VecT; typedef struct _VecNested { Py_ssize_t len; - VecNestedBufItem *items; + VecNestedBufObject *buf; } VecNested; -// Recover buffer object from items pointer. 
Only valid when items != NULL. -#define VEC_I64_BUF_FROM_ITEMS(items_) \ - ((VecI64BufObject *)((char *)(items_) - offsetof(VecI64BufObject, items))) -#define VEC_I32_BUF_FROM_ITEMS(items_) \ - ((VecI32BufObject *)((char *)(items_) - offsetof(VecI32BufObject, items))) -#define VEC_I16_BUF_FROM_ITEMS(items_) \ - ((VecI16BufObject *)((char *)(items_) - offsetof(VecI16BufObject, items))) -#define VEC_U8_BUF_FROM_ITEMS(items_) \ - ((VecU8BufObject *)((char *)(items_) - offsetof(VecU8BufObject, items))) -#define VEC_FLOAT_BUF_FROM_ITEMS(items_) \ - ((VecFloatBufObject *)((char *)(items_) - offsetof(VecFloatBufObject, items))) -#define VEC_BOOL_BUF_FROM_ITEMS(items_) \ - ((VecBoolBufObject *)((char *)(items_) - offsetof(VecBoolBufObject, items))) -#define VEC_T_BUF_FROM_ITEMS(items_) \ - ((VecTBufObject *)((char *)(items_) - offsetof(VecTBufObject, items))) -#define VEC_NESTED_BUF_FROM_ITEMS(items_) \ - ((VecNestedBufObject *)((char *)(items_) - offsetof(VecNestedBufObject, items))) - -#define VEC_I64_BUF(v) VEC_I64_BUF_FROM_ITEMS((v).items) -#define VEC_I32_BUF(v) VEC_I32_BUF_FROM_ITEMS((v).items) -#define VEC_I16_BUF(v) VEC_I16_BUF_FROM_ITEMS((v).items) -#define VEC_U8_BUF(v) VEC_U8_BUF_FROM_ITEMS((v).items) -#define VEC_FLOAT_BUF(v) VEC_FLOAT_BUF_FROM_ITEMS((v).items) -#define VEC_BOOL_BUF(v) VEC_BOOL_BUF_FROM_ITEMS((v).items) -#define VEC_T_BUF(v) VEC_T_BUF_FROM_ITEMS((v).items) -#define VEC_NESTED_BUF(v) VEC_NESTED_BUF_FROM_ITEMS((v).items) - -// Type-specific incref/decref. Safe when items may be NULL. 
-#define VEC_I64_INCREF(v) do { if ((v).items) Py_INCREF(VEC_I64_BUF(v)); } while (0) -#define VEC_I64_DECREF(v) do { if ((v).items) Py_DECREF(VEC_I64_BUF(v)); } while (0) -#define VEC_I32_INCREF(v) do { if ((v).items) Py_INCREF(VEC_I32_BUF(v)); } while (0) -#define VEC_I32_DECREF(v) do { if ((v).items) Py_DECREF(VEC_I32_BUF(v)); } while (0) -#define VEC_I16_INCREF(v) do { if ((v).items) Py_INCREF(VEC_I16_BUF(v)); } while (0) -#define VEC_I16_DECREF(v) do { if ((v).items) Py_DECREF(VEC_I16_BUF(v)); } while (0) -#define VEC_U8_INCREF(v) do { if ((v).items) Py_INCREF(VEC_U8_BUF(v)); } while (0) -#define VEC_U8_DECREF(v) do { if ((v).items) Py_DECREF(VEC_U8_BUF(v)); } while (0) -#define VEC_FLOAT_INCREF(v) do { if ((v).items) Py_INCREF(VEC_FLOAT_BUF(v)); } while (0) -#define VEC_FLOAT_DECREF(v) do { if ((v).items) Py_DECREF(VEC_FLOAT_BUF(v)); } while (0) -#define VEC_BOOL_INCREF(v) do { if ((v).items) Py_INCREF(VEC_BOOL_BUF(v)); } while (0) -#define VEC_BOOL_DECREF(v) do { if ((v).items) Py_DECREF(VEC_BOOL_BUF(v)); } while (0) -#define VEC_T_INCREF(v) do { if ((v).items) Py_INCREF(VEC_T_BUF(v)); } while (0) -#define VEC_T_DECREF(v) do { if ((v).items) Py_DECREF(VEC_T_BUF(v)); } while (0) -#define VEC_NESTED_INCREF(v) do { if ((v).items) Py_INCREF(VEC_NESTED_BUF(v)); } while (0) -#define VEC_NESTED_DECREF(v) do { if ((v).items) Py_DECREF(VEC_NESTED_BUF(v)); } while (0) - // Boxed vec objects @@ -485,7 +426,7 @@ typedef struct tuple_T2VvVi { VecNested f0; VecNestedBufItem f1; } tuple_T2VvVi; -static tuple_T2VvVi tuple_undefined_T2VvVi = { { -1, NULL } , { -1, 0 } }; +static tuple_T2VvVi tuple_undefined_T2VvVi = { { -1, NULL } , { -1, NULL } }; #endif typedef tuple_T2VvVi VecNestedPopResult; @@ -508,8 +449,6 @@ typedef struct _VecNestedAPI { } VecNestedAPI; typedef struct { - int (*abi_version)(void); - int (*api_version)(void); VecTAPI *t; VecNestedAPI *nested; VecI64API *i64; @@ -524,7 +463,10 @@ typedef struct { #define VEC_BUF_SIZE(b) ((b)->ob_base.ob_size) #define 
VEC_ITEM_TYPE(t) ((PyTypeObject *)((t) & ~1)) #define VEC_BUF_ITEM_TYPE(b) VEC_ITEM_TYPE((b)->item_type) +#define VEC_CAP(v) ((v).buf->ob_base.ob_size) #define VEC_IS_ERROR(v) ((v).len < 0) +#define VEC_DECREF(v) Py_XDECREF((v).buf) +#define VEC_INCREF(v) Py_XINCREF((v).buf) // Type objects @@ -812,103 +754,55 @@ VecNested VecNested_ExtendVec(VecNested dst, VecNested src); VecNested VecNested_Remove(VecNested vec, VecNestedBufItem x); VecNestedPopResult VecNested_Pop(VecNested v, Py_ssize_t index); -static inline PyObject *VecNested_ItemBuf(VecNestedBufObject *parent, VecNestedBufItem item) { - if (item.items == NULL) - return NULL; - if (parent->depth > 1) - return (PyObject *)VEC_NESTED_BUF_FROM_ITEMS(item.items); - if (parent->item_type == VEC_ITEM_TYPE_I64) - return (PyObject *)VEC_I64_BUF_FROM_ITEMS(item.items); - if (parent->item_type == VEC_ITEM_TYPE_U8) - return (PyObject *)VEC_U8_BUF_FROM_ITEMS(item.items); - if (parent->item_type == VEC_ITEM_TYPE_FLOAT) - return (PyObject *)VEC_FLOAT_BUF_FROM_ITEMS(item.items); - if (parent->item_type == VEC_ITEM_TYPE_I32) - return (PyObject *)VEC_I32_BUF_FROM_ITEMS(item.items); - if (parent->item_type == VEC_ITEM_TYPE_I16) - return (PyObject *)VEC_I16_BUF_FROM_ITEMS(item.items); - if (parent->item_type == VEC_ITEM_TYPE_BOOL) - return (PyObject *)VEC_BOOL_BUF_FROM_ITEMS(item.items); - return (PyObject *)VEC_T_BUF_FROM_ITEMS(item.items); -} - -static inline void VecNested_ItemXINCREF(VecNestedBufObject *parent, VecNestedBufItem item) { - Py_XINCREF(VecNested_ItemBuf(parent, item)); -} - -static inline void VecNested_ItemXDECREF(VecNestedBufObject *parent, VecNestedBufItem item) { - Py_XDECREF(VecNested_ItemBuf(parent, item)); -} - -static inline void VecNested_ItemCLEAR(VecNestedBufObject *parent, VecNestedBufItem *item) { - PyObject *buf = VecNested_ItemBuf(parent, *item); - item->items = NULL; - Py_XDECREF(buf); -} - -static inline int VecNested_ItemVISIT( - VecNestedBufObject *parent, VecNestedBufItem item, visitproc 
visit, void *arg) { - PyObject *buf = VecNested_ItemBuf(parent, item); - if (buf) { - int ret = visit(buf, arg); - if (ret) - return ret; - } - return 0; -} - // Return 0 on success, -1 on error. Store unboxed item in *unboxed if successful. // Return a *borrowed* reference. static inline int VecNested_UnboxItem(VecNested v, PyObject *item, VecNestedBufItem *unboxed) { - VecNestedBufObject *v_buf = VEC_NESTED_BUF(v); - size_t depth = v_buf->depth; + size_t depth = v.buf->depth; if (depth == 1) { if (item->ob_type == &VecTType) { - // Boxed vec[t] always has items != NULL (buf is allocated on boxing) - VecTObject *o = (VecTObject *)item; - if (VEC_T_BUF(o->vec)->item_type == v_buf->item_type) { + VecNestedObject *o = (VecNestedObject *)item; + if (o->vec.buf->item_type == v.buf->item_type) { unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; } - } else if (item->ob_type == &VecI64Type && v_buf->item_type == VEC_ITEM_TYPE_I64) { + } else if (item->ob_type == &VecI64Type && v.buf->item_type == VEC_ITEM_TYPE_I64) { VecI64Object *o = (VecI64Object *)item; unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; - } else if (item->ob_type == &VecU8Type && v_buf->item_type == VEC_ITEM_TYPE_U8) { + } else if (item->ob_type == &VecU8Type && v.buf->item_type == VEC_ITEM_TYPE_U8) { VecU8Object *o = (VecU8Object *)item; unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; - } else if (item->ob_type == &VecFloatType && v_buf->item_type == VEC_ITEM_TYPE_FLOAT) { + } else if (item->ob_type == &VecFloatType && v.buf->item_type == VEC_ITEM_TYPE_FLOAT) { VecFloatObject *o = (VecFloatObject *)item; unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; - } else if (item->ob_type == &VecI32Type && v_buf->item_type == VEC_ITEM_TYPE_I32) { + } else if 
(item->ob_type == &VecI32Type && v.buf->item_type == VEC_ITEM_TYPE_I32) { VecI32Object *o = (VecI32Object *)item; unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; - } else if (item->ob_type == &VecI16Type && v_buf->item_type == VEC_ITEM_TYPE_I16) { + } else if (item->ob_type == &VecI16Type && v.buf->item_type == VEC_ITEM_TYPE_I16) { VecI16Object *o = (VecI16Object *)item; unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; - } else if (item->ob_type == &VecBoolType && v_buf->item_type == VEC_ITEM_TYPE_BOOL) { + } else if (item->ob_type == &VecBoolType && v.buf->item_type == VEC_ITEM_TYPE_BOOL) { VecBoolObject *o = (VecBoolObject *)item; unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; } } else if (item->ob_type == &VecNestedType) { VecNestedObject *o = (VecNestedObject *)item; - VecNestedBufObject *o_buf = VEC_NESTED_BUF(o->vec); - if (o_buf->depth == v_buf->depth - 1 - && o_buf->item_type == v_buf->item_type) { + if (o->vec.buf->depth == v.buf->depth - 1 + && o->vec.buf->item_type == v.buf->item_type) { unboxed->len = o->vec.len; - unboxed->items = o->vec.items; + unboxed->buf = (PyObject *)o->vec.buf; return 0; } } @@ -920,45 +814,36 @@ static inline int VecNested_UnboxItem(VecNested v, PyObject *item, VecNestedBufI static inline PyObject *VecNested_BoxItem(VecNested v, VecNestedBufItem item) { if (item.len < 0) Py_RETURN_NONE; - VecNestedBufObject *v_buf = VEC_NESTED_BUF(v); - VecNested_ItemXINCREF(v_buf, item); - if (v_buf->depth > 1) { + Py_XINCREF(item.buf); + if (v.buf->depth > 1) { // Item is a nested vec - VecNested iv = { .len = item.len, - .items = (VecNestedBufItem *)item.items }; - return VecNested_Box(iv); + VecNested v = { .len = item.len, .buf = (VecNestedBufObject *)item.buf }; + return VecNested_Box(v); } else { // Item is a non-nested vec - size_t item_type = 
v_buf->item_type; + size_t item_type = v.buf->item_type; if (item_type == VEC_ITEM_TYPE_I64) { - VecI64 iv = { .len = item.len, - .items = (int64_t *)item.items }; - return VecI64_Box(iv); + VecI64 v = { .len = item.len, .buf = (VecI64BufObject *)item.buf }; + return VecI64_Box(v); } else if (item_type == VEC_ITEM_TYPE_U8) { - VecU8 iv = { .len = item.len, - .items = (uint8_t *)item.items }; - return VecU8_Box(iv); + VecU8 v = { .len = item.len, .buf = (VecU8BufObject *)item.buf }; + return VecU8_Box(v); } else if (item_type == VEC_ITEM_TYPE_FLOAT) { - VecFloat iv = { .len = item.len, - .items = (double *)item.items }; - return VecFloat_Box(iv); + VecFloat v = { .len = item.len, .buf = (VecFloatBufObject *)item.buf }; + return VecFloat_Box(v); } else if (item_type == VEC_ITEM_TYPE_I32) { - VecI32 iv = { .len = item.len, - .items = (int32_t *)item.items }; - return VecI32_Box(iv); + VecI32 v = { .len = item.len, .buf = (VecI32BufObject *)item.buf }; + return VecI32_Box(v); } else if (item_type == VEC_ITEM_TYPE_I16) { - VecI16 iv = { .len = item.len, - .items = (int16_t *)item.items }; - return VecI16_Box(iv); + VecI16 v = { .len = item.len, .buf = (VecI16BufObject *)item.buf }; + return VecI16_Box(v); } else if (item_type == VEC_ITEM_TYPE_BOOL) { - VecBool iv = { .len = item.len, - .items = (char *)item.items }; - return VecBool_Box(iv); + VecBool v = { .len = item.len, .buf = (VecBoolBufObject *)item.buf }; + return VecBool_Box(v); } else { // Generic vec[t] - VecT iv = { .len = item.len, - .items = (PyObject **)item.items }; - return VecT_Box(iv, item_type); + VecT v = { .len = item.len, .buf = (VecTBufObject *)item.buf }; + return VecT_Box(v, item_type); } } } @@ -995,4 +880,6 @@ int Vec_GenericRemove(Py_ssize_t *len, PyObject **items, PyObject *item); PyObject *Vec_GenericPopWrapper(Py_ssize_t *len, PyObject **items, PyObject *args); PyObject *Vec_GenericPop(Py_ssize_t *len, PyObject **items, Py_ssize_t index); +#endif // MYPYC_EXPERIMENTAL + #endif // 
VEC_H_INCL diff --git a/mypyc/lib-rt/vecs/librt_vecs_api.c b/mypyc/lib-rt/vecs/librt_vecs_api.c index 8c45874f1bfd2..7ac067ae41618 100644 --- a/mypyc/lib-rt/vecs/librt_vecs_api.c +++ b/mypyc/lib-rt/vecs/librt_vecs_api.c @@ -1,5 +1,16 @@ #include "librt_vecs_api.h" +#ifndef MYPYC_EXPERIMENTAL + +int +import_librt_vecs(void) +{ + // All librt.vecs features are experimental for now, so don't set up the API here + return 0; +} + +#else + VecCapsule *VecApi = NULL; VecI64API VecI64Api = {0}; VecI32API VecI32Api = {0}; @@ -17,28 +28,9 @@ import_librt_vecs(void) if (mod == NULL) return -1; Py_DECREF(mod); // we import just for the side effect of making the below work. - VecCapsule *capsule = PyCapsule_Import("librt.vecs._C_API", 0); - if (!capsule) - return -1; - if (capsule->abi_version() != LIBRT_VECS_ABI_VERSION) { - char err[128]; - snprintf(err, sizeof(err), - "ABI version conflict for librt.vecs, expected %d, found %d", - LIBRT_VECS_ABI_VERSION, - capsule->abi_version()); - PyErr_SetString(PyExc_ValueError, err); + VecApi = PyCapsule_Import("librt.vecs._C_API", 0); + if (!VecApi) return -1; - } - if (capsule->api_version() < LIBRT_VECS_API_VERSION) { - char err[128]; - snprintf(err, sizeof(err), - "API version conflict for librt.vecs, expected %d or newer, found %d (hint: upgrade librt)", - LIBRT_VECS_API_VERSION, - capsule->api_version()); - PyErr_SetString(PyExc_ValueError, err); - return -1; - } - VecApi = capsule; VecI64Api = *VecApi->i64; VecI32Api = *VecApi->i32; VecI16Api = *VecApi->i16; @@ -49,3 +41,5 @@ import_librt_vecs(void) VecNestedApi = *VecApi->nested; return 0; } + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/librt_vecs_api.h b/mypyc/lib-rt/vecs/librt_vecs_api.h index 6f42c0b7251d0..d7f1cacfea9ea 100644 --- a/mypyc/lib-rt/vecs/librt_vecs_api.h +++ b/mypyc/lib-rt/vecs/librt_vecs_api.h @@ -10,6 +10,8 @@ int import_librt_vecs(void); +#ifdef MYPYC_EXPERIMENTAL + // Global API pointers initialized by import_librt_vecs() extern VecCapsule 
*VecApi; extern VecI64API VecI64Api; @@ -21,4 +23,6 @@ extern VecBoolAPI VecBoolApi; extern VecTAPI VecTApi; extern VecNestedAPI VecNestedApi; +#endif // MYPYC_EXPERIMENTAL + #endif // LIBRT_VECS_API_H diff --git a/mypyc/lib-rt/vecs/vec_bool.c b/mypyc/lib-rt/vecs/vec_bool.c index e21b66c9dec50..54b7a1f783077 100644 --- a/mypyc/lib-rt/vecs/vec_bool.c +++ b/mypyc/lib-rt/vecs/vec_bool.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL #define VEC VecBool #define VEC_TYPE VecBoolType #define VEC_OBJECT VecBoolObject @@ -16,3 +17,5 @@ #define BUFFER_FORMAT "b" #include "vec_template.c" + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_float.c b/mypyc/lib-rt/vecs/vec_float.c index 9b1f139732b47..16822fa5a0e11 100644 --- a/mypyc/lib-rt/vecs/vec_float.c +++ b/mypyc/lib-rt/vecs/vec_float.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL #define VEC VecFloat #define VEC_TYPE VecFloatType #define VEC_OBJECT VecFloatObject @@ -17,3 +18,5 @@ #define BUFFER_FORMAT "d" #include "vec_template.c" + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_i16.c b/mypyc/lib-rt/vecs/vec_i16.c index 994b67e7793bf..dc1e6dcdec4f6 100644 --- a/mypyc/lib-rt/vecs/vec_i16.c +++ b/mypyc/lib-rt/vecs/vec_i16.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL #define VEC VecI16 #define VEC_TYPE VecI16Type #define VEC_OBJECT VecI16Object @@ -17,3 +18,5 @@ #define BUFFER_FORMAT "h" #include "vec_template.c" + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_i32.c b/mypyc/lib-rt/vecs/vec_i32.c index b475815ce4d8d..f1a7622506cab 100644 --- a/mypyc/lib-rt/vecs/vec_i32.c +++ b/mypyc/lib-rt/vecs/vec_i32.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL #define VEC VecI32 #define VEC_TYPE VecI32Type #define VEC_OBJECT VecI32Object @@ -17,3 +18,5 @@ #define BUFFER_FORMAT "i" #include "vec_template.c" + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_i64.c b/mypyc/lib-rt/vecs/vec_i64.c index e496364688b76..5e89d68f9bf6f 100644 --- a/mypyc/lib-rt/vecs/vec_i64.c +++ 
b/mypyc/lib-rt/vecs/vec_i64.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL #define VEC VecI64 #define VEC_TYPE VecI64Type #define VEC_OBJECT VecI64Object @@ -17,3 +18,5 @@ #define BUFFER_FORMAT "q" #include "vec_template.c" + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_nested.c b/mypyc/lib-rt/vecs/vec_nested.c index c2f04ba8566e9..2f4a16c12a31b 100644 --- a/mypyc/lib-rt/vecs/vec_nested.c +++ b/mypyc/lib-rt/vecs/vec_nested.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL // Implementation of nested vec[t], when t is a vec type. // // Examples of types supported: @@ -10,26 +11,21 @@ #include "librt_vecs.h" #include "vecs_internal.h" -#define VEC_BUF(v) ((VecNestedBufObject *)((char *)(v).items - offsetof(VecNestedBufObject, items))) -#define VEC_CAP(v) (VEC_BUF(v)->ob_base.ob_size) -#define VEC_INCREF(v) do { if ((v).items) Py_INCREF(VEC_BUF(v)); } while (0) -#define VEC_DECREF(v) do { if ((v).items) Py_DECREF(VEC_BUF(v)); } while (0) - static inline VecNested vec_error() { VecNested v = { .len = -1 }; return v; } static inline void vec_track_buffer(VecNested *vec) { - PyObject_GC_Track(VEC_BUF(*vec)); + PyObject_GC_Track(vec->buf); } static inline PyObject *box_vec_item_by_index(VecNested v, Py_ssize_t index) { - return VecNested_BoxItem(v, v.items[index]); + return VecNested_BoxItem(v, v.buf->items[index]); } // Alloc a partially initialized vec. If size > 0, caller *must* immediately initialize len, -// and items. Caller *must* also call vec_track_buffer on the returned vec but only +// and buf->items. Caller *must* also call vec_track_buffer on the returned vec but only // after initializing the items. 
static VecNested vec_alloc(Py_ssize_t size, size_t item_type, size_t depth) { VecNestedBufObject *buf = PyObject_GC_NewVar(VecNestedBufObject, &VecNestedBufType, size); @@ -39,7 +35,7 @@ static VecNested vec_alloc(Py_ssize_t size, size_t item_type, size_t depth) { buf->depth = depth; if (!Vec_IsMagicItemType(item_type)) Py_INCREF(VEC_BUF_ITEM_TYPE(buf)); - VecNested res = { .items = buf->items }; + VecNested res = { .buf = buf }; return res; } @@ -58,8 +54,7 @@ PyObject *VecNested_Box(VecNested vec) { VecNested VecNested_Unbox(PyObject *obj, size_t item_type, size_t depth) { if (obj->ob_type == &VecNestedType) { VecNested result = ((VecNestedObject *)obj)->vec; - VecNestedBufObject *buf = VEC_BUF(result); - if (buf->item_type == item_type && buf->depth == depth) { + if (result.buf->item_type == item_type && result.buf->depth == depth) { VEC_INCREF(result); // TODO: Should we borrow instead? return result; } @@ -70,7 +65,7 @@ VecNested VecNested_Unbox(PyObject *obj, size_t item_type, size_t depth) { } VecNested VecNested_ConvertFromNested(VecNestedBufItem item) { - return (VecNested) { item.len, (VecNestedBufItem *)item.items }; + return (VecNested) { item.len, (VecNestedBufObject *)item.buf }; } VecNested VecNested_New(Py_ssize_t size, Py_ssize_t cap, size_t item_type, size_t depth) { @@ -84,8 +79,8 @@ VecNested VecNested_New(Py_ssize_t size, Py_ssize_t cap, size_t item_type, size_ if (VEC_IS_ERROR(vec)) return vec; for (Py_ssize_t i = 0; i < cap; i++) { - vec.items[i].len = -1; - vec.items[i].items = 0; + vec.buf->items[i].len = -1; + vec.buf->items[i].buf = NULL; } vec.len = size; vec_track_buffer(&vec); @@ -94,8 +89,7 @@ VecNested VecNested_New(Py_ssize_t size, Py_ssize_t cap, size_t item_type, size_ static PyObject *vec_repr(PyObject *self) { VecNested v = ((VecNestedObject *)self)->vec; - VecNestedBufObject *buf = VEC_BUF(v); - return Vec_GenericRepr(self, buf->item_type, buf->depth, 1); + return Vec_GenericRepr(self, v.buf->item_type, v.buf->depth, 1); } 
static PyObject *vec_get_item(PyObject *o, Py_ssize_t i) { @@ -124,15 +118,14 @@ VecNested VecNested_Slice(VecNested vec, int64_t start, int64_t end) { if (end > vec.len) end = vec.len; int64_t slicelength = end - start; - VecNestedBufObject *vec_buf = VEC_BUF(vec); - VecNested res = vec_alloc(slicelength, vec_buf->item_type, vec_buf->depth); + VecNested res = vec_alloc(slicelength, vec.buf->item_type, vec.buf->depth); if (VEC_IS_ERROR(res)) return res; res.len = slicelength; for (Py_ssize_t i = 0; i < slicelength; i++) { - VecNestedBufItem item = vec.items[start + i]; - VecNested_ItemXINCREF(vec_buf, item); - res.items[i] = item; + VecNestedBufItem item = vec.buf->items[start + i]; + Py_XINCREF(item.buf); + res.buf->items[i] = item; } vec_track_buffer(&res); return res; @@ -157,16 +150,15 @@ static PyObject *vec_subscript(PyObject *self, PyObject *item) { if (PySlice_Unpack(item, &start, &stop, &step) < 0) return NULL; Py_ssize_t slicelength = PySlice_AdjustIndices(vec.len, &start, &stop, step); - VecNestedBufObject *vec_buf = VEC_BUF(vec); - VecNested res = vec_alloc(slicelength, vec_buf->item_type, vec_buf->depth); + VecNested res = vec_alloc(slicelength, vec.buf->item_type, vec.buf->depth); if (VEC_IS_ERROR(res)) return NULL; res.len = slicelength; Py_ssize_t j = start; for (Py_ssize_t i = 0; i < slicelength; i++) { - VecNestedBufItem item = vec.items[j]; - VecNested_ItemXINCREF(vec_buf, item); - res.items[i] = item; + VecNestedBufItem item = vec.buf->items[j]; + Py_INCREF(item.buf); + res.buf->items[i] = item; j += step; } vec_track_buffer(&res); @@ -187,10 +179,9 @@ static int vec_ass_item(PyObject *self, Py_ssize_t i, PyObject *o) { VecNestedBufItem item; if (VecNested_UnboxItem(v, o, &item) < 0) return -1; - VecNestedBufObject *v_buf = VEC_BUF(v); - VecNested_ItemXINCREF(v_buf, item); - VecNested_ItemXDECREF(v_buf, v.items[i]); - v.items[i] = item; + VEC_INCREF(item); + VEC_DECREF(v.buf->items[i]); + v.buf->items[i] = item; return 0; } else { 
PyErr_SetString(PyExc_IndexError, "index out of range"); @@ -219,11 +210,9 @@ static int vec_contains(PyObject *self, PyObject *value) { static PyObject *compare_vec_eq(VecNested x, VecNested y, int op) { int cmp = 1; PyObject *res; - VecNestedBufObject *x_buf = VEC_BUF(x); - VecNestedBufObject *y_buf = VEC_BUF(y); if (x.len != y.len - || x_buf->item_type != y_buf->item_type - || x_buf->depth != y_buf->depth) { + || x.buf->item_type != y.buf->item_type + || x.buf->depth != y.buf->depth) { cmp = 0; } else { for (Py_ssize_t i = 0; i < x.len; i++) { @@ -265,39 +254,38 @@ PyObject *vec_richcompare(PyObject *self, PyObject *other, int op) { // Append item to 'vec', stealing 'vec'. Return 'vec' with item appended. VecNested VecNested_Append(VecNested vec, VecNestedBufItem x) { Py_ssize_t cap = VEC_CAP(vec); - VecNestedBufObject *vec_buf = VEC_BUF(vec); - VecNested_ItemXINCREF(vec_buf, x); + VEC_INCREF(x); if (vec.len < cap) { // Slot may have duplicate ref from prior remove/pop - VecNested_ItemXDECREF(vec_buf, vec.items[vec.len]); - vec.items[vec.len] = x; + Py_XDECREF(vec.buf->items[vec.len].buf); + vec.buf->items[vec.len] = x; vec.len++; return vec; } else { Py_ssize_t new_size = Vec_GrowCapacity(cap); // TODO: Avoid initializing to zero here - VecNested new = vec_alloc(new_size, vec_buf->item_type, vec_buf->depth); + VecNested new = vec_alloc(new_size, vec.buf->item_type, vec.buf->depth); if (VEC_IS_ERROR(new)) { - VecNested_ItemXDECREF(vec_buf, x); + VEC_DECREF(x); // The input vec is being consumed/stolen by this function, so on error // we must decref it to avoid leaking the buffer. VEC_DECREF(vec); return new; } // Copy items to new vec. - memcpy(new.items, vec.items, sizeof(VecNestedBufItem) * vec.len); + memcpy(new.buf->items, vec.buf->items, sizeof(VecNestedBufItem) * vec.len); // TODO: How to safely represent deleted items? 
- memset(new.items + vec.len, 0, sizeof(VecNestedBufItem) * (new_size - vec.len)); - if (Py_REFCNT(vec_buf) > 1) { + memset(new.buf->items + vec.len, 0, sizeof(VecNestedBufItem) * (new_size - vec.len)); + if (Py_REFCNT(vec.buf) > 1) { // Other references to old buffer exist; INCREF items in new buffer // so old buffer keeps valid references for aliases. for (Py_ssize_t i = 0; i < vec.len; i++) - VecNested_ItemXINCREF(vec_buf, new.items[i]); + Py_XINCREF(new.buf->items[i].buf); } else { // No aliases; transfer ownership by clearing old buffer items. - memset(vec.items, 0, sizeof(VecNestedBufItem) * vec.len); + memset(vec.buf->items, 0, sizeof(VecNestedBufItem) * vec.len); } - new.items[vec.len] = x; + new.buf->items[vec.len] = x; new.len = vec.len + 1; vec_track_buffer(&new); VEC_DECREF(vec); @@ -310,9 +298,7 @@ VecNested VecNested_Append(VecNested vec, VecNestedBufItem x) { VecNested VecNested_Extend(VecNested vec, PyObject *iterable) { if (VecNested_Check(iterable)) { VecNested src = ((VecNestedObject *)iterable)->vec; - VecNestedBufObject *vec_buf = VEC_BUF(vec); - VecNestedBufObject *src_buf = VEC_BUF(src); - if (src_buf->item_type == vec_buf->item_type && src_buf->depth == vec_buf->depth) { + if (src.buf->item_type == vec.buf->item_type && src.buf->depth == vec.buf->depth) { return VecNested_ExtendVec(vec, src); } } @@ -359,24 +345,22 @@ VecNested VecNested_ExtendVec(VecNested dst, VecNested src) { Py_ssize_t new_len = dst.len + src.len; // VecNested buf is never NULL (even for empty vecs), so no NULL guard needed Py_ssize_t cap = VEC_CAP(dst); - VecNestedBufObject *dst_buf = VEC_BUF(dst); - VecNestedBufObject *src_buf = VEC_BUF(src); - if (new_len <= cap && dst.items != src.items) { + if (new_len <= cap && dst.buf != src.buf) { // Fast path: enough capacity and no aliasing for (Py_ssize_t i = 0; i < src.len; i++) { - VecNestedBufItem item = src.items[i]; - VecNested_ItemXINCREF(src_buf, item); + VecNestedBufItem item = src.buf->items[i]; + 
Py_XINCREF(item.buf); // Slot may have duplicate ref from prior remove/pop - VecNested_ItemXDECREF(dst_buf, dst.items[dst.len + i]); - dst.items[dst.len + i] = item; + Py_XDECREF(dst.buf->items[dst.len + i].buf); + dst.buf->items[dst.len + i] = item; } dst.len = new_len; return dst; } // Need to reallocate (or dst and src share a buffer) Py_ssize_t new_cap = Vec_GrowCapacityTo(cap, new_len); - int aliased = dst.items == src.items; - VecNested new = vec_alloc(new_cap, dst_buf->item_type, dst_buf->depth); + int aliased = dst.buf == src.buf; + VecNested new = vec_alloc(new_cap, dst.buf->item_type, dst.buf->depth); if (VEC_IS_ERROR(new)) { VEC_DECREF(dst); return new; @@ -385,25 +369,25 @@ VecNested VecNested_ExtendVec(VecNested dst, VecNested src) { // dst and src share a buffer -- incref all items instead of // moving refs, to avoid mutating the shared buffer for (Py_ssize_t i = 0; i < dst.len; i++) { - VecNested_ItemXINCREF(dst_buf, dst.items[i]); - new.items[i] = dst.items[i]; + Py_XINCREF(dst.buf->items[i].buf); + new.buf->items[i] = dst.buf->items[i]; } } else { - memcpy(new.items, dst.items, sizeof(VecNestedBufItem) * dst.len); - if (Py_REFCNT(dst_buf) > 1) { + memcpy(new.buf->items, dst.buf->items, sizeof(VecNestedBufItem) * dst.len); + if (Py_REFCNT(dst.buf) > 1) { for (Py_ssize_t i = 0; i < dst.len; i++) - VecNested_ItemXINCREF(dst_buf, new.items[i]); + Py_XINCREF(new.buf->items[i].buf); } else { - memset(dst.items, 0, sizeof(VecNestedBufItem) * dst.len); + memset(dst.buf->items, 0, sizeof(VecNestedBufItem) * dst.len); } } // Copy src items (incref each buf) for (Py_ssize_t i = 0; i < src.len; i++) { - VecNestedBufItem item = src.items[i]; - VecNested_ItemXINCREF(src_buf, item); - new.items[dst.len + i] = item; + VecNestedBufItem item = src.buf->items[i]; + Py_XINCREF(item.buf); + new.buf->items[dst.len + i] = item; } - memset(new.items + new_len, 0, sizeof(VecNestedBufItem) * (new_cap - new_len)); + memset(new.buf->items + new_len, 0, 
sizeof(VecNestedBufItem) * (new_cap - new_len)); new.len = new_len; vec_track_buffer(&new); VEC_DECREF(dst); @@ -412,8 +396,7 @@ VecNested VecNested_ExtendVec(VecNested dst, VecNested src) { // Remove item from 'vec', stealing 'vec'. Return 'vec' with item removed. VecNested VecNested_Remove(VecNested self, VecNestedBufItem arg) { - VecNestedBufItem *items = self.items; - VecNestedBufObject *self_buf = VEC_BUF(self); + VecNestedBufItem *items = self.buf->items; PyObject *boxed_arg = VecNested_BoxItem(self, arg); if (boxed_arg == NULL) { @@ -425,7 +408,7 @@ VecNested VecNested_Remove(VecNested self, VecNestedBufItem arg) { for (Py_ssize_t i = 0; i < self.len; i++) { int match = 0; - if (items[i].len == arg.len && items[i].items == arg.items) + if (items[i].len == arg.len && items[i].buf == arg.buf) match = 1; else { PyObject *item = box_vec_item_by_index(self, i); @@ -449,11 +432,11 @@ VecNested VecNested_Remove(VecNested self, VecNestedBufItem arg) { } if (match) { if (i < self.len - 1) { - VecNested_ItemCLEAR(self_buf, &items[i]); + Py_CLEAR(items[i].buf); for (; i < self.len - 1; i++) { items[i] = items[i + 1]; } - VecNested_ItemXINCREF(self_buf, items[self.len - 1]); + Py_XINCREF(items[self.len - 1].buf); } self.len--; Py_DECREF(boxed_arg); @@ -483,17 +466,16 @@ VecNestedPopResult VecNested_Pop(VecNested v, Py_ssize_t index) { VEC_DECREF(v); result.f0 = vec_error(); result.f1.len = 0; - result.f1.items = NULL; + result.f1.buf = NULL; return result; } - VecNestedBufItem *items = v.items; - VecNestedBufObject *v_buf = VEC_BUF(v); + VecNestedBufItem *items = v.buf->items; result.f1 = items[index]; for (Py_ssize_t i = index; i < v.len - 1; i++) items[i] = items[i + 1]; if (v.len > 0) - VecNested_ItemXINCREF(v_buf, items[v.len - 1]); + Py_XINCREF(items[v.len - 1].buf); v.len--; // Return the stolen reference without INCREF result.f0 = v; @@ -503,18 +485,14 @@ VecNestedPopResult VecNested_Pop(VecNested v, Py_ssize_t index) { static int 
VecNested_traverse(VecNestedObject *self, visitproc visit, void *arg) { - if (self->vec.items) - Py_VISIT(VEC_BUF(self->vec)); + Py_VISIT(self->vec.buf); return 0; } static int VecNested_clear(VecNestedObject *self) { - if (self->vec.items) { - Py_DECREF(VEC_BUF(self->vec)); - self->vec.items = NULL; - } + Py_CLEAR(self->vec.buf); return 0; } @@ -523,7 +501,7 @@ VecNested_dealloc(VecNestedObject *self) { PyObject_GC_UnTrack(self); Py_TRASHCAN_BEGIN(self, VecNested_dealloc) - VecNested_clear(self); + Py_CLEAR(self->vec.buf); Py_TYPE(self)->tp_free((PyObject *)self); Py_TRASHCAN_END } @@ -534,9 +512,7 @@ VecNestedBuf_traverse(VecNestedBufObject *self, visitproc visit, void *arg) if (!Vec_IsMagicItemType(self->item_type)) Py_VISIT(VEC_BUF_ITEM_TYPE(self)); for (Py_ssize_t i = 0; i < VEC_BUF_SIZE(self); i++) { - int ret = VecNested_ItemVISIT(self, self->items[i], visit, arg); - if (ret) - return ret; + Py_VISIT(self->items[i].buf); } return 0; } @@ -544,13 +520,13 @@ VecNestedBuf_traverse(VecNestedBufObject *self, visitproc visit, void *arg) static inline int VecNestedBuf_clear(VecNestedBufObject *self) { - for (Py_ssize_t i = 0; i < VEC_BUF_SIZE(self); i++) { - VecNested_ItemCLEAR(self, &self->items[i]); - } if (self->item_type && !Vec_IsMagicItemType(self->item_type)) { Py_DECREF(VEC_BUF_ITEM_TYPE(self)); self->item_type = 0; } + for (Py_ssize_t i = 0; i < VEC_BUF_SIZE(self); i++) { + Py_CLEAR(self->items[i].buf); + } return 0; } @@ -587,7 +563,7 @@ static PyMethodDef vec_methods[] = { typedef struct { PyObject_HEAD - VecNested vec; // Unboxed vec (keeps buffer alive via items reference) + VecNested vec; // Unboxed vec (keeps buffer alive via buf reference) Py_ssize_t index; // Current iteration index } VecNestedIterObject; @@ -598,7 +574,7 @@ static PyObject *VecNested_iter(PyObject *self) { if (it == NULL) return NULL; it->vec = ((VecNestedObject *)self)->vec; - VEC_INCREF(it->vec); + Py_INCREF(it->vec.buf); it->index = 0; PyObject_GC_Track(it); return (PyObject 
*)it; @@ -607,29 +583,25 @@ static PyObject *VecNested_iter(PyObject *self) { static int VecNestedIter_traverse(VecNestedIterObject *self, visitproc visit, void *arg) { - if (self->vec.items) - Py_VISIT(VEC_BUF(self->vec)); + Py_VISIT(self->vec.buf); return 0; } static int VecNestedIter_clear(VecNestedIterObject *self) { - if (self->vec.items) { - Py_DECREF(VEC_BUF(self->vec)); - self->vec.items = NULL; - } + Py_CLEAR(self->vec.buf); return 0; } static void VecNestedIter_dealloc(VecNestedIterObject *self) { PyObject_GC_UnTrack(self); - VEC_DECREF(self->vec); + Py_XDECREF(self->vec.buf); PyObject_GC_Del(self); } static PyObject *VecNestedIter_next(VecNestedIterObject *self) { - if (self->vec.items == NULL) + if (self->vec.buf == NULL) return NULL; if (self->index < self->vec.len) { PyObject *item = box_vec_item_by_index(self->vec, self->index); @@ -638,13 +610,12 @@ static PyObject *VecNestedIter_next(VecNestedIterObject *self) { self->index++; return item; } - VEC_DECREF(self->vec); - self->vec.items = NULL; + Py_CLEAR(self->vec.buf); return NULL; // StopIteration } static PyObject *VecNestedIter_len(VecNestedIterObject *self, PyObject *Py_UNUSED(ignored)) { - if (self->vec.items == NULL) + if (self->vec.buf == NULL) return PyLong_FromSsize_t(0); Py_ssize_t remaining = self->vec.len - self->index; if (remaining < 0) @@ -712,8 +683,8 @@ PyObject *VecNested_FromIterable(size_t item_type, size_t depth, PyObject *itera return NULL; if (cap > 0) { for (int64_t i = 0; i < cap; i++) { - v.items[i].len = -1; - v.items[i].items = 0; + v.buf->items[i].len = -1; + v.buf->items[i].buf = NULL; } } v.len = 0; @@ -763,3 +734,5 @@ VecNestedAPI Vec_NestedAPI = { VecNested_Extend, VecNested_ExtendVec, }; + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_t.c b/mypyc/lib-rt/vecs/vec_t.c index 3cfd1756201ce..b40c37b7a45e5 100644 --- a/mypyc/lib-rt/vecs/vec_t.c +++ b/mypyc/lib-rt/vecs/vec_t.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL // Implementation of generic 
vec[t], when t is a plain type object (possibly optional). // // Examples of types supported: @@ -12,11 +13,6 @@ #include "librt_vecs.h" #include "vecs_internal.h" -#define VEC_BUF(v) ((VecTBufObject *)((char *)(v).items - offsetof(VecTBufObject, items))) -#define VEC_CAP(v) (VEC_BUF(v)->ob_base.ob_size) -#define VEC_INCREF(v) do { if ((v).items) Py_INCREF(VEC_BUF(v)); } while (0) -#define VEC_DECREF(v) do { if ((v).items) Py_DECREF(VEC_BUF(v)); } while (0) - static inline VecT vec_error() { VecT v = { .len = -1 }; return v; @@ -32,13 +28,13 @@ static inline VecTBufObject *alloc_buf(Py_ssize_t size, size_t item_type) { } static inline void vec_track_buffer(VecT *vec) { - if (vec->items != NULL) { - PyObject_GC_Track(VEC_T_BUF(*vec)); + if (vec->buf != NULL) { + PyObject_GC_Track(vec->buf); } } // Alloc a partially initialized vec. If size > 0, caller *must* immediately initialize len, -// and items. Caller *must* also call vec_track_buffer on the returned vec but only +// and buf->items. Caller *must* also call vec_track_buffer on the returned vec but only // after initializing the items. static VecT vec_alloc(Py_ssize_t size, size_t item_type) { VecTBufObject *buf; @@ -50,24 +46,23 @@ static VecT vec_alloc(Py_ssize_t size, size_t item_type) { if (buf == NULL) return vec_error(); } - return (VecT) { .items = (buf != NULL) ? buf->items : NULL }; + return (VecT) { .buf = buf }; } // Box a VecT value, stealing 'vec'. On failure, return NULL and decref 'vec'. 
PyObject *VecT_Box(VecT vec, size_t item_type) { - // An unboxed empty vec may have NULL items, but a boxed vec must have a buf + // An unboxed empty vec may have a NULL buf, but a boxed vec must have it // allocated, since it contains the item type - if (vec.items == NULL) { - VecTBufObject *buf = alloc_buf(0, item_type); - if (buf == NULL) + if (vec.buf == NULL) { + vec.buf = alloc_buf(0, item_type); + if (vec.buf == NULL) return NULL; - vec.items = buf->items; vec_track_buffer(&vec); } VecTObject *obj = PyObject_GC_New(VecTObject, &VecTType); if (obj == NULL) { - // items is always defined, so no need for a NULL check - Py_DECREF(VEC_BUF(vec)); + // vec.buf is always defined, so no need for a NULL check + Py_DECREF(vec.buf); return NULL; } obj->vec = vec; @@ -78,7 +73,7 @@ PyObject *VecT_Box(VecT vec, size_t item_type) { VecT VecT_Unbox(PyObject *obj, size_t item_type) { if (obj->ob_type == &VecTType) { VecT result = ((VecTObject *)obj)->vec; - if (VEC_BUF(result)->item_type == item_type) { + if (result.buf->item_type == item_type) { VEC_INCREF(result); // TODO: Should we borrow instead? 
return result; } @@ -89,7 +84,7 @@ VecT VecT_Unbox(PyObject *obj, size_t item_type) { } VecT VecT_ConvertFromNested(VecNestedBufItem item) { - return (VecT) { item.len, (PyObject **)item.items }; + return (VecT) { item.len, (VecTBufObject *)item.buf }; } VecT VecT_New(Py_ssize_t size, Py_ssize_t cap, size_t item_type) { @@ -103,7 +98,7 @@ VecT VecT_New(Py_ssize_t size, Py_ssize_t cap, size_t item_type) { if (VEC_IS_ERROR(vec)) return vec; for (Py_ssize_t i = 0; i < cap; i++) { - vec.items[i] = NULL; + vec.buf->items[i] = NULL; } vec_track_buffer(&vec); vec.len = size; @@ -112,17 +107,17 @@ VecT VecT_New(Py_ssize_t size, Py_ssize_t cap, size_t item_type) { static PyObject *vec_repr(PyObject *self) { VecTObject *v = (VecTObject *)self; - return Vec_GenericRepr(self, VEC_BUF(v->vec)->item_type, 0, 1); + return Vec_GenericRepr(self, v->vec.buf->item_type, 0, 1); } static PyObject *vec_get_item(PyObject *o, Py_ssize_t i) { VecT v = ((VecTObject *)o)->vec; if ((size_t)i < (size_t)v.len) { - PyObject *item = v.items[i]; + PyObject *item = v.buf->items[i]; Py_INCREF(item); return item; } else if ((size_t)i + (size_t)v.len < (size_t)v.len) { - PyObject *item = v.items[i + v.len]; + PyObject *item = v.buf->items[i + v.len]; Py_INCREF(item); return item; } else { @@ -146,15 +141,15 @@ VecT VecT_Slice(VecT vec, int64_t start, int64_t end) { end = vec.len; int64_t slicelength = end - start; if (slicelength == 0) - return (VecT) { .len = 0, .items = NULL }; - VecT res = vec_alloc(slicelength, VEC_BUF(vec)->item_type); + return (VecT) { .len = 0, .buf = NULL }; + VecT res = vec_alloc(slicelength, vec.buf->item_type); if (VEC_IS_ERROR(res)) return res; res.len = slicelength; for (Py_ssize_t i = 0; i < slicelength; i++) { - PyObject *item = vec.items[start + i]; + PyObject *item = vec.buf->items[start + i]; Py_INCREF(item); - res.items[i] = item; + res.buf->items[i] = item; } vec_track_buffer(&res); return res; @@ -167,11 +162,11 @@ static PyObject *vec_subscript(PyObject *self, 
PyObject *item) { if (i == -1 && PyErr_Occurred()) return NULL; if ((size_t)i < (size_t)vec.len) { - PyObject *result = vec.items[i]; + PyObject *result = vec.buf->items[i]; Py_INCREF(result); return result; } else if ((size_t)i + (size_t)vec.len < (size_t)vec.len) { - PyObject *result = vec.items[i + vec.len]; + PyObject *result = vec.buf->items[i + vec.len]; Py_INCREF(result); return result; } else { @@ -183,19 +178,19 @@ static PyObject *vec_subscript(PyObject *self, PyObject *item) { if (PySlice_Unpack(item, &start, &stop, &step) < 0) return NULL; Py_ssize_t slicelength = PySlice_AdjustIndices(vec.len, &start, &stop, step); - VecT res = vec_alloc(slicelength, VEC_BUF(vec)->item_type); + VecT res = vec_alloc(slicelength, vec.buf->item_type); if (VEC_IS_ERROR(res)) return NULL; res.len = slicelength; Py_ssize_t j = start; for (Py_ssize_t i = 0; i < slicelength; i++) { - PyObject *item = vec.items[j]; + PyObject *item = vec.buf->items[j]; Py_INCREF(item); - res.items[i] = item; + res.buf->items[i] = item; j += step; } vec_track_buffer(&res); - PyObject *result = VecT_Box(res, VEC_BUF(vec)->item_type); + PyObject *result = VecT_Box(res, vec.buf->item_type); if (result == NULL) { VEC_DECREF(res); } @@ -209,18 +204,18 @@ static PyObject *vec_subscript(PyObject *self, PyObject *item) { static int vec_ass_item(PyObject *self, Py_ssize_t i, PyObject *o) { VecT v = ((VecTObject *)self)->vec; - if (!VecT_ItemCheck(v, o, VEC_BUF(v)->item_type)) + if (!VecT_ItemCheck(v, o, v.buf->item_type)) return -1; if ((size_t)i < (size_t)v.len) { - PyObject *old = v.items[i]; + PyObject *old = v.buf->items[i]; Py_INCREF(o); - v.items[i] = o; + v.buf->items[i] = o; Py_XDECREF(old); return 0; } else if ((size_t)i + (size_t)v.len < (size_t)v.len) { - PyObject *old = v.items[i + v.len]; + PyObject *old = v.buf->items[i + v.len]; Py_INCREF(o); - v.items[i + v.len] = o; + v.buf->items[i + v.len] = o; Py_XDECREF(old); return 0; } else { @@ -232,7 +227,7 @@ static int vec_ass_item(PyObject 
*self, Py_ssize_t i, PyObject *o) { static int vec_contains(PyObject *self, PyObject *value) { VecT v = ((VecTObject *)self)->vec; for (Py_ssize_t i = 0; i < v.len; i++) { - PyObject *item = v.items[i]; + PyObject *item = v.buf->items[i]; if (item == value) { return 1; } @@ -253,11 +248,11 @@ static PyObject *vec_richcompare(PyObject *self, PyObject *other, int op) { } else { VecT x = ((VecTObject *)self)->vec; VecT y = ((VecTObject *)other)->vec; - if (VEC_BUF(x)->item_type != VEC_BUF(y)->item_type) { + if (x.buf->item_type != y.buf->item_type) { res = op == Py_EQ ? Py_False : Py_True; } else { // TODO: why pointers to len? - return Vec_GenericRichcompare(&x.len, x.items, &y.len, y.items, op); + return Vec_GenericRichcompare(&x.len, x.buf->items, &y.len, y.buf->items, op); } } } else @@ -268,13 +263,13 @@ static PyObject *vec_richcompare(PyObject *self, PyObject *other, int op) { // Append item to 'vec', stealing 'vec'. Return 'vec' with item appended. VecT VecT_Append(VecT vec, PyObject *x, size_t item_type) { - if (vec.items == NULL) { + if (vec.buf == NULL) { VecT new = vec_alloc(1, item_type); if (VEC_IS_ERROR(new)) return new; Py_INCREF(x); new.len = 1; - new.items[0] = x; + new.buf->items[0] = x; vec_track_buffer(&new); return new; } @@ -282,13 +277,13 @@ VecT VecT_Append(VecT vec, PyObject *x, size_t item_type) { Py_INCREF(x); if (vec.len < cap) { // Slot may have duplicate ref from prior remove/pop - Py_XSETREF(vec.items[vec.len], x); + Py_XSETREF(vec.buf->items[vec.len], x); vec.len++; return vec; } else { Py_ssize_t new_size = Vec_GrowCapacity(cap); // TODO: Avoid initializing to zero here - VecT new = vec_alloc(new_size, VEC_BUF(vec)->item_type); + VecT new = vec_alloc(new_size, vec.buf->item_type); if (VEC_IS_ERROR(new)) { Py_DECREF(x); // The input vec is being consumed/stolen by this function, so on error @@ -297,19 +292,18 @@ VecT VecT_Append(VecT vec, PyObject *x, size_t item_type) { return new; } // Copy items to new vec. 
- memcpy(new.items, vec.items, sizeof(PyObject *) * vec.len); - memset(new.items + vec.len, 0, sizeof(PyObject *) * (new_size - vec.len)); - VecTBufObject *old_buf = VEC_BUF(vec); - if (Py_REFCNT(old_buf) > 1) { + memcpy(new.buf->items, vec.buf->items, sizeof(PyObject *) * vec.len); + memset(new.buf->items + vec.len, 0, sizeof(PyObject *) * (new_size - vec.len)); + if (Py_REFCNT(vec.buf) > 1) { // Other references to old buffer exist; INCREF items in new buffer // so old buffer keeps valid references for aliases. for (Py_ssize_t i = 0; i < vec.len; i++) - Py_XINCREF(new.items[i]); + Py_XINCREF(new.buf->items[i]); } else { // No aliases; transfer ownership by clearing old buffer items. - memset(vec.items, 0, sizeof(PyObject *) * vec.len); + memset(vec.buf->items, 0, sizeof(PyObject *) * vec.len); } - new.items[vec.len] = x; + new.buf->items[vec.len] = x; new.len = vec.len + 1; vec_track_buffer(&new); VEC_DECREF(vec); @@ -322,7 +316,7 @@ VecT VecT_Append(VecT vec, PyObject *x, size_t item_type) { VecT VecT_Extend(VecT vec, PyObject *iterable, size_t item_type) { if (VecT_Check(iterable)) { VecT src = ((VecTObject *)iterable)->vec; - if (src.items != NULL && VEC_BUF(src)->item_type == item_type) { + if (src.buf != NULL && src.buf->item_type == item_type) { return VecT_ExtendVec(vec, src, item_type); } } @@ -366,7 +360,7 @@ VecT VecT_ExtendVec(VecT dst, VecT src, size_t item_type) { return vec_error(); } Py_ssize_t new_len = dst.len + src.len; - if (dst.items == NULL) { + if (dst.buf == NULL) { // dst is empty, allocate new buf VecT new = vec_alloc(new_len, item_type); if (VEC_IS_ERROR(new)) { @@ -374,29 +368,29 @@ VecT VecT_ExtendVec(VecT dst, VecT src, size_t item_type) { return new; } for (Py_ssize_t i = 0; i < src.len; i++) { - Py_INCREF(src.items[i]); - new.items[i] = src.items[i]; + Py_INCREF(src.buf->items[i]); + new.buf->items[i] = src.buf->items[i]; } - memset(new.items + src.len, 0, sizeof(PyObject *) * (new_len - src.len)); + memset(new.buf->items + src.len, 
0, sizeof(PyObject *) * (new_len - src.len)); new.len = new_len; vec_track_buffer(&new); return new; } Py_ssize_t cap = VEC_CAP(dst); - if (new_len <= cap && dst.items != src.items) { + if (new_len <= cap && dst.buf != src.buf) { // Fast path: enough capacity and no aliasing for (Py_ssize_t i = 0; i < src.len; i++) { - Py_INCREF(src.items[i]); + Py_INCREF(src.buf->items[i]); // Slot may have duplicate ref from prior remove/pop - Py_XSETREF(dst.items[dst.len + i], src.items[i]); + Py_XSETREF(dst.buf->items[dst.len + i], src.buf->items[i]); } dst.len = new_len; return dst; } // Need to reallocate (or dst and src share a buffer) Py_ssize_t new_cap = Vec_GrowCapacityTo(cap, new_len); - int aliased = dst.items == src.items; - VecT new = vec_alloc(new_cap, VEC_BUF(dst)->item_type); + int aliased = dst.buf == src.buf; + VecT new = vec_alloc(new_cap, dst.buf->item_type); if (VEC_IS_ERROR(new)) { VEC_DECREF(dst); return new; @@ -405,25 +399,24 @@ VecT VecT_ExtendVec(VecT dst, VecT src, size_t item_type) { // dst and src share a buffer -- incref all items instead of // moving refs, to avoid mutating the shared buffer for (Py_ssize_t i = 0; i < dst.len; i++) { - Py_INCREF(dst.items[i]); - new.items[i] = dst.items[i]; + Py_INCREF(dst.buf->items[i]); + new.buf->items[i] = dst.buf->items[i]; } } else { - memcpy(new.items, dst.items, sizeof(PyObject *) * dst.len); - VecTBufObject *dst_buf = VEC_BUF(dst); - if (Py_REFCNT(dst_buf) > 1) { + memcpy(new.buf->items, dst.buf->items, sizeof(PyObject *) * dst.len); + if (Py_REFCNT(dst.buf) > 1) { for (Py_ssize_t i = 0; i < dst.len; i++) - Py_XINCREF(new.items[i]); + Py_XINCREF(new.buf->items[i]); } else { - memset(dst.items, 0, sizeof(PyObject *) * dst.len); + memset(dst.buf->items, 0, sizeof(PyObject *) * dst.len); } } // Copy src items (incref each) for (Py_ssize_t i = 0; i < src.len; i++) { - Py_INCREF(src.items[i]); - new.items[dst.len + i] = src.items[i]; + Py_INCREF(src.buf->items[i]); + new.buf->items[dst.len + i] = 
src.buf->items[i]; } - memset(new.items + new_len, 0, sizeof(PyObject *) * (new_cap - new_len)); + memset(new.buf->items + new_len, 0, sizeof(PyObject *) * (new_cap - new_len)); new.len = new_len; vec_track_buffer(&new); VEC_DECREF(dst); @@ -438,14 +431,14 @@ PyObject *VecT_ToList(VecT v) { VEC_DECREF(v); return NULL; } - if (n > 0 && Py_REFCNT(VEC_BUF(v)) == 1) { + if (n > 0 && Py_REFCNT(v.buf) == 1) { for (Py_ssize_t i = 0; i < n; i++) { - PyList_SET_ITEM(list, i, v.items[i]); - v.items[i] = NULL; + PyList_SET_ITEM(list, i, v.buf->items[i]); + v.buf->items[i] = NULL; } } else { for (Py_ssize_t i = 0; i < n; i++) { - PyObject *item = v.items[i]; + PyObject *item = v.buf->items[i]; Py_INCREF(item); PyList_SET_ITEM(list, i, item); } @@ -462,14 +455,14 @@ PyObject *VecT_ToTuple(VecT v) { VEC_DECREF(v); return NULL; } - if (n > 0 && Py_REFCNT(VEC_BUF(v)) == 1) { + if (n > 0 && Py_REFCNT(v.buf) == 1) { for (Py_ssize_t i = 0; i < n; i++) { - PyTuple_SET_ITEM(tuple, i, v.items[i]); - v.items[i] = NULL; + PyTuple_SET_ITEM(tuple, i, v.buf->items[i]); + v.buf->items[i] = NULL; } } else { for (Py_ssize_t i = 0; i < n; i++) { - PyObject *item = v.items[i]; + PyObject *item = v.buf->items[i]; Py_INCREF(item); PyTuple_SET_ITEM(tuple, i, item); } @@ -480,7 +473,7 @@ PyObject *VecT_ToTuple(VecT v) { // Remove item from 'vec', stealing 'vec'. Return 'vec' with item removed. 
VecT VecT_Remove(VecT v, PyObject *arg) { - PyObject **items = v.items; + PyObject **items = v.buf->items; for (Py_ssize_t i = 0; i < v.len; i++) { int match = 0; if (items[i] == arg) @@ -534,7 +527,7 @@ VecTPopResult VecT_Pop(VecT v, Py_ssize_t index) { return result; } - PyObject **items = v.items; + PyObject **items = v.buf->items; result.f1 = items[index]; for (Py_ssize_t i = index; i < v.len - 1; i++) items[i] = items[i + 1]; @@ -550,18 +543,14 @@ VecTPopResult VecT_Pop(VecT v, Py_ssize_t index) { static int VecT_traverse(VecTObject *self, visitproc visit, void *arg) { - if (self->vec.items) - Py_VISIT(VEC_BUF(self->vec)); + Py_VISIT(self->vec.buf); return 0; } static int VecT_clear(VecTObject *self) { - if (self->vec.items) { - Py_DECREF(VEC_BUF(self->vec)); - self->vec.items = NULL; - } + Py_CLEAR(self->vec.buf); return 0; } @@ -570,7 +559,7 @@ VecT_dealloc(VecTObject *self) { PyObject_GC_UnTrack(self); Py_TRASHCAN_BEGIN(self, VecT_dealloc) - VecT_clear(self); + Py_CLEAR(self->vec.buf); Py_TYPE(self)->tp_free((PyObject *)self); Py_TRASHCAN_END } @@ -631,7 +620,7 @@ static PyMethodDef vec_methods[] = { typedef struct { PyObject_HEAD - VecT vec; // Unboxed vec (keeps buffer alive via items reference) + VecT vec; // Unboxed vec (keeps buffer alive via buf reference) Py_ssize_t index; // Current iteration index } VecTIterObject; @@ -642,7 +631,7 @@ static PyObject *VecT_iter(PyObject *self) { if (it == NULL) return NULL; it->vec = ((VecTObject *)self)->vec; - VEC_INCREF(it->vec); + Py_INCREF(it->vec.buf); it->index = 0; PyObject_GC_Track(it); return (PyObject *)it; @@ -651,43 +640,38 @@ static PyObject *VecT_iter(PyObject *self) { static int VecTIter_traverse(VecTIterObject *self, visitproc visit, void *arg) { - if (self->vec.items) - Py_VISIT(VEC_BUF(self->vec)); + Py_VISIT(self->vec.buf); return 0; } static int VecTIter_clear(VecTIterObject *self) { - if (self->vec.items) { - Py_DECREF(VEC_BUF(self->vec)); - self->vec.items = NULL; - } + 
Py_CLEAR(self->vec.buf); return 0; } static void VecTIter_dealloc(VecTIterObject *self) { PyObject_GC_UnTrack(self); - VEC_DECREF(self->vec); + Py_XDECREF(self->vec.buf); PyObject_GC_Del(self); } static PyObject *VecTIter_next(VecTIterObject *self) { - if (self->vec.items == NULL) + if (self->vec.buf == NULL) return NULL; if (self->index < self->vec.len) { - PyObject *item = self->vec.items[self->index]; + PyObject *item = self->vec.buf->items[self->index]; self->index++; Py_INCREF(item); return item; } - VEC_DECREF(self->vec); - self->vec.items = NULL; + Py_CLEAR(self->vec.buf); return NULL; // StopIteration } static PyObject *VecTIter_len(VecTIterObject *self, PyObject *Py_UNUSED(ignored)) { - if (self->vec.items == NULL) + if (self->vec.buf == NULL) return PyLong_FromSsize_t(0); Py_ssize_t remaining = self->vec.len - self->index; if (remaining < 0) @@ -760,15 +744,15 @@ static inline VecT vec_from_sequence( PyObject *item = is_list ? PyList_GET_ITEM(seq, i) : PyTuple_GET_ITEM(seq, i); if (!VecT_ItemCheck(v, item, item_type)) { for (Py_ssize_t j = i; j < alloc_size; j++) - v.items[j] = NULL; + v.buf->items[j] = NULL; VEC_DECREF(v); return vec_error(); } Py_INCREF(item); - v.items[i] = item; + v.buf->items[i] = item; } for (Py_ssize_t j = n; j < alloc_size; j++) - v.items[j] = NULL; + v.buf->items[j] = NULL; vec_track_buffer(&v); v.len = n; return v; @@ -786,7 +770,7 @@ VecT VecT_FromIterable(size_t item_type, PyObject *iterable, int64_t cap) { return vec_error(); if (cap > 0) { for (int64_t i = 0; i < cap; i++) - v.items[i] = NULL; + v.buf->items[i] = NULL; } v.len = 0; vec_track_buffer(&v); @@ -837,3 +821,5 @@ VecTAPI Vec_TAPI = { VecT_ToList, VecT_ToTuple, }; + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_template.c b/mypyc/lib-rt/vecs/vec_template.c index b3e80261fec38..cdbe211874822 100644 --- a/mypyc/lib-rt/vecs/vec_template.c +++ b/mypyc/lib-rt/vecs/vec_template.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL // NOTE: This file can't be 
compiled on its own, it must be #included // with certain #defines set, as described below. // @@ -30,11 +31,6 @@ #include "vecs_internal.h" #include "mypyc_util.h" -#define VEC_BUF(v) ((BUF_OBJECT *)((char *)(v).items - offsetof(BUF_OBJECT, items))) -#define VEC_CAP(v) (VEC_BUF(v)->ob_base.ob_size) -#define VEC_INCREF(v) do { if ((v).items) Py_INCREF(VEC_BUF(v)); } while (0) -#define VEC_DECREF(v) do { if ((v).items) Py_DECREF(VEC_BUF(v)); } while (0) - inline static VEC vec_error() { VEC v = { .len = -1 }; return v; @@ -52,15 +48,12 @@ static VEC vec_alloc(Py_ssize_t size) if (buf == NULL) return vec_error(); } - VEC res = { .items = (buf != NULL) ? buf->items : NULL }; + VEC res = { .buf = buf }; return res; } static void vec_dealloc(VEC_OBJECT *self) { - if (self->vec.items) { - Py_DECREF(VEC_BUF(self->vec)); - self->vec.items = NULL; - } + Py_CLEAR(self->vec.buf); PyObject_Del(self); } @@ -87,7 +80,7 @@ VEC FUNC(Unbox)(PyObject *obj) { } VEC FUNC(ConvertFromNested)(VecNestedBufItem item) { - return (VEC) { item.len, (ITEM_C_TYPE *)item.items }; + return (VEC) { item.len, (BUF_OBJECT *)item.buf }; } VEC FUNC(New)(Py_ssize_t size, Py_ssize_t cap) { @@ -101,7 +94,7 @@ VEC FUNC(New)(Py_ssize_t size, Py_ssize_t cap) { if (VEC_IS_ERROR(vec)) return vec; for (Py_ssize_t i = 0; i < cap; i++) { - vec.items[i] = 0; + vec.buf->items[i] = 0; } vec.len = size; return vec; @@ -155,7 +148,7 @@ static inline VEC vec_from_sequence(PyObject *seq, int64_t cap, const int is_lis VEC_DECREF(v); return vec_error(); } - v.items[i] = x; + v.buf->items[i] = x; } v.len = n; return v; @@ -174,7 +167,7 @@ VEC FUNC(FromIterable)(PyObject *iterable, int64_t cap) { if (VEC_IS_ERROR(v)) return vec_error(); if (n > 0) - memcpy(v.items, PyBytes_AS_STRING(iterable), n); + memcpy(v.buf->items, PyBytes_AS_STRING(iterable), n); v.len = n; return v; } @@ -193,7 +186,7 @@ VEC FUNC(FromIterable)(PyObject *iterable, int64_t cap) { return vec_error(); } if (n > 0) { - memcpy(v.items, view.buf, n * 
sizeof(ITEM_C_TYPE)); + memcpy(v.buf->items, view.buf, n * sizeof(ITEM_C_TYPE)); } v.len = n; PyBuffer_Release(&view); @@ -211,7 +204,7 @@ VEC FUNC(FromIterable)(PyObject *iterable, int64_t cap) { if (VEC_IS_ERROR(v)) return vec_error(); if (cap > 0) { - memset(v.items, 0, sizeof(ITEM_C_TYPE) * cap); + memset(v.buf->items, 0, sizeof(ITEM_C_TYPE) * cap); } v.len = 0; @@ -272,9 +265,9 @@ static PyObject *vec_repr(PyObject *self) { static PyObject *vec_get_item(PyObject *o, Py_ssize_t i) { VEC v = ((VEC_OBJECT *)o)->vec; if ((size_t)i < (size_t)v.len) { - return BOX_ITEM(v.items[i]); + return BOX_ITEM(v.buf->items[i]); } else if ((size_t)i + (size_t)v.len < (size_t)v.len) { - return BOX_ITEM(v.items[i + v.len]); + return BOX_ITEM(v.buf->items[i + v.len]); } else { PyErr_SetString(PyExc_IndexError, "index out of range"); return NULL; @@ -300,7 +293,7 @@ VEC FUNC(Slice)(VEC vec, int64_t start, int64_t end) { return res; res.len = slicelength; for (Py_ssize_t i = 0; i < slicelength; i++) - res.items[i] = vec.items[start + i]; + res.buf->items[i] = vec.buf->items[start + i]; return res; } @@ -311,9 +304,9 @@ static PyObject *vec_subscript(PyObject *self, PyObject *item) { if (i == -1 && PyErr_Occurred()) return NULL; if ((size_t)i < (size_t)vec.len) { - return BOX_ITEM(vec.items[i]); + return BOX_ITEM(vec.buf->items[i]); } else if ((size_t)i + (size_t)vec.len < (size_t)vec.len) { - return BOX_ITEM(vec.items[i + vec.len]); + return BOX_ITEM(vec.buf->items[i + vec.len]); } else { PyErr_SetString(PyExc_IndexError, "index out of range"); return NULL; @@ -329,7 +322,7 @@ static PyObject *vec_subscript(PyObject *self, PyObject *item) { res.len = slicelength; Py_ssize_t j = start; for (Py_ssize_t i = 0; i < slicelength; i++) { - res.items[i] = vec.items[j]; + res.buf->items[i] = vec.buf->items[j]; j += step; } return FUNC(Box)(res); @@ -346,10 +339,10 @@ static int vec_ass_item(PyObject *self, Py_ssize_t i, PyObject *o) { return -1; VEC v = ((VEC_OBJECT *)self)->vec; if 
((size_t)i < (size_t)v.len) { - v.items[i] = x; + v.buf->items[i] = x; return 0; } else if ((size_t)i + (size_t)v.len < (size_t)v.len) { - v.items[i + v.len] = x; + v.buf->items[i + v.len] = x; return 0; } else { PyErr_SetString(PyExc_IndexError, "index out of range"); @@ -365,7 +358,7 @@ static int vec_contains(PyObject *self, PyObject *value) { // Fall back to boxed comparison (e.g. 2.0 == 2) VEC v = ((VEC_OBJECT *)self)->vec; for (Py_ssize_t i = 0; i < v.len; i++) { - PyObject *boxed = BOX_ITEM(v.items[i]); + PyObject *boxed = BOX_ITEM(v.buf->items[i]); if (boxed == NULL) return -1; int cmp = PyObject_RichCompareBool(boxed, value, Py_EQ); @@ -377,7 +370,7 @@ static int vec_contains(PyObject *self, PyObject *value) { } VEC v = ((VEC_OBJECT *)self)->vec; for (Py_ssize_t i = 0; i < v.len; i++) { - if (v.items[i] == x) + if (v.buf->items[i] == x) return 1; } return 0; @@ -400,7 +393,7 @@ static PyObject *vec_richcompare(PyObject *self, PyObject *other, int op) { cmp = 0; } else { for (Py_ssize_t i = 0; i < x.len; i++) { - if (x.items[i] != y.items[i]) { + if (x.buf->items[i] != y.buf->items[i]) { cmp = 0; break; } @@ -418,12 +411,12 @@ static PyObject *vec_richcompare(PyObject *self, PyObject *other, int op) { // Append item to 'vec', stealing 'vec'. Return 'vec' with item appended. VEC FUNC(Append)(VEC vec, ITEM_C_TYPE x) { - if (vec.items && vec.len < VEC_CAP(vec)) { - vec.items[vec.len] = x; + if (vec.buf && vec.len < VEC_CAP(vec)) { + vec.buf->items[vec.len] = x; vec.len++; return vec; } else { - Py_ssize_t cap = vec.items ? VEC_CAP(vec) : 0; + Py_ssize_t cap = vec.buf ? 
VEC_CAP(vec) : 0; Py_ssize_t new_size = Vec_GrowCapacity(cap); VEC new = vec_alloc(new_size); if (VEC_IS_ERROR(new)) { @@ -434,9 +427,9 @@ VEC FUNC(Append)(VEC vec, ITEM_C_TYPE x) { } new.len = vec.len + 1; if (vec.len > 0) - memcpy(new.items, vec.items, sizeof(ITEM_C_TYPE) * vec.len); - new.items[vec.len] = x; - VEC_DECREF(vec); + memcpy(new.buf->items, vec.buf->items, sizeof(ITEM_C_TYPE) * vec.len); + new.buf->items[vec.len] = x; + Py_XDECREF(vec.buf); return new; } } @@ -463,9 +456,9 @@ inline static VEC vec_extend_items( return vec_error(); } Py_ssize_t new_len = dst.len + n; - Py_ssize_t cap = dst.items ? VEC_CAP(dst) : 0; + Py_ssize_t cap = dst.buf ? VEC_CAP(dst) : 0; if (!force_alloc && new_len <= cap) { - memcpy(dst.items + dst.len, items, sizeof(ITEM_C_TYPE) * n); + memcpy(dst.buf->items + dst.len, items, sizeof(ITEM_C_TYPE) * n); dst.len = new_len; return dst; } @@ -476,10 +469,10 @@ inline static VEC vec_extend_items( return vec_error(); } if (dst.len > 0) - memcpy(new.items, dst.items, sizeof(ITEM_C_TYPE) * dst.len); - memcpy(new.items + dst.len, items, sizeof(ITEM_C_TYPE) * n); + memcpy(new.buf->items, dst.buf->items, sizeof(ITEM_C_TYPE) * dst.len); + memcpy(new.buf->items + dst.len, items, sizeof(ITEM_C_TYPE) * n); new.len = new_len; - VEC_DECREF(dst); + Py_XDECREF(dst.buf); return new; } @@ -508,10 +501,10 @@ VEC FUNC(Extend)(VEC vec, PyObject *iterable) { Py_ssize_t n = view.len / (Py_ssize_t)sizeof(ITEM_C_TYPE); if (n > 0) { Py_ssize_t dst_bytes = n * (Py_ssize_t)sizeof(ITEM_C_TYPE); - int force_alloc = vec.items != NULL + int force_alloc = vec.buf != NULL && n <= VEC_CAP(vec) - vec.len && vec_memory_overlaps(view.buf, view.len, - vec.items + vec.len, dst_bytes); + vec.buf->items + vec.len, dst_bytes); vec = vec_extend_items(vec, (const ITEM_C_TYPE *)view.buf, n, force_alloc); } PyBuffer_Release(&view); @@ -552,7 +545,7 @@ VEC FUNC(Extend)(VEC vec, PyObject *iterable) { VEC FUNC(ExtendVec)(VEC dst, VEC src) { if (src.len == 0) return dst; - return 
vec_extend_items(dst, src.items, src.len, dst.items == src.items); + return vec_extend_items(dst, src.buf->items, src.len, dst.buf == src.buf); } // Convert vec to list, stealing 'v'. @@ -564,7 +557,7 @@ PyObject *FUNC(ToList)(VEC v) { return NULL; } for (Py_ssize_t i = 0; i < n; i++) { - PyObject *item = BOX_ITEM(v.items[i]); + PyObject *item = BOX_ITEM(v.buf->items[i]); if (item == NULL) { Py_DECREF(list); VEC_DECREF(v); @@ -585,7 +578,7 @@ PyObject *FUNC(ToTuple)(VEC v) { return NULL; } for (Py_ssize_t i = 0; i < n; i++) { - PyObject *item = BOX_ITEM(v.items[i]); + PyObject *item = BOX_ITEM(v.buf->items[i]); if (item == NULL) { Py_DECREF(tuple); VEC_DECREF(v); @@ -600,9 +593,9 @@ PyObject *FUNC(ToTuple)(VEC v) { // Remove item from 'vec', stealing 'vec'. Return 'vec' with item removed. VEC FUNC(Remove)(VEC v, ITEM_C_TYPE x) { for (Py_ssize_t i = 0; i < v.len; i++) { - if (v.items[i] == x) { + if (v.buf->items[i] == x) { for (; i < v.len - 1; i++) { - v.items[i] = v.items[i + 1]; + v.buf->items[i] = v.buf->items[i + 1]; } v.len--; // Return the stolen reference without INCREF @@ -633,9 +626,9 @@ NAME(PopResult) FUNC(Pop)(VEC v, Py_ssize_t index) { return result; } - result.f1 = v.items[index]; + result.f1 = v.buf->items[index]; for (Py_ssize_t i = index; i < v.len - 1; i++) { - v.items[i] = v.items[i + 1]; + v.buf->items[i] = v.buf->items[i + 1]; } v.len--; @@ -664,7 +657,7 @@ static int vec_getbuffer(VEC_OBJECT *self, Py_buffer *view, int flags) { view->obj = (PyObject *)self; Py_INCREF(self); - view->buf = (void *)self->vec.items; + view->buf = (self->vec.buf != NULL) ? 
(void *)self->vec.buf->items : NULL; view->len = self->vec.len * (Py_ssize_t)sizeof(ITEM_C_TYPE); view->readonly = 1; view->itemsize = sizeof(ITEM_C_TYPE); @@ -703,7 +696,7 @@ static PyMethodDef vec_methods[] = { typedef struct { PyObject_HEAD - VEC vec; // Unboxed vec (keeps buffer alive via items reference) + VEC vec; // Unboxed vec (keeps buffer alive via buf reference) Py_ssize_t index; // Current iteration index } NAME(IterObject); @@ -714,33 +707,32 @@ static PyObject *vec_iter(PyObject *self) { if (it == NULL) return NULL; it->vec = ((VEC_OBJECT *)self)->vec; - VEC_INCREF(it->vec); + Py_XINCREF(it->vec.buf); it->index = 0; return (PyObject *)it; } static void vec_iter_dealloc(NAME(IterObject) *self) { - VEC_DECREF(self->vec); + Py_XDECREF(self->vec.buf); PyObject_Del(self); } static PyObject *vec_iter_next(NAME(IterObject) *self) { - if (self->vec.items == NULL) + if (self->vec.buf == NULL) return NULL; if (self->index < self->vec.len) { - PyObject *item = BOX_ITEM(self->vec.items[self->index]); + PyObject *item = BOX_ITEM(self->vec.buf->items[self->index]); if (item == NULL) return NULL; self->index++; return item; } - VEC_DECREF(self->vec); - self->vec.items = NULL; + Py_CLEAR(self->vec.buf); return NULL; // StopIteration } static PyObject *vec_iter_len(NAME(IterObject) *self, PyObject *Py_UNUSED(ignored)) { - if (self->vec.items == NULL) + if (self->vec.buf == NULL) return PyLong_FromSsize_t(0); Py_ssize_t remaining = self->vec.len - self->index; if (remaining < 0) @@ -816,7 +808,4 @@ NAME(API) FEATURES = { FUNC(ToTuple), }; -#undef VEC_BUF -#undef VEC_CAP -#undef VEC_INCREF -#undef VEC_DECREF +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs/vec_u8.c b/mypyc/lib-rt/vecs/vec_u8.c index f2942689e95b6..f5c82824790fd 100644 --- a/mypyc/lib-rt/vecs/vec_u8.c +++ b/mypyc/lib-rt/vecs/vec_u8.c @@ -1,3 +1,4 @@ +#ifdef MYPYC_EXPERIMENTAL #define VEC VecU8 #define VEC_TYPE VecU8Type #define VEC_OBJECT VecU8Object @@ -17,3 +18,5 @@ #define BUFFER_FORMAT 
"B" #include "vec_template.c" + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs_extra_ops.c b/mypyc/lib-rt/vecs_extra_ops.c index a5da116c8a352..564eae2c9fcb5 100644 --- a/mypyc/lib-rt/vecs_extra_ops.c +++ b/mypyc/lib-rt/vecs_extra_ops.c @@ -3,4 +3,8 @@ #include "vecs_extra_ops.h" +#ifdef MYPYC_EXPERIMENTAL + // All operations are currently implemented as inline functions in vecs_extra_ops.h + +#endif // MYPYC_EXPERIMENTAL diff --git a/mypyc/lib-rt/vecs_extra_ops.h b/mypyc/lib-rt/vecs_extra_ops.h index 4c31d7cd34488..561bf0a1b6524 100644 --- a/mypyc/lib-rt/vecs_extra_ops.h +++ b/mypyc/lib-rt/vecs_extra_ops.h @@ -1,6 +1,8 @@ #ifndef VECS_EXTRA_OPS_H #define VECS_EXTRA_OPS_H +#ifdef MYPYC_EXPERIMENTAL + #include "vecs/librt_vecs_api.h" // Check if obj is an instance of vec (any vec type) @@ -12,7 +14,9 @@ static inline PyObject *CPyVecU8_ToBytes(VecU8 v) { if (v.len == 0) { return PyBytes_FromStringAndSize(NULL, 0); } - return PyBytes_FromStringAndSize((const char *)v.items, v.len); + return PyBytes_FromStringAndSize((const char *)v.buf->items, v.len); } +#endif // MYPYC_EXPERIMENTAL + #endif diff --git a/mypyc/primitives/librt_random_ops.py b/mypyc/primitives/librt_random_ops.py deleted file mode 100644 index 6aaee84ecd0d6..0000000000000 --- a/mypyc/primitives/librt_random_ops.py +++ /dev/null @@ -1,104 +0,0 @@ -from mypyc.ir.deps import LIBRT_RANDOM -from mypyc.ir.ops import ERR_MAGIC, ERR_NEVER -from mypyc.ir.rtypes import float_rprimitive, int64_rprimitive, random_rprimitive -from mypyc.primitives.registry import function_op, method_op - -# Random() -- construct with OS entropy -function_op( - name="librt.random.Random", - arg_types=[], - return_type=random_rprimitive, - c_function_name="LibRTRandom_Random_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Random(seed) -- construct with integer seed -function_op( - name="librt.random.Random", - arg_types=[int64_rprimitive], - return_type=random_rprimitive, - 
c_function_name="LibRTRandom_Random_from_seed_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Random.randint(a, b) -- return random integer in [a, b] -method_op( - name="randint", - arg_types=[random_rprimitive, int64_rprimitive, int64_rprimitive], - return_type=int64_rprimitive, - c_function_name="LibRTRandom_Random_randint_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Random.randrange(stop) -- return random integer in [0, stop) -method_op( - name="randrange", - arg_types=[random_rprimitive, int64_rprimitive], - return_type=int64_rprimitive, - c_function_name="LibRTRandom_Random_randrange1_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Random.randrange(start, stop) -- return random integer in [start, stop) -method_op( - name="randrange", - arg_types=[random_rprimitive, int64_rprimitive, int64_rprimitive], - return_type=int64_rprimitive, - c_function_name="LibRTRandom_Random_randrange2_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Random.random() -- return random float in [0.0, 1.0) -method_op( - name="random", - arg_types=[random_rprimitive], - return_type=float_rprimitive, - c_function_name="LibRTRandom_Random_random_internal", - error_kind=ERR_NEVER, - dependencies=[LIBRT_RANDOM], -) - -# Module-level random() -- return random float using thread-local RNG -function_op( - name="librt.random.random", - arg_types=[], - return_type=float_rprimitive, - c_function_name="LibRTRandom_module_random_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Module-level randrange(stop) -- return random integer using thread-local RNG -function_op( - name="librt.random.randrange", - arg_types=[int64_rprimitive], - return_type=int64_rprimitive, - c_function_name="LibRTRandom_module_randrange1_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Module-level randrange(start, stop) -- return random integer using thread-local RNG 
-function_op( - name="librt.random.randrange", - arg_types=[int64_rprimitive, int64_rprimitive], - return_type=int64_rprimitive, - c_function_name="LibRTRandom_module_randrange2_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) - -# Module-level randint(a, b) -- return random integer using thread-local RNG -function_op( - name="librt.random.randint", - arg_types=[int64_rprimitive, int64_rprimitive], - return_type=int64_rprimitive, - c_function_name="LibRTRandom_module_randint_internal", - error_kind=ERR_MAGIC, - dependencies=[LIBRT_RANDOM], -) diff --git a/mypyc/primitives/librt_vecs_ops.py b/mypyc/primitives/librt_vecs_ops.py index e4852d5387069..57769c77be929 100644 --- a/mypyc/primitives/librt_vecs_ops.py +++ b/mypyc/primitives/librt_vecs_ops.py @@ -16,6 +16,7 @@ return_type=bit_rprimitive, c_function_name="CPyVec_Check", error_kind=ERR_NEVER, + experimental=True, dependencies=[LIBRT_VECS, VECS_EXTRA_OPS], ) @@ -26,5 +27,6 @@ return_type=bytes_rprimitive, c_function_name="CPyVecU8_ToBytes", error_kind=ERR_MAGIC, + experimental=True, dependencies=[LIBRT_VECS, VECS_EXTRA_OPS], ) diff --git a/mypyc/primitives/registry.py b/mypyc/primitives/registry.py index e22a044d9bb27..c04b4ff65a757 100644 --- a/mypyc/primitives/registry.py +++ b/mypyc/primitives/registry.py @@ -403,7 +403,6 @@ def load_global_op(name: str, type: RType, src: str) -> LoadAddressDescription: import mypyc.primitives.dict_ops import mypyc.primitives.float_ops import mypyc.primitives.int_ops -import mypyc.primitives.librt_random_ops import mypyc.primitives.librt_strings_ops import mypyc.primitives.librt_time_ops import mypyc.primitives.librt_vecs_ops diff --git a/mypyc/test-data/capsule-deps.test b/mypyc/test-data/capsule-deps.test index 263e2a4b6ec40..00252dd2de6ec 100644 --- a/mypyc/test-data/capsule-deps.test +++ b/mypyc/test-data/capsule-deps.test @@ -79,7 +79,7 @@ def f() -> bytes: Capsule(name='librt.base64') Capsule(name='librt.strings') -[case testVecCapsuleDepInFunction] 
+[case testVecCapsuleDepInFunction_experimental] from librt.vecs import vec def f() -> None: @@ -87,7 +87,7 @@ def f() -> None: [out] Capsule(name='librt.vecs') -[case testVecCapsuleDepInMethod] +[case testVecCapsuleDepInMethod_experimental] from librt.vecs import vec class C: @@ -96,14 +96,14 @@ class C: [out] Capsule(name='librt.vecs') -[case testVecCapsuleDepAtTopLevel] +[case testVecCapsuleDepAtTopLevel_experimental] from librt.vecs import vec vec[str]() [out] Capsule(name='librt.vecs') -[case testVecCapsuleDepInAttributeType] +[case testVecCapsuleDepInAttributeType_experimental] from librt.vecs import vec class C: diff --git a/mypyc/test-data/irbuild-librt-random.test b/mypyc/test-data/irbuild-librt-random.test deleted file mode 100644 index 9215c13c88d6e..0000000000000 --- a/mypyc/test-data/irbuild-librt-random.test +++ /dev/null @@ -1,119 +0,0 @@ -[case testLibrtRandomConstructor_64bit] -from librt.random import Random - -def make_random() -> Random: - return Random() -[out] -def make_random(): - r0 :: librt.random.Random -L0: - r0 = LibRTRandom_Random_internal() - return r0 - -[case testLibrtRandomConstructorWithSeed_64bit] -from librt.random import Random -from mypy_extensions import i64 - -def make_random_seeded(n: i64) -> Random: - return Random(n) -[out] -def make_random_seeded(n): - n :: i64 - r0 :: librt.random.Random -L0: - r0 = LibRTRandom_Random_from_seed_internal(n) - return r0 - -[case testLibrtRandomRandrange_64bit] -from librt.random import Random -from mypy_extensions import i64 - -def randrange1(r: Random, stop: i64) -> i64: - return r.randrange(stop) -def randrange2(r: Random, start: i64, stop: i64) -> i64: - return r.randrange(start, stop) -[out] -def randrange1(r, stop): - r :: librt.random.Random - stop, r0 :: i64 -L0: - r0 = LibRTRandom_Random_randrange1_internal(r, stop) - return r0 -def randrange2(r, start, stop): - r :: librt.random.Random - start, stop, r0 :: i64 -L0: - r0 = LibRTRandom_Random_randrange2_internal(r, start, stop) - 
return r0 - -[case testLibrtRandomRandint_64bit] -from librt.random import Random -from mypy_extensions import i64 - -def randint(r: Random, a: i64, b: i64) -> i64: - return r.randint(a, b) -[out] -def randint(r, a, b): - r :: librt.random.Random - a, b, r0 :: i64 -L0: - r0 = LibRTRandom_Random_randint_internal(r, a, b) - return r0 - -[case testLibrtRandomRandom_64bit] -from librt.random import Random - -def rand(r: Random) -> float: - return r.random() -[out] -def rand(r): - r :: librt.random.Random - r0 :: float -L0: - r0 = LibRTRandom_Random_random_internal(r) - return r0 - -[case testLibrtRandomModuleRandom_64bit] -from librt.random import random - -def module_random() -> float: - return random() -[out] -def module_random(): - r0 :: float -L0: - r0 = LibRTRandom_module_random_internal() - return r0 - -[case testLibrtRandomModuleRandint_64bit] -from librt.random import randint -from mypy_extensions import i64 - -def module_randint(a: i64, b: i64) -> i64: - return randint(a, b) -[out] -def module_randint(a, b): - a, b, r0 :: i64 -L0: - r0 = LibRTRandom_module_randint_internal(a, b) - return r0 - -[case testLibrtRandomModuleRandrange_64bit] -from librt.random import randrange -from mypy_extensions import i64 - -def module_randrange1(stop: i64) -> i64: - return randrange(stop) -def module_randrange2(start: i64, stop: i64) -> i64: - return randrange(start, stop) -[out] -def module_randrange1(stop): - stop, r0 :: i64 -L0: - r0 = LibRTRandom_module_randrange1_internal(stop) - return r0 -def module_randrange2(start, stop): - start, stop, r0 :: i64 -L0: - r0 = LibRTRandom_module_randrange2_internal(start, stop) - return r0 diff --git a/mypyc/test-data/irbuild-vec-i64.test b/mypyc/test-data/irbuild-vec-i64.test index aeab3ed9f2a8b..37603cdd40849 100644 --- a/mypyc/test-data/irbuild-vec-i64.test +++ b/mypyc/test-data/irbuild-vec-i64.test @@ -65,10 +65,11 @@ def f(v, i): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: i64 + r6 :: object + r7 :: 
ptr + r8 :: i64 + r9 :: ptr + r10 :: i64 L0: r0 = v.len r1 = i < r0 :: unsigned @@ -86,12 +87,13 @@ L3: L4: r5 = i L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = load_mem r8 :: i64* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecI64BufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = load_mem r9 :: i64* keep_alive v - return r9 + return r10 [case testVecI64GetItem_32bit] # The IR is quite verbose, but it's acceptable since 32-bit targets are not common any more @@ -113,10 +115,11 @@ def f(v, i): r6 :: i64 r7, r8 :: bit r9 :: native_int - r10 :: ptr - r11 :: native_int - r12 :: ptr - r13 :: i64 + r10 :: object + r11 :: ptr + r12 :: native_int + r13 :: ptr + r14 :: i64 L0: r0 = v.len r1 = extend signed r0: native_int to i64 @@ -147,12 +150,13 @@ L8: CPyInt32_Overflow() unreachable L9: - r10 = v.items - r11 = r9 * 8 - r12 = r10 + r11 - r13 = load_mem r12 :: i64* + r10 = v.buf + r11 = get_element_ptr r10 items :: VecI64BufObject + r12 = r9 * 8 + r13 = r11 + r12 + r14 = load_mem r13 :: i64* keep_alive v - return r13 + return r14 [case testVecI64Append] from librt.vecs import vec, append @@ -214,9 +218,10 @@ def f(v, i, x): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr L0: r0 = v.len r1 = i < r0 :: unsigned @@ -234,10 +239,11 @@ L3: L4: r5 = i L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - set_mem r8, x :: i64* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecI64BufObject + r8 = r5 * 8 + r9 = r7 + r8 + set_mem r9, x :: i64* keep_alive v return 1 @@ -250,16 +256,18 @@ def f() -> vec[i64]: [out] def f(): r0 :: vec[i64] - r1, r2, r3, r4 :: ptr + r1 :: object + r2, r3, r4, r5 :: ptr L0: r0 = VecI64Api.alloc(3, 3) - r1 = r0.items - set_mem r1, 1 :: i64* - r2 = r1 + 8 - set_mem r2, 5 :: i64* + r1 = r0.buf + r2 = get_element_ptr r1 items :: VecI64BufObject + set_mem r2, 1 :: i64* r3 = r2 + 8 - set_mem r3, 14 :: i64* + set_mem r3, 5 :: i64* r4 = r3 + 8 + set_mem r4, 14 :: i64* + r5 = r4 + 8 
keep_alive r0 return r0 @@ -273,24 +281,26 @@ def f(n: i64) -> vec[i64]: def f(n): n :: i64 r0 :: vec[i64] - r1 :: ptr - r2 :: i64 - r3, r4 :: ptr - r5 :: bit - r6 :: ptr + r1 :: object + r2 :: ptr + r3 :: i64 + r4, r5 :: ptr + r6 :: bit + r7 :: ptr L0: r0 = VecI64Api.alloc(n, n) - r1 = r0.items - r2 = n * 8 - r3 = r1 + r2 - r4 = r1 + r1 = r0.buf + r2 = get_element_ptr r1 items :: VecI64BufObject + r3 = n * 8 + r4 = r2 + r3 + r5 = r2 L1: - r5 = r4 < r3 :: unsigned - if r5 goto L2 else goto L3 :: bool + r6 = r5 < r4 :: unsigned + if r6 goto L2 else goto L3 :: bool L2: - set_mem r4, 3 :: i64* - r6 = r4 + 8 - r4 = r6 + set_mem r5, 3 :: i64* + r7 = r5 + 8 + r5 = r7 goto L1 L3: keep_alive r0 @@ -306,24 +316,26 @@ def f(n: i64, x: i64) -> vec[i64]: def f(n, x): n, x :: i64 r0 :: vec[i64] - r1 :: ptr - r2 :: native_int - r3, r4 :: ptr - r5 :: bit - r6 :: ptr + r1 :: object + r2 :: ptr + r3 :: native_int + r4, r5 :: ptr + r6 :: bit + r7 :: ptr L0: r0 = VecI64Api.alloc(3, 3) - r1 = r0.items - r2 = 3 * 8 - r3 = r1 + r2 - r4 = r1 + r1 = r0.buf + r2 = get_element_ptr r1 items :: VecI64BufObject + r3 = 3 * 8 + r4 = r2 + r3 + r5 = r2 L1: - r5 = r4 < r3 :: unsigned - if r5 goto L2 else goto L3 :: bool + r6 = r5 < r4 :: unsigned + if r6 goto L2 else goto L3 :: bool L2: - set_mem r4, x :: i64* - r6 = r4 + 8 - r4 = r6 + set_mem r5, x :: i64* + r7 = r5 + 8 + r5 = r7 goto L1 L3: keep_alive r0 @@ -382,10 +394,11 @@ def f(n, l): r4 :: bit r5 :: object r6, x, r7 :: i64 - r8 :: ptr - r9 :: native_int - r10 :: ptr - r11 :: native_int + r8 :: object + r9 :: ptr + r10 :: native_int + r11 :: ptr + r12 :: native_int L0: r0 = var_object_size l r1 = VecI64Api.alloc(r0, r0) @@ -399,14 +412,15 @@ L2: r6 = unbox(i64, r5) x = r6 r7 = x + 1 - r8 = r1.items - r9 = r2 * 8 - r10 = r8 + r9 - set_mem r10, r7 :: i64* + r8 = r1.buf + r9 = get_element_ptr r8 items :: VecI64BufObject + r10 = r2 * 8 + r11 = r9 + r10 + set_mem r11, r7 :: i64* keep_alive r1 L3: - r11 = r2 + 1 - r2 = r11 + r12 = r2 + 1 + r2 = r12 
goto L1 L4: return r1 @@ -426,14 +440,16 @@ def f(n, v): r1 :: vec[i64] r2, r3 :: native_int r4 :: bit - r5 :: ptr - r6 :: native_int - r7 :: ptr - r8, x, r9 :: i64 - r10 :: ptr - r11 :: native_int + r5 :: object + r6 :: ptr + r7 :: native_int + r8 :: ptr + r9, x, r10 :: i64 + r11 :: object r12 :: ptr r13 :: native_int + r14 :: ptr + r15 :: native_int L0: r0 = v.len r1 = VecI64Api.alloc(r0, r0) @@ -443,21 +459,23 @@ L1: r4 = r2 < r3 :: signed if r4 goto L2 else goto L4 :: bool L2: - r5 = v.items - r6 = r2 * 8 - r7 = r5 + r6 - r8 = load_mem r7 :: i64* - x = r8 + r5 = v.buf + r6 = get_element_ptr r5 items :: VecI64BufObject + r7 = r2 * 8 + r8 = r6 + r7 + r9 = load_mem r8 :: i64* + x = r9 keep_alive v - r9 = x + 1 - r10 = r1.items - r11 = r2 * 8 - r12 = r10 + r11 - set_mem r12, r9 :: i64* + r10 = x + 1 + r11 = r1.buf + r12 = get_element_ptr r11 items :: VecI64BufObject + r13 = r2 * 8 + r14 = r12 + r13 + set_mem r14, r10 :: i64* keep_alive r1 L3: - r13 = r2 + 1 - r2 = r13 + r15 = r2 + 1 + r2 = r15 goto L1 L4: return r1 @@ -514,11 +532,12 @@ def f(v): t :: i64 r0, r1 :: native_int r2 :: bit - r3 :: ptr - r4 :: native_int - r5 :: ptr - r6, x, r7 :: i64 - r8 :: native_int + r3 :: object + r4 :: ptr + r5 :: native_int + r6 :: ptr + r7, x, r8 :: i64 + r9 :: native_int L0: t = 0 r0 = 0 @@ -527,17 +546,18 @@ L1: r2 = r0 < r1 :: signed if r2 goto L2 else goto L4 :: bool L2: - r3 = v.items - r4 = r0 * 8 - r5 = r3 + r4 - r6 = load_mem r5 :: i64* - x = r6 + r3 = v.buf + r4 = get_element_ptr r3 items :: VecI64BufObject + r5 = r0 * 8 + r6 = r4 + r5 + r7 = load_mem r6 :: i64* + x = r7 keep_alive v - r7 = t + 1 - t = r7 + r8 = t + 1 + t = r8 L3: - r8 = r0 + 1 - r0 = r8 + r9 = r0 + 1 + r0 = r9 goto L1 L4: return t @@ -553,39 +573,41 @@ def contains(v, n): v :: vec[i64] n :: i64 r0 :: native_int - r1 :: ptr - r2 :: native_int - r3, r4 :: ptr - r5 :: bit - r6 :: i64 - r7 :: bit - r8 :: ptr - r9 :: bool + r1 :: object + r2 :: ptr + r3 :: native_int + r4, r5 :: ptr + r6 :: bit + r7 :: i64 
+ r8 :: bit + r9 :: ptr + r10 :: bool L0: r0 = v.len - r1 = v.items - r2 = r0 * 8 - r3 = r1 + r2 - r4 = r1 + r1 = v.buf + r2 = get_element_ptr r1 items :: VecI64BufObject + r3 = r0 * 8 + r4 = r2 + r3 + r5 = r2 L1: - r5 = r4 < r3 :: unsigned - if r5 goto L2 else goto L4 :: bool + r6 = r5 < r4 :: unsigned + if r6 goto L2 else goto L4 :: bool L2: - r6 = load_mem r4 :: i64* - r7 = r6 == n - if r7 goto L5 else goto L3 :: bool + r7 = load_mem r5 :: i64* + r8 = r7 == n + if r8 goto L5 else goto L3 :: bool L3: - r8 = r4 + 8 - r4 = r8 + r9 = r5 + 8 + r5 = r9 goto L1 L4: keep_alive v - r9 = 0 + r10 = 0 goto L6 L5: - r9 = 1 + r10 = 1 L6: - return r9 + return r10 [case testVecI64GetItemWithInt_64bit] from librt.vecs import vec @@ -602,10 +624,11 @@ def f(v): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: i64 + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: i64 L0: r0 = v.len r1 = 0 < r0 :: unsigned @@ -623,12 +646,13 @@ L3: L4: r5 = 0 L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = load_mem r8 :: i64* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecI64BufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = load_mem r9 :: i64* keep_alive v - return r9 + return r10 [case testVecI64Slicing_64bit] from librt.vecs import vec @@ -721,19 +745,21 @@ def inplace(v, n, m): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9, r10 :: i64 - r11 :: native_int - r12 :: bit - r13 :: i64 - r14 :: bit - r15 :: bool - r16 :: i64 - r17 :: ptr - r18 :: i64 + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10, r11 :: i64 + r12 :: native_int + r13 :: bit + r14 :: i64 + r15 :: bit + r16 :: bool + r17 :: i64 + r18 :: object r19 :: ptr + r20 :: i64 + r21 :: ptr L0: r0 = v.len r1 = n < r0 :: unsigned @@ -751,32 +777,34 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = load_mem r8 :: i64* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecI64BufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = load_mem r9 :: i64* keep_alive 
v - r10 = r9 + m - r11 = v.len - r12 = n < r11 :: unsigned - if r12 goto L9 else goto L6 :: bool + r11 = r10 + m + r12 = v.len + r13 = n < r12 :: unsigned + if r13 goto L9 else goto L6 :: bool L6: - r13 = n + r11 - r14 = r13 < r11 :: unsigned - if r14 goto L8 else goto L7 :: bool + r14 = n + r12 + r15 = r14 < r12 :: unsigned + if r15 goto L8 else goto L7 :: bool L7: - r15 = raise IndexError + r16 = raise IndexError unreachable L8: - r16 = r13 + r17 = r14 goto L10 L9: - r16 = n + r17 = n L10: - r17 = v.items - r18 = r16 * 8 - r19 = r17 + r18 - set_mem r19, r10 :: i64* + r18 = v.buf + r19 = get_element_ptr r18 items :: VecI64BufObject + r20 = r17 * 8 + r21 = r19 + r20 + set_mem r21, r11 :: i64* keep_alive v return 1 @@ -853,14 +881,16 @@ def list_with_cap() -> vec[i64]: [out] def list_with_cap(): r0 :: vec[i64] - r1, r2, r3 :: ptr + r1 :: object + r2, r3, r4 :: ptr L0: r0 = VecI64Api.alloc(2, 5) - r1 = r0.items - set_mem r1, 1 :: i64* - r2 = r1 + 8 - set_mem r2, 2 :: i64* + r1 = r0.buf + r2 = get_element_ptr r1 items :: VecI64BufObject + set_mem r2, 1 :: i64* r3 = r2 + 8 + set_mem r3, 2 :: i64* + r4 = r3 + 8 keep_alive r0 return r0 @@ -874,24 +904,26 @@ def repeated_with_cap(n: i64) -> vec[i64]: def repeated_with_cap(n): n :: i64 r0 :: vec[i64] - r1 :: ptr - r2 :: i64 - r3, r4 :: ptr - r5 :: bit - r6 :: ptr + r1 :: object + r2 :: ptr + r3 :: i64 + r4, r5 :: ptr + r6 :: bit + r7 :: ptr L0: r0 = VecI64Api.alloc(n, 10) - r1 = r0.items - r2 = n * 8 - r3 = r1 + r2 - r4 = r1 + r1 = r0.buf + r2 = get_element_ptr r1 items :: VecI64BufObject + r3 = n * 8 + r4 = r2 + r3 + r5 = r2 L1: - r5 = r4 < r3 :: unsigned - if r5 goto L2 else goto L3 :: bool + r6 = r5 < r4 :: unsigned + if r6 goto L2 else goto L3 :: bool L2: - set_mem r4, 3 :: i64* - r6 = r4 + 8 - r4 = r6 + set_mem r5, 3 :: i64* + r7 = r5 + 8 + r5 = r7 goto L1 L3: keep_alive r0 diff --git a/mypyc/test-data/irbuild-vec-misc.test b/mypyc/test-data/irbuild-vec-misc.test index c0d4325e38fcc..58753326b905f 100644 --- 
a/mypyc/test-data/irbuild-vec-misc.test +++ b/mypyc/test-data/irbuild-vec-misc.test @@ -60,14 +60,16 @@ def create_i32() -> vec[i32]: [out] def create_i32(): r0 :: vec[i32] - r1, r2, r3 :: ptr + r1 :: object + r2, r3, r4 :: ptr L0: r0 = VecI32Api.alloc(2, 2) - r1 = r0.items - set_mem r1, 1 :: i32* - r2 = r1 + 4 - set_mem r2, -5 :: i32* + r1 = r0.buf + r2 = get_element_ptr r1 items :: VecI32BufObject + set_mem r2, 1 :: i32* r3 = r2 + 4 + set_mem r3, -5 :: i32* + r4 = r3 + 4 keep_alive r0 return r0 @@ -125,10 +127,11 @@ def get_item_bool(v, i): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: bool + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: bool L0: r0 = v.len r1 = i < r0 :: unsigned @@ -146,12 +149,13 @@ L3: L4: r5 = i L5: - r6 = v.items - r7 = r5 * 1 - r8 = r6 + r7 - r9 = load_mem r8 :: builtins.bool* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecBoolBufObject + r8 = r5 * 1 + r9 = r7 + r8 + r10 = load_mem r9 :: builtins.bool* keep_alive v - return r9 + return r10 [case testVecMiscPop] from librt.vecs import vec, pop @@ -225,11 +229,12 @@ def for_bool(v): s :: i16 r0, r1 :: native_int r2 :: bit - r3 :: ptr - r4 :: native_int - r5 :: ptr - r6, x, r7 :: i16 - r8 :: native_int + r3 :: object + r4 :: ptr + r5 :: native_int + r6 :: ptr + r7, x, r8 :: i16 + r9 :: native_int L0: s = 0 r0 = 0 @@ -238,17 +243,18 @@ L1: r2 = r0 < r1 :: signed if r2 goto L2 else goto L4 :: bool L2: - r3 = v.items - r4 = r0 * 2 - r5 = r3 + r4 - r6 = load_mem r5 :: i16* - x = r6 + r3 = v.buf + r4 = get_element_ptr r3 items :: VecI16BufObject + r5 = r0 * 2 + r6 = r4 + r5 + r7 = load_mem r6 :: i16* + x = r7 keep_alive v - r7 = s + x - s = r7 + r8 = s + x + s = r8 L3: - r8 = r0 + 1 - r0 = r8 + r9 = r0 + 1 + r0 = r9 goto L1 L4: return s @@ -270,10 +276,11 @@ def get_item_nested(v, i): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: vec[i32] + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: vec[i32] L0: r0 = v.len r1 
= i < r0 :: unsigned @@ -291,12 +298,13 @@ L3: L4: r5 = i L5: - r6 = v.items - r7 = r5 * 16 - r8 = r6 + r7 - r9 = load_mem r8 :: vec[i32]* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecNestedBufObject + r8 = r5 * 16 + r9 = r7 + r8 + r10 = load_mem r9 :: vec[i32]* keep_alive v - return r9 + return r10 [case testVecMiscNestedPop_64bit] from librt.vecs import vec, pop @@ -310,39 +318,34 @@ def get_item_nested(v: vec[vec[i32]], i: i64) -> vec[i32]: def get_item_nested(v, i): v :: vec[vec[i32]] i :: i64 - r0 :: tuple[vec[vec[i32]], VecNestedBufItem{len:native_int, items:ptr}] + r0 :: tuple[vec[vec[i32]], VecNestedBufItem{len:native_int, buf:object_nrc}] r1, r2 :: vec[vec[i32]] - r3, r4 :: VecNestedBufItem{len:native_int, items:ptr} - r5 :: native_int - r6 :: ptr - r7, r8 :: vec[i32] - r9 :: tuple[vec[vec[i32]], vec[i32]] - r10 :: vec[vec[i32]] - r11 :: vec[i32] - r12 :: vec[vec[i32]] - r13, x :: vec[i32] + r3, r4 :: VecNestedBufItem{len:native_int, buf:object_nrc} + r5 :: vec[i32] + r6 :: tuple[vec[vec[i32]], vec[i32]] + r7 :: vec[vec[i32]] + r8 :: vec[i32] + r9 :: vec[vec[i32]] + r10, x :: vec[i32] L0: r0 = VecNestedApi.pop(v, i) r1 = borrow r0[0] r2 = unborrow r1 r3 = borrow r0[1] r4 = unborrow r3 - r5 = r4.len - r6 = r4.items - r7 = set_element undef vec[i32], len, r5 - r8 = set_element r7, items, r6 - r9 = (r2, r8) + r5 = VecI32Api.convert_from_nested(r4) + r6 = (r2, r5) keep_alive steal r0 - r10 = borrow r9[0] - r11 = borrow r9[1] - keep_alive steal r9 - r12 = unborrow r10 - v = r12 - r13 = unborrow r11 - x = r13 + r7 = borrow r6[0] + r8 = borrow r6[1] + keep_alive steal r6 + r9 = unborrow r7 + v = r9 + r10 = unborrow r8 + x = r10 return x -[case testVecU8ToBytes] +[case testVecU8ToBytes_experimental] from librt.vecs import vec from mypy_extensions import u8 diff --git a/mypyc/test-data/irbuild-vec-nested.test b/mypyc/test-data/irbuild-vec-nested.test index 1fe42a880d5b0..2d0b4dbbcae78 100644 --- a/mypyc/test-data/irbuild-vec-nested.test +++ 
b/mypyc/test-data/irbuild-vec-nested.test @@ -104,14 +104,14 @@ def f(v, vv): v :: vec[vec[str]] vv :: vec[str] r0 :: native_int - r1 :: ptr - r2, r3 :: VecNestedBufItem{len:native_int, items:ptr} + r1 :: object + r2, r3 :: VecNestedBufItem{len:native_int, buf:object_nrc} r4 :: vec[vec[str]] L0: r0 = vv.len - r1 = vv.items + r1 = vv.buf r2 = set_element undef VecNestedBufItem, len, r0 - r3 = set_element r2, items, r1 + r3 = set_element r2, buf, r1 r4 = VecNestedApi.append(v, r3) keep_alive vv return r4 @@ -127,14 +127,14 @@ def f(v, vv): v :: vec[vec[i64]] vv :: vec[i64] r0 :: native_int - r1 :: ptr - r2, r3 :: VecNestedBufItem{len:native_int, items:ptr} + r1 :: object + r2, r3 :: VecNestedBufItem{len:native_int, buf:object_nrc} r4 :: vec[vec[i64]] L0: r0 = vv.len - r1 = vv.items + r1 = vv.buf r2 = set_element undef VecNestedBufItem, len, r0 - r3 = set_element r2, items, r1 + r3 = set_element r2, buf, r1 r4 = VecNestedApi.append(v, r3) keep_alive vv return r4 @@ -206,10 +206,11 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: vec[str] + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: vec[str] L0: r0 = v.len r1 = n < r0 :: unsigned @@ -227,12 +228,13 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 16 - r8 = r6 + r7 - r9 = load_mem r8 :: vec[str]* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecNestedBufObject + r8 = r5 * 16 + r9 = r7 + r8 + r10 = load_mem r9 :: vec[str]* keep_alive v - return r9 + return r10 [case testVecNestedI64GetItem_64bit] from librt.vecs import vec @@ -250,10 +252,11 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: vec[i64] + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: vec[i64] L0: r0 = v.len r1 = n < r0 :: unsigned @@ -271,12 +274,13 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 16 - r8 = r6 + r7 - r9 = load_mem r8 :: vec[i64]* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecNestedBufObject + r8 = r5 * 16 + r9 = r7 + r8 + r10 
= load_mem r9 :: vec[i64]* keep_alive v - return r9 + return r10 [case testVecNestedI64GetItemWithBorrow_64bit] from librt.vecs import vec @@ -294,20 +298,22 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: vec[i64] - r10 :: native_int - r11 :: bit - r12 :: i64 - r13 :: bit - r14 :: bool - r15 :: i64 - r16 :: ptr - r17 :: i64 + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: vec[i64] + r11 :: native_int + r12 :: bit + r13 :: i64 + r14 :: bit + r15 :: bool + r16 :: i64 + r17 :: object r18 :: ptr r19 :: i64 + r20 :: ptr + r21 :: i64 L0: r0 = v.len r1 = n < r0 :: unsigned @@ -325,32 +331,34 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 16 - r8 = r6 + r7 - r9 = borrow load_mem r8 :: vec[i64]* - r10 = r9.len - r11 = n < r10 :: unsigned - if r11 goto L9 else goto L6 :: bool + r6 = v.buf + r7 = get_element_ptr r6 items :: VecNestedBufObject + r8 = r5 * 16 + r9 = r7 + r8 + r10 = borrow load_mem r9 :: vec[i64]* + r11 = r10.len + r12 = n < r11 :: unsigned + if r12 goto L9 else goto L6 :: bool L6: - r12 = n + r10 - r13 = r12 < r10 :: unsigned - if r13 goto L8 else goto L7 :: bool + r13 = n + r11 + r14 = r13 < r11 :: unsigned + if r14 goto L8 else goto L7 :: bool L7: - r14 = raise IndexError + r15 = raise IndexError unreachable L8: - r15 = r12 + r16 = r13 goto L10 L9: - r15 = n + r16 = n L10: - r16 = r9.items - r17 = r15 * 8 - r18 = r16 + r17 - r19 = load_mem r18 :: i64* - keep_alive v, r9 - return r19 + r17 = r10.buf + r18 = get_element_ptr r17 items :: VecI64BufObject + r19 = r16 * 8 + r20 = r18 + r19 + r21 = load_mem r20 :: i64* + keep_alive v, r10 + return r21 [case testVecDoublyNestedGetItem_64bit] from librt.vecs import vec @@ -368,10 +376,11 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: vec[vec[str]] + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: vec[vec[str]] L0: r0 = v.len r1 = n < r0 :: unsigned @@ -389,12 +398,13 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 
= r5 * 16 - r8 = r6 + r7 - r9 = load_mem r8 :: vec[vec[str]]* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecNestedBufObject + r8 = r5 * 16 + r9 = r7 + r8 + r10 = load_mem r9 :: vec[vec[str]]* keep_alive v - return r9 + return r10 [case testVecNestedCreateWithCap_64bit] from librt.vecs import vec diff --git a/mypyc/test-data/irbuild-vec-t.test b/mypyc/test-data/irbuild-vec-t.test index 63ad14bc2d7a2..b2bd185c039c9 100644 --- a/mypyc/test-data/irbuild-vec-t.test +++ b/mypyc/test-data/irbuild-vec-t.test @@ -215,10 +215,11 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: str + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: str L0: r0 = v.len r1 = n < r0 :: unsigned @@ -236,12 +237,13 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = load_mem r8 :: builtins.str* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecTBufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = load_mem r9 :: builtins.str* keep_alive v - return r9 + return r10 [case testVecTOptionalGetItem_64bit] from librt.vecs import vec @@ -260,10 +262,11 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: union[str, None] + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: union[str, None] L0: r0 = v.len r1 = n < r0 :: unsigned @@ -281,12 +284,13 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = load_mem r8 :: union* + r6 = v.buf + r7 = get_element_ptr r6 items :: VecTBufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = load_mem r9 :: union* keep_alive v - return r9 + return r10 [case testNewTPopLast] from typing import Tuple @@ -459,18 +463,20 @@ def list_with_cap(): r2 :: object r3 :: ptr r4 :: vec[str] - r5, r6, r7 :: ptr + r5 :: object + r6, r7, r8 :: ptr L0: r0 = 'a' r1 = 'b' r2 = load_address PyUnicode_Type r3 = r2 r4 = VecTApi.alloc(2, 5, r3) - r5 = r4.items - set_mem r5, r0 :: builtins.str* - r6 = r5 + 8 - set_mem r6, r1 :: builtins.str* + r5 = r4.buf + r6 = 
get_element_ptr r5 items :: VecTBufObject + set_mem r6, r0 :: builtins.str* r7 = r6 + 8 + set_mem r7, r1 :: builtins.str* + r8 = r7 + 8 keep_alive r4 return r4 diff --git a/mypyc/test-data/refcount.test b/mypyc/test-data/refcount.test index 2f03a159dfacb..4e5f1a51cc73e 100644 --- a/mypyc/test-data/refcount.test +++ b/mypyc/test-data/refcount.test @@ -1519,10 +1519,11 @@ def f(v, i, x): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: str + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: str L0: r0 = v.len r1 = i < r0 :: unsigned @@ -1540,13 +1541,14 @@ L3: L4: r5 = i L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = borrow load_mem r8 :: builtins.str* - dec_ref r9 + r6 = v.buf + r7 = get_element_ptr r6 items :: VecTBufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = borrow load_mem r9 :: builtins.str* + dec_ref r10 inc_ref x - set_mem r8, x :: builtins.str* + set_mem r9, x :: builtins.str* return 1 [case testVecTConstructFromListMultiply_64bit] @@ -1562,29 +1564,31 @@ def f(n): r0, r1 :: object r2, r3 :: ptr r4 :: vec[str | None] - r5 :: ptr - r6 :: i64 - r7, r8 :: ptr - r9 :: bit - r10 :: ptr + r5 :: object + r6 :: ptr + r7 :: i64 + r8, r9 :: ptr + r10 :: bit + r11 :: ptr L0: r0 = box(None, 1) r1 = load_address PyUnicode_Type r2 = r1 r3 = r2 | 1 r4 = VecTApi.alloc(n, n, r3) - r5 = r4.items - r6 = n * 8 - r7 = r5 + r6 - r8 = r5 + r5 = r4.buf + r6 = get_element_ptr r5 items :: VecTBufObject + r7 = n * 8 + r8 = r6 + r7 + r9 = r6 L1: - r9 = r8 < r7 :: unsigned - if r9 goto L2 else goto L3 :: bool + r10 = r9 < r8 :: unsigned + if r10 goto L2 else goto L3 :: bool L2: inc_ref r0 - set_mem r8, r0 :: union* - r10 = r8 + 8 - r8 = r10 + set_mem r9, r0 :: union* + r11 = r9 + 8 + r9 = r11 goto L1 L3: return r4 @@ -1606,12 +1610,13 @@ def f(v): t :: i64 r0, r1 :: native_int r2 :: bit - r3 :: ptr - r4 :: native_int - r5 :: ptr - r6, s :: str - r7 :: None - r8 :: native_int + r3 :: object + r4 :: ptr + r5 :: native_int + r6 :: ptr + r7, s :: 
str + r8 :: None + r9 :: native_int L0: t = 0 r0 = 0 @@ -1620,16 +1625,17 @@ L1: r2 = r0 < r1 :: signed if r2 goto L2 else goto L4 :: bool L2: - r3 = v.items - r4 = r0 * 8 - r5 = r3 + r4 - r6 = load_mem r5 :: builtins.str* - s = r6 - r7 = g(s) + r3 = v.buf + r4 = get_element_ptr r3 items :: VecTBufObject + r5 = r0 * 8 + r6 = r4 + r5 + r7 = load_mem r6 :: builtins.str* + s = r7 + r8 = g(s) dec_ref s L3: - r8 = r0 + 1 - r0 = r8 + r9 = r0 + 1 + r0 = r9 goto L1 L4: return t @@ -1649,20 +1655,22 @@ def f(): r2 :: object r3 :: ptr r4 :: vec[str] - r5, r6, r7 :: ptr + r5 :: object + r6, r7, r8 :: ptr L0: r0 = 'x' r1 = 'y' r2 = load_address PyUnicode_Type r3 = r2 r4 = VecTApi.alloc(2, 2, r3) - r5 = r4.items + r5 = r4.buf + r6 = get_element_ptr r5 items :: VecTBufObject inc_ref r0 - set_mem r5, r0 :: builtins.str* - r6 = r5 + 8 - inc_ref r1 - set_mem r6, r1 :: builtins.str* + set_mem r6, r0 :: builtins.str* r7 = r6 + 8 + inc_ref r1 + set_mem r7, r1 :: builtins.str* + r8 = r7 + 8 return r4 [case testVecI64GetItemBorrowVec_64bit] @@ -1685,10 +1693,11 @@ def C.f(self, x): r4 :: bit r5 :: bool r6 :: i64 - r7 :: ptr - r8 :: i64 - r9 :: ptr - r10 :: i64 + r7 :: object + r8 :: ptr + r9 :: i64 + r10 :: ptr + r11 :: i64 L0: r0 = borrow self.v r1 = r0.len @@ -1707,11 +1716,12 @@ L3: L4: r6 = x L5: - r7 = r0.items - r8 = r6 * 8 - r9 = r7 + r8 - r10 = load_mem r9 :: i64* - return r10 + r7 = r0.buf + r8 = get_element_ptr r7 items :: VecI64BufObject + r9 = r6 * 8 + r10 = r8 + r9 + r11 = load_mem r10 :: i64* + return r11 [case testVecI64LenBorrowVec_64bit] from librt.vecs import vec @@ -1743,14 +1753,14 @@ def f(vv, v): vv :: vec[vec[str]] v :: vec[str] r0 :: native_int - r1 :: ptr - r2, r3 :: VecNestedBufItem{len:native_int, items:ptr} + r1 :: object + r2, r3 :: VecNestedBufItem{len:native_int, buf:object_nrc} r4 :: vec[vec[str]] L0: r0 = v.len - r1 = v.items + r1 = v.buf r2 = set_element undef VecNestedBufItem, len, r0 - r3 = set_element r2, items, r1 + r3 = set_element r2, buf, r1 
inc_ref vv r4 = VecNestedApi.append(vv, r3) return r4 @@ -1771,10 +1781,11 @@ def f(v, n): r3 :: bit r4 :: bool r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: str + r6 :: object + r7 :: ptr + r8 :: i64 + r9 :: ptr + r10 :: str L0: r0 = v.len r1 = n < r0 :: unsigned @@ -1792,11 +1803,12 @@ L3: L4: r5 = n L5: - r6 = v.items - r7 = r5 * 8 - r8 = r6 + r7 - r9 = load_mem r8 :: builtins.str* - return r9 + r6 = v.buf + r7 = get_element_ptr r6 items :: VecTBufObject + r8 = r5 * 8 + r9 = r7 + r8 + r10 = load_mem r9 :: builtins.str* + return r10 [case testVecNestedGetItem_64bit] from librt.vecs import vec @@ -1817,10 +1829,11 @@ def f(v, n): r6 :: bit r7 :: bool r8 :: i64 - r9 :: ptr - r10 :: i64 - r11 :: ptr - r12, vv :: vec[str] + r9 :: object + r10 :: ptr + r11 :: i64 + r12 :: ptr + r13, vv :: vec[str] L0: r0 = load_address PyUnicode_Type r1 = r0 @@ -1841,91 +1854,19 @@ L3: L4: r8 = n L5: - r9 = r2.items - r10 = r8 * 16 - r11 = r9 + r10 - r12 = load_mem r11 :: vec[str]* + r9 = r2.buf + r10 = get_element_ptr r9 items :: VecNestedBufObject + r11 = r8 * 16 + r12 = r10 + r11 + r13 = load_mem r12 :: vec[str]* dec_ref r2 - vv = r12 + vv = r13 dec_ref vv return 1 L6: dec_ref r2 goto L2 -[case testVecNestedGetItemBorrow_64bit] -from librt.vecs import vec -from mypy_extensions import i64 - -def f(v: vec[vec[i64]], n: i64, m: i64) -> i64: - return v[n][m] -[out] -def f(v, n, m): - v :: vec[vec[i64]] - n, m :: i64 - r0 :: native_int - r1 :: bit - r2 :: i64 - r3 :: bit - r4 :: bool - r5 :: i64 - r6 :: ptr - r7 :: i64 - r8 :: ptr - r9 :: vec[i64] - r10 :: native_int - r11 :: bit - r12 :: i64 - r13 :: bit - r14 :: bool - r15 :: i64 - r16 :: ptr - r17 :: i64 - r18 :: ptr - r19 :: i64 -L0: - r0 = v.len - r1 = n < r0 :: unsigned - if r1 goto L4 else goto L1 :: bool -L1: - r2 = n + r0 - r3 = r2 < r0 :: unsigned - if r3 goto L3 else goto L2 :: bool -L2: - r4 = raise IndexError - unreachable -L3: - r5 = r2 - goto L5 -L4: - r5 = n -L5: - r6 = v.items - r7 = r5 * 16 - r8 = r6 + r7 - r9 
= borrow load_mem r8 :: vec[i64]* - r10 = r9.len - r11 = m < r10 :: unsigned - if r11 goto L9 else goto L6 :: bool -L6: - r12 = m + r10 - r13 = r12 < r10 :: unsigned - if r13 goto L8 else goto L7 :: bool -L7: - r14 = raise IndexError - unreachable -L8: - r15 = r12 - goto L10 -L9: - r15 = m -L10: - r16 = r9.items - r17 = r15 * 8 - r18 = r16 + r17 - r19 = load_mem r18 :: i64* - return r19 - [case testVecPop] from librt.vecs import vec, pop, append from mypy_extensions import i64 @@ -1966,17 +1907,15 @@ def f(v: vec[vec[str]]) -> vec[str]: [out] def f(v): v :: vec[vec[str]] - r0 :: tuple[vec[vec[str]], VecNestedBufItem{len:native_int, items:ptr}] + r0 :: tuple[vec[vec[str]], VecNestedBufItem{len:native_int, buf:object_nrc}] r1, r2 :: vec[vec[str]] - r3, r4 :: VecNestedBufItem{len:native_int, items:ptr} - r5 :: native_int - r6 :: ptr - r7, r8 :: vec[str] - r9 :: tuple[vec[vec[str]], vec[str]] - r10 :: vec[vec[str]] - r11 :: vec[str] - r12, vv :: vec[vec[str]] - r13, x :: vec[str] + r3, r4 :: VecNestedBufItem{len:native_int, buf:object_nrc} + r5 :: vec[str] + r6 :: tuple[vec[vec[str]], vec[str]] + r7 :: vec[vec[str]] + r8 :: vec[str] + r9, vv :: vec[vec[str]] + r10, x :: vec[str] L0: inc_ref v r0 = VecNestedApi.pop(v, -1) @@ -1984,18 +1923,15 @@ L0: r2 = unborrow r1 r3 = borrow r0[1] r4 = unborrow r3 - r5 = r4.len - r6 = r4.items - r7 = set_element undef vec[str], len, r5 - r8 = set_element r7, items, r6 - r9 = (r2, r8) - r10 = borrow r9[0] - r11 = borrow r9[1] - r12 = unborrow r10 - vv = r12 + r5 = VecTApi.convert_from_nested(r4) + r6 = (r2, r5) + r7 = borrow r6[0] + r8 = borrow r6[1] + r9 = unborrow r7 + vv = r9 dec_ref vv - r13 = unborrow r11 - x = r13 + r10 = unborrow r8 + x = r10 return x [case testPropertySetterCallWithRefcountedObject] diff --git a/mypyc/test-data/run-librt-random.test b/mypyc/test-data/run-librt-random.test deleted file mode 100644 index 0b34222678018..0000000000000 --- a/mypyc/test-data/run-librt-random.test +++ /dev/null @@ -1,344 +0,0 @@ 
-[case testRandom_librt] -from typing import Any - -from librt.random import Random, random, randint, randrange, seed -from mypy_extensions import i64 -from testutil import assertRaises - -# -# Random object basics -# - -def test_random_construct() -> None: - r = Random() - assert isinstance(r, Random) - -def test_randint_basic() -> None: - r = Random() - for i in range(100): - val = r.randint(0, 10) - assert 0 <= val <= 10 - -def test_randint_single_value() -> None: - r = Random() - for i in range(10): - assert r.randint(5, 5) == 5 - -def test_randint_negative_range() -> None: - r = Random() - for i in range(100): - val = r.randint(-10, -1) - assert -10 <= val <= -1 - -def test_randint_mixed_range() -> None: - r = Random() - for i in range(100): - val = r.randint(-5, 5) - assert -5 <= val <= 5 - -def test_randint_large_range() -> None: - r = Random() - for i in range(100): - val = r.randint(0, 1000000) - assert 0 <= val <= 1000000 - -def test_randint_produces_different_values() -> None: - r = Random() - values = set() - for i in range(100): - values.add(r.randint(0, 1000000)) - # With range 0-1000000 and 100 samples, we should get at least 2 distinct values - assert len(values) > 1 - -def test_random_basic() -> None: - r = Random() - for i in range(100): - val = r.random() - assert 0.0 <= val < 1.0 - -def test_random_returns_float() -> None: - r = Random() - val = r.random() - assert isinstance(val, float) - -def test_random_produces_different_values() -> None: - r = Random() - values = set() - for i in range(100): - values.add(r.random()) - assert len(values) > 1 - -def test_randrange_one_arg() -> None: - r = Random() - for i in range(100): - val = r.randrange(10) - assert 0 <= val < 10 - -def test_randrange_two_args() -> None: - r = Random() - for i in range(100): - val = r.randrange(5, 15) - assert 5 <= val < 15 - -def test_randrange_negative() -> None: - r = Random() - for i in range(100): - val = r.randrange(-10, 0) - assert -10 <= val < 0 - -def 
test_randrange_single_value() -> None: - r = Random() - for i in range(10): - assert r.randrange(7, 8) == 7 - -def test_randrange_produces_different_values() -> None: - r = Random() - values = set() - for i in range(100): - values.add(r.randrange(1000000)) - assert len(values) > 1 - -def test_constructor_seed() -> None: - r1 = Random(42) - r2 = Random(42) - vals1 = [r1.randint(0, 1000000) for _ in range(20)] - vals2 = [r2.randint(0, 1000000) for _ in range(20)] - assert vals1 == vals2 - -def test_constructor_seed_different() -> None: - r1 = Random(42) - r2 = Random(43) - vals1 = [r1.randint(0, 1000000) for _ in range(20)] - vals2 = [r2.randint(0, 1000000) for _ in range(20)] - assert vals1 != vals2 - -def test_constructor_none_seed() -> None: - r = Random(None) - val = r.random() - assert 0.0 <= val < 1.0 - -def test_seed_method() -> None: - r = Random(0) - r.seed(42) - vals1 = [r.randint(0, 1000000) for _ in range(20)] - r.seed(42) - vals2 = [r.randint(0, 1000000) for _ in range(20)] - assert vals1 == vals2 - -def test_seed_method_resets_state() -> None: - r = Random(42) - expected = [r.randint(0, 1000000) for _ in range(20)] - # Consume some values, then reseed - r.seed(42) - actual = [r.randint(0, 1000000) for _ in range(20)] - assert expected == actual - -# -# Module-level functions -# - -def test_module_random_basic() -> None: - for i in range(100): - val = random() - assert 0.0 <= val < 1.0 - -def test_module_random_returns_float() -> None: - assert isinstance(random(), float) - -def test_module_random_produces_different_values() -> None: - values = set() - for i in range(100): - values.add(random()) - assert len(values) > 1 - -def test_module_randint_basic() -> None: - for i in range(100): - val = randint(0, 10) - assert 0 <= val <= 10 - -def test_module_randint_single_value() -> None: - for i in range(10): - assert randint(5, 5) == 5 - -def test_module_randint_produces_different_values() -> None: - values = set() - for i in range(100): - 
values.add(randint(0, 1000000)) - assert len(values) > 1 - -def test_module_randrange_one_arg() -> None: - for i in range(100): - val = randrange(10) - assert 0 <= val < 10 - -def test_module_randrange_two_args() -> None: - for i in range(100): - val = randrange(5, 15) - assert 5 <= val < 15 - -def test_module_randrange_produces_different_values() -> None: - values = set() - for i in range(100): - values.add(randrange(1000000)) - assert len(values) > 1 - -def test_module_seed_reproducible() -> None: - seed(42) - vals1 = [randint(0, 1000000) for _ in range(20)] - seed(42) - vals2 = [randint(0, 1000000) for _ in range(20)] - assert vals1 == vals2 - -def test_module_seed_different() -> None: - seed(42) - vals1 = [randint(0, 1000000) for _ in range(20)] - seed(43) - vals2 = [randint(0, 1000000) for _ in range(20)] - assert vals1 != vals2 - -# -# Wrapper function calling convention (via Any) -# - -def test_method_random_via_wrapper() -> None: - r: Any = Random(42) - val = r.random() - assert isinstance(val, float) - assert 0.0 <= val < 1.0 - -def test_method_seed_via_wrapper() -> None: - r: Any = Random(0) - r.seed(42) - val = r.random() - assert 0.0 <= val < 1.0 - -def test_module_random_via_wrapper() -> None: - random_any: Any = random - val = random_any() - assert isinstance(val, float) - assert 0.0 <= val < 1.0 - -def test_module_randint_via_wrapper() -> None: - randint_any: Any = randint - val = randint_any(0, 10) - assert 0 <= val <= 10 - -def test_module_seed_via_wrapper() -> None: - seed_any: Any = seed - seed_any(42) - -# -# Wide i64 ranges -# - -def method_randint(r: Random, a: i64, b: i64) -> i64: - return r.randint(a, b) - -def method_randrange(r: Random, a: i64, b: i64) -> i64: - return r.randrange(a, b) - -def module_randint(a: i64, b: i64) -> i64: - return randint(a, b) - -def module_randrange(a: i64, b: i64) -> i64: - return randrange(a, b) - -def test_full_i64_randint_native() -> None: - lo: i64 = -9223372036854775808 - hi: i64 = 9223372036854775807 - r 
= Random(42) - saw_non_min = False - for i in range(20): - val = method_randint(r, lo, hi) - assert lo <= val <= hi - if val != lo: - saw_non_min = True - assert saw_non_min - -def test_full_i64_randint_module_native() -> None: - lo: i64 = -9223372036854775808 - hi: i64 = 9223372036854775807 - saw_non_min = False - for i in range(20): - val = module_randint(lo, hi) - assert lo <= val <= hi - if val != lo: - saw_non_min = True - assert saw_non_min - -def test_wide_i64_randrange_native() -> None: - lo: i64 = -9223372036854775808 - hi: i64 = 9223372036854775807 - r = Random(43) - for i in range(20): - val = method_randrange(r, lo, hi) - assert lo <= val < hi - val = module_randrange(lo, hi) - assert lo <= val < hi - -def test_full_i64_randint_python_api() -> None: - r: Any = Random(42) - lo = -9223372036854775808 - hi = 9223372036854775807 - saw_non_min = False - for i in range(20): - val = r.randint(lo, hi) - assert lo <= val <= hi - if val != lo: - saw_non_min = True - assert saw_non_min - -def test_wide_i64_randrange_python_api() -> None: - r: Any = Random(43) - randrange_any: Any = randrange - lo = -9223372036854775808 - hi = 9223372036854775807 - for i in range(20): - val = r.randrange(lo, hi) - assert lo <= val < hi - val = randrange_any(lo, hi) - assert lo <= val < hi - -# -# Error handling -# - -def test_randint_empty_range() -> None: - r = Random() - with assertRaises(ValueError, "empty range"): - r.randint(10, 5) - -def test_randint_wrong_arg_count() -> None: - r = Random() - with assertRaises(TypeError): - r.randint(1) # type: ignore[call-arg] - with assertRaises(TypeError): - r.randint(1, 2, 3) # type: ignore[call-arg] - -def test_module_randint_empty_range() -> None: - with assertRaises(ValueError, "empty range"): - randint(10, 5) - -def test_randrange_empty_range() -> None: - r = Random() - with assertRaises(ValueError, "empty range"): - r.randrange(0) - with assertRaises(ValueError, "empty range"): - r.randrange(-5) - with assertRaises(ValueError, 
"empty range"): - r.randrange(10, 10) - with assertRaises(ValueError, "empty range"): - r.randrange(10, 5) - -def test_randrange_wrong_arg_count() -> None: - r = Random() - with assertRaises(TypeError): - r.randrange() # type: ignore[call-overload] - with assertRaises(TypeError): - r.randrange(1, 2, 3) # type: ignore[call-overload] - -def test_module_randrange_empty_range() -> None: - with assertRaises(ValueError, "empty range"): - randrange(0) - with assertRaises(ValueError, "empty range"): - randrange(10, 5) diff --git a/mypyc/test-data/run-misc.test b/mypyc/test-data/run-misc.test index d48884ada853a..eda44e16871f2 100644 --- a/mypyc/test-data/run-misc.test +++ b/mypyc/test-data/run-misc.test @@ -971,10 +971,7 @@ print(z) [case testCheckVersion] import sys -if sys.version_info[:2] == (3, 16): - def version() -> int: - return 16 -elif sys.version_info[:2] == (3, 15): +if sys.version_info[:2] == (3, 15): def version() -> int: return 15 elif sys.version_info[:2] == (3, 14): diff --git a/mypyc/test-data/run-python312.test b/mypyc/test-data/run-python312.test index f3dc272fc9fa1..5ed6dca9ecb28 100644 --- a/mypyc/test-data/run-python312.test +++ b/mypyc/test-data/run-python312.test @@ -229,10 +229,9 @@ type C[*Ts] = tuple[*Ts] def test_type_var_tuple_type_alias() -> None: if sys.version_info >= (3, 15): # type: ignore[operator] assert str(C[int, str]) == "_frozen_importlib.C[int, str]" - assert str(getattr(C, "__value__")) == "tuple[typing.Unpack[~Ts]]" else: assert str(C[int, str]) == "C[int, str]" - assert str(getattr(C, "__value__")) == "tuple[typing.Unpack[Ts]]" + assert str(getattr(C, "__value__")) == "tuple[typing.Unpack[Ts]]" type D[**P] = Callable[P, int] diff --git a/mypyc/test-data/run-vecs-i64-interp.test b/mypyc/test-data/run-vecs-i64-interp.test index 113ae0154a8cd..8f82d5cbaf6dc 100644 --- a/mypyc/test-data/run-vecs-i64-interp.test +++ b/mypyc/test-data/run-vecs-i64-interp.test @@ -1,4 +1,4 @@ -[case testLibrtVecsI64Interpreted_librt] +[case 
testLibrtVecsI64Interpreted_librt_experimental] # Test cases for vec[i64], using generic operations (simulates use from interpreted code). # # These also act as test cases for specialized vec types in general (e.g. vec[float]), diff --git a/mypyc/test-data/run-vecs-i64.test b/mypyc/test-data/run-vecs-i64.test index b8a74c96d6fb9..97913d1321bdb 100644 --- a/mypyc/test-data/run-vecs-i64.test +++ b/mypyc/test-data/run-vecs-i64.test @@ -1,7 +1,7 @@ -- Test cases for vec[i64]. These also partially cover other unboxed item types, -- which use a similar runtime representation. -[case testVecI64BasicOps_librt] +[case testVecI64BasicOps_librt_experimental] from typing import Final, Any, Iterable, Tuple from mypy_extensions import i64, i32 diff --git a/mypyc/test-data/run-vecs-misc-interp.test b/mypyc/test-data/run-vecs-misc-interp.test index e5158d4a1a316..443450c699c6d 100644 --- a/mypyc/test-data/run-vecs-misc-interp.test +++ b/mypyc/test-data/run-vecs-misc-interp.test @@ -1,4 +1,4 @@ -[case testLibrtVecsMiscInterpreted_librt] +[case testLibrtVecsMiscInterpreted_librt_experimental] # Test cases for vec[]. using generic operations. # This simulates use from interpreted code. 
# @@ -538,7 +538,7 @@ def test_extend_bool() -> None: v = extend(v, [False, True]) assert v == vec[bool]([True, False, True]) -[case testLibrtVecsBufferProtocol_librt] +[case testLibrtVecsBufferProtocol_librt_experimental] import struct from typing import Any @@ -639,3 +639,14 @@ def test_buffer_extend_from_shared_buffer() -> None: v = extend(old, memoryview_(new)) assert v == vec[u8]([1, 2, 1, 2, 3]) assert new == vec[u8]([1, 2, 3]) + +[case testLibrtVecsFeaturesNotAvailableInNonExperimentalBuild_librt] +# This also ensures librt.vecs can be built without experimental features +import librt.vecs + +def test_features_not_available() -> None: + vecs: object = getattr(librt, "vecs") + assert not hasattr(vecs, "vec") + assert not hasattr(vecs, "append") + assert not hasattr(vecs, "remove") + assert not hasattr(vecs, "pop") diff --git a/mypyc/test-data/run-vecs-misc.test b/mypyc/test-data/run-vecs-misc.test index f3ebe11ff55e2..3a5de18690ba0 100644 --- a/mypyc/test-data/run-vecs-misc.test +++ b/mypyc/test-data/run-vecs-misc.test @@ -4,7 +4,7 @@ -- -- vec[i64] test cases are in run-vecs-i64.test. -[case testVecMiscBasicOps_librt] +[case testVecMiscBasicOps_librt_experimental] # mypy: allow-redefinition-old from typing import Any, Iterable, cast diff --git a/mypyc/test-data/run-vecs-nested-interp.test b/mypyc/test-data/run-vecs-nested-interp.test index 049e4dab9013b..767d1188e9252 100644 --- a/mypyc/test-data/run-vecs-nested-interp.test +++ b/mypyc/test-data/run-vecs-nested-interp.test @@ -1,4 +1,4 @@ -[case testLibrtVecsNestedInterpreted_librt] +[case testLibrtVecsNestedInterpreted_librt_experimental] # Test cases for nested vecs, using generic operations (simulates use from interpreted code). 
import sys diff --git a/mypyc/test-data/run-vecs-nested.test b/mypyc/test-data/run-vecs-nested.test index 98a53e6e0bb6e..7bfe06dbc408e 100644 --- a/mypyc/test-data/run-vecs-nested.test +++ b/mypyc/test-data/run-vecs-nested.test @@ -1,4 +1,4 @@ -[case testVecNestedBasicOps_librt] +[case testVecNestedBasicOps_librt_experimental] from typing import Final, Any, Iterable, Optional, Tuple import sys diff --git a/mypyc/test-data/run-vecs-t-interp.test b/mypyc/test-data/run-vecs-t-interp.test index 73077b5ef8938..a4ffdcb8d9f48 100644 --- a/mypyc/test-data/run-vecs-t-interp.test +++ b/mypyc/test-data/run-vecs-t-interp.test @@ -1,4 +1,4 @@ -[case testLibrtVecsTInterpreted_librt] +[case testLibrtVecsTInterpreted_librt_experimental] # Test cases for vec[], using generic operations (simulates use from interpreted code). import sys from typing import cast, List, Any diff --git a/mypyc/test-data/run-vecs-t.test b/mypyc/test-data/run-vecs-t.test index 50d1adbd51306..055438ced7ccd 100644 --- a/mypyc/test-data/run-vecs-t.test +++ b/mypyc/test-data/run-vecs-t.test @@ -1,7 +1,7 @@ -- Test cases for vec[t] where t is a boxed, non-vec type (PyObject *). -- Also tests for vec[t | None], which uses the same representation. 
-[case testVecTBasicOps_librt] +[case testVecTBasicOps_librt_experimental] from typing import Final, Any, Iterable, Optional, Tuple from mypy_extensions import i64 diff --git a/mypyc/test/test_emitmodule.py b/mypyc/test/test_emitmodule.py deleted file mode 100644 index 467876303e630..0000000000000 --- a/mypyc/test/test_emitmodule.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import annotations - -import tempfile -import unittest -from pathlib import Path - -import pytest - -from mypy import build -from mypy.options import Options -from mypyc.build import construct_groups -from mypyc.codegen import emitmodule -from mypyc.errors import Errors -from mypyc.irbuild.mapper import Mapper -from mypyc.options import CompilerOptions - - -class FakeSCC: - def __init__(self, mod_ids: list[str]) -> None: - self.mod_ids = mod_ids - - -class TestEmitModule(unittest.TestCase): - def test_compile_modules_to_ir_orders_scc_members_deterministically(self) -> None: - with tempfile.TemporaryDirectory() as tmp_dir, pytest.MonkeyPatch.context() as monkeypatch: - tmp_path = Path(tmp_dir) - a_py = tmp_path / "a.py" - b_py = tmp_path / "b.py" - a_py.write_text("import b\n\nclass A: pass\nclass C(A): pass\n", encoding="utf-8") - b_py.write_text( - "import a\n\nclass B(a.A): pass\nclass D(a.A): pass\n", encoding="utf-8" - ) - - sources = [ - build.BuildSource(str(a_py), "a", None), - build.BuildSource(str(b_py), "b", None), - ] - options = Options() - options.preserve_asts = True - options.mypy_path = [str(tmp_path)] - options.cache_dir = str(tmp_path / ".mypy_cache") - for source in sources: - options.per_module_options.setdefault(source.module, {})["mypyc"] = True - - compiler_options = CompilerOptions(strict_traceback_checks=True) - groups = construct_groups( - sources, False, use_shared_lib=True, group_name_override=None - ) - result = emitmodule.parse_and_typecheck(sources, options, compiler_options, groups) - try: - group_map = { - source.module: lib_name for group, lib_name in 
groups for source in group - } - children_by_order = [] - for order in (["a", "b"], ["b", "a"]): - monkeypatch.setattr( - emitmodule, - "sorted_components", - lambda graph, order=order: [FakeSCC(order)], - ) - mapper = Mapper(group_map) - errors = Errors(options) - modules = emitmodule.compile_modules_to_ir( - result, mapper, compiler_options, errors - ) - assert errors.num_errors == 0, errors.new_messages() - classes = { - cl.fullname: cl for module in modules.values() for cl in module.classes - } - children = classes["a.A"].children - assert children is not None - children_by_order.append([child.fullname for child in children]) - - assert children_by_order[1] == children_by_order[0] - finally: - result.manager.metastore.close() diff --git a/mypyc/test/test_irbuild.py b/mypyc/test/test_irbuild.py index 7e3993e267e74..f1f0ec777c3da 100644 --- a/mypyc/test/test_irbuild.py +++ b/mypyc/test/test_irbuild.py @@ -59,7 +59,6 @@ "irbuild-math.test", "irbuild-weakref.test", "irbuild-librt-strings.test", - "irbuild-librt-random.test", "irbuild-base64.test", "irbuild-time.test", "irbuild-match.test", diff --git a/mypyc/test/test_run.py b/mypyc/test/test_run.py index e7be5fcf8425a..8fb861f5c2aae 100644 --- a/mypyc/test/test_run.py +++ b/mypyc/test/test_run.py @@ -81,7 +81,6 @@ "run-librt-strings.test", "run-base64.test", "run-librt-time.test", - "run-librt-random.test", "run-match.test", "run-vecs-i64-interp.test", "run-vecs-misc-interp.test", diff --git a/pyproject.toml b/pyproject.toml index e5dd37644e2d6..9313335b0d969 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ requires = [ "mypy_extensions>=1.0.0", "pathspec>=1.0.0", "tomli>=1.1.0; python_version<'3.11'", - "librt>=0.11.0; platform_python_implementation != 'PyPy'", + "librt>=0.10.0; platform_python_implementation != 'PyPy'", # the following is from build-requirements.txt "types-psutil", "types-setuptools", @@ -46,7 +46,6 @@ classifiers = [ "Programming Language :: Python :: 3.12", "Programming 
Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", - "Programming Language :: Python :: 3.15", "Topic :: Software Development", "Typing :: Typed", ] @@ -58,7 +57,7 @@ dependencies = [ "mypy_extensions>=1.0.0", "pathspec>=1.0.0", "tomli>=1.1.0; python_version<'3.11'", - "librt>=0.11.0; platform_python_implementation != 'PyPy'", + "librt>=0.10.0; platform_python_implementation != 'PyPy'", "ast-serialize>=0.3.0,<1.0.0", ] dynamic = ["version"] diff --git a/setup.py b/setup.py index 1879f6892ba8f..d36a6bfa2c2dc 100644 --- a/setup.py +++ b/setup.py @@ -153,7 +153,6 @@ def run(self) -> None: debug_level = os.getenv("MYPYC_DEBUG_LEVEL", "1") force_multifile = os.getenv("MYPYC_MULTI_FILE", "") == "1" log_trace = bool(int(os.getenv("MYPYC_LOG_TRACE", "0"))) - separate = os.getenv("MYPYC_SEPARATE", "") == "1" ext_modules = mypycify( mypyc_targets + ["--config-file=mypy_bootstrap.ini"], opt_level=opt_level, @@ -162,7 +161,6 @@ def run(self) -> None: # our Appveyor builds run out of memory sometimes. multi_file=sys.platform == "win32" or force_multifile, log_trace=log_trace, - separate=separate, # Mypy itself is allowed to use native_internal extension. 
depends_on_librt_internal=True, ) diff --git a/test-data/unit/check-async-await.test b/test-data/unit/check-async-await.test index e887b4c575528..907bf6027f657 100644 --- a/test-data/unit/check-async-await.test +++ b/test-data/unit/check-async-await.test @@ -1091,35 +1091,3 @@ class Launcher(P): # E: "list[int]" has no attribute "__aiter__" (not async iterable) [builtins fixtures/async_await.pyi] [typing fixtures/typing-async.pyi] - -[case testTypesCoroutineDecoratorUntyped] -import types - -@types.coroutine -def f(x): - yield - 1 + "" # OK, in untyped function - return 1 - -async def test() -> None: - reveal_type(f) # N: Revealed type is "def (x: Any) -> typing.AwaitableGenerator[Any, Any, Any, Any]" - reveal_type(await f(1)) # N: Revealed type is "Any" - -[builtins fixtures/async_await.pyi] -[typing fixtures/typing-async.pyi] - -[case testTypesCoroutineDecoratorPartiallyTyped] -import types - -@types.coroutine -def f(x: int): - yield - 1 + "" # E: Unsupported left operand type for + ("int") - return 1 - -async def test() -> None: - reveal_type(f) # N: Revealed type is "def (x: builtins.int) -> typing.AwaitableGenerator[Any, Any, Any, Any]" - reveal_type(await f(1)) # N: Revealed type is "Any" - -[builtins fixtures/async_await.pyi] -[typing fixtures/typing-async.pyi] diff --git a/test-data/unit/check-functions.test b/test-data/unit/check-functions.test index 893eefb36f874..bd2bf26613940 100644 --- a/test-data/unit/check-functions.test +++ b/test-data/unit/check-functions.test @@ -3814,9 +3814,9 @@ f(1, b'x', 1) main:3: error: Missing positional argument "y" in call to "f" [case testMissingPositionalArgShiftDetectedFirst] -def f(x: int, y: str, z: bytes, last: float) -> None: ... +def f(x: int, y: str, z: bytes) -> None: ... 
-f("hello", b'x', 1.5) +f("hello", b'x') [builtins fixtures/primitives.pyi] [out] main:3: error: Missing positional argument "x" in call to "f" @@ -3891,46 +3891,3 @@ f("hello", b'x') main:3: error: Missing positional argument "z" in call to "f" main:3: error: Argument 1 to "f" has incompatible type "str"; expected "int" main:3: error: Argument 2 to "f" has incompatible type "bytes"; expected "str" - -[case testMissingPositionalArgNamesHigherN] -# See https://github.com/python/mypy/issues/21427 -def convert2(first: int, second: str) -> None: ... - -# Possibly omitted arg, but we still issue two errors because there is only one argument -convert2("hello") # E: Missing positional argument "second" in call to "convert2" \ - # E: Argument 1 to "convert2" has incompatible type "str"; expected "int" - -# Other cases -convert2() # E: Missing positional arguments "first", "second" in call to "convert2" - -convert2("hello", 1) # E: Argument 1 to "convert2" has incompatible type "str"; expected "int" \ - # E: Argument 2 to "convert2" has incompatible type "int"; expected "str" - -def convert3(first: int, second: str, third: float) -> None: ... - -# Possibly omitted arg, but we now only issue one error -convert3("hello", 3.15) # E: Missing positional argument "first" in call to "convert3" - -# Other cases -convert3("hello") # E: Missing positional arguments "second", "third" in call to "convert3" \ - # E: Argument 1 to "convert3" has incompatible type "str"; expected "int" - -convert3(3.15, "hello") # E: Missing positional argument "third" in call to "convert3" \ - # E: Argument 1 to "convert3" has incompatible type "float"; expected "int" - -def convert4(first: int, second: str, third: float, fourth: bytes) -> None: ... 
- -# Possibly omitted arg, but we now only issue one error -convert4("hello", 3.15, b'') # E: Missing positional argument "first" in call to "convert4" - -# Other cases -convert4("hello") # E: Missing positional arguments "second", "third", "fourth" in call to "convert4" \ - # E: Argument 1 to "convert4" has incompatible type "str"; expected "int" - -convert4("hello", 3.15) # E: Missing positional arguments "third", "fourth" in call to "convert4" \ - # E: Argument 1 to "convert4" has incompatible type "str"; expected "int" \ - # E: Argument 2 to "convert4" has incompatible type "float"; expected "str" - -convert4(b'', "hello", 3.15) # E: Missing positional argument "fourth" in call to "convert4" \ - # E: Argument 1 to "convert4" has incompatible type "bytes"; expected "int" -[builtins fixtures/primitives.pyi] diff --git a/test-data/unit/check-generics.test b/test-data/unit/check-generics.test index b6a97c70f4950..a3a5b02d54f89 100644 --- a/test-data/unit/check-generics.test +++ b/test-data/unit/check-generics.test @@ -3542,7 +3542,7 @@ reveal_type(C.foo) # N: Revealed type is "def [T] (self: __main__.B[T`1]) -> T` reveal_type(C[int].foo) # N: Revealed type is "def (self: __main__.B[builtins.int]) -> builtins.int" reveal_type(D.foo) # N: Revealed type is "def (self: __main__.B[builtins.int]) -> builtins.int" -[case testDeterminismFromJoinOrderingInSolver1] +[case testDeterminismFromJoinOrderingInSolver] # Used to fail non-deterministically # https://github.com/python/mypy/issues/19121 from __future__ import annotations @@ -3595,46 +3595,6 @@ def draw_none( takes_int_str_none(c3) [builtins fixtures/tuple.pyi] -[case testDeterminismFromJoinOrderingInSolver2] -# Used to fail non-deterministically -# https://github.com/python/mypy/issues/21445 -from typing import Generic, Iterable, TypeVar, overload - -class A: ... - -@overload -def f0(a: A, b: object, /) -> object: ... -@overload -def f0(a: object, b: int, /) -> object: ... -def f0(a, b, /): ... 
- -@overload -def f1(a: int, b: object, /) -> object: ... -@overload -def f1(a: object, b: A, /) -> object: ... -def f1(a, b, /): ... - -def g(a, b, /): ... - -T = TypeVar("T") -K = TypeVar("K") -V = TypeVar("V") - -class ziplike(Generic[T]): - def __new__(cls, x: str, y: tuple[V, ...], /) -> ziplike[tuple[str, V]]: - raise - def __iter__(self) -> ziplike[T]: - return self - def __next__(self) -> T: - raise - -class dictlike(Generic[K, V]): - def __init__(self, arg: Iterable[tuple[K, V]]) -> None: pass - -x = dictlike(ziplike("012", (f0, f1, g))) -reveal_type(x) # N: Revealed type is "__main__.dictlike[builtins.str, Overload(def (Any, Any) -> Any, def (Any, Any) -> Any, def (Any, Any) -> Any, def (Any, Any) -> Any)]" -[builtins fixtures/dict.pyi] - [case testPropertyWithGenericSetter] from typing import TypeVar diff --git a/test-data/unit/check-incremental.test b/test-data/unit/check-incremental.test index db15b73419109..6911a350376f3 100644 --- a/test-data/unit/check-incremental.test +++ b/test-data/unit/check-incremental.test @@ -7995,19 +7995,3 @@ import mod [out2] main:2: error: Cannot find implementation or library stub for module named "mod" main:2: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports - -[case testIncrementalFileConfigCommentsStale] --- When a dependency changes, the importing module becomes stale and is --- reprocessed via process_stale_scc. As inline config comments are not cached --- (by design), moving the order of processing the stale SCC can accidentally --- break file config comments on subsequent runs. 
-# mypy: disable-error-code="import-not-found" -import nonexistent -import b -[file b.py] -x = 1 -[file b.py.2] -x = "hello" -[builtins fixtures/module.pyi] -[stale b] -[rechecked b] diff --git a/test-data/unit/check-inference.test b/test-data/unit/check-inference.test index a5b3ae7238a5a..91c1b8558c575 100644 --- a/test-data/unit/check-inference.test +++ b/test-data/unit/check-inference.test @@ -4369,22 +4369,3 @@ def g() -> None: reveal_type(x) # N: Revealed type is "None | builtins.int" x = "" # E: Incompatible types in assignment (expression has type "str", variable has type "int | None") reveal_type(x) # N: Revealed type is "None | builtins.int" - -[case testLocalPartialTypesWithGlobalInitializedToEmptyListAndRedefine1] -# flags: --allow-redefinition -a = [] # E: Need type annotation for "a" (hint: "a: list[] = ...") - -def f() -> None: - a.append(1) - -reveal_type(a) # N: Revealed type is "builtins.list[Any]" -[builtins fixtures/list.pyi] - -[case testLocalPartialTypesWithGlobalInitializedToEmptyListAndRedefine2] -# flags: --allow-redefinition -x = [] # E: Need type annotation for "x" (hint: "x: list[] = ...") - -# This used to crash. 
-def f() -> None: - global x - x diff --git a/test-data/unit/check-isinstance.test b/test-data/unit/check-isinstance.test index fe093220aa9da..029224e122168 100644 --- a/test-data/unit/check-isinstance.test +++ b/test-data/unit/check-isinstance.test @@ -2294,16 +2294,18 @@ def f(x: Optional[int], lst: Optional[List[int]], nested_any: List[List[Any]]) - [case testNarrowTypeAfterInTuple] # flags: --warn-unreachable +from typing import Optional class A: pass class B(A): pass class C(A): pass -def f(y: B | None): - if y in (B(), C()): - reveal_type(y) # N: Revealed type is "__main__.B" - else: - reveal_type(y) # N: Revealed type is "__main__.B | None" +y: Optional[B] +if y in (B(), C()): + reveal_type(y) # N: Revealed type is "__main__.B" +else: + reveal_type(y) # N: Revealed type is "__main__.B | None" [builtins fixtures/tuple.pyi] +[out] [case testNarrowTypeAfterInNamedTuple] # flags: --warn-unreachable diff --git a/test-data/unit/check-narrowing.test b/test-data/unit/check-narrowing.test index 944161810389b..4f23d9147205e 100644 --- a/test-data/unit/check-narrowing.test +++ b/test-data/unit/check-narrowing.test @@ -3201,7 +3201,7 @@ class X: [builtins fixtures/dict.pyi] -[case testNarrowStringInLiteralContainer] +[case testTypeNarrowingStringInLiteralContainer] # flags: --strict-equality --warn-unreachable from typing import Literal @@ -3235,69 +3235,6 @@ def narrow_set(x: str, t: set[Literal['a', 'b']]): reveal_type(x) # N: Revealed type is "builtins.str" [builtins fixtures/primitives.pyi] -[case testNarrowLiteralInLiteralContainer] -# flags: --strict-equality --warn-unreachable -from typing import Literal - -def narrow_tuple_exact(x: Literal['a', 'b', 'c'], t: tuple[Literal['a'], Literal['b']]): - if x in t: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - else: - reveal_type(x) # N: Revealed type is "Literal['c']" - - if x not in t: - reveal_type(x) # N: Revealed type is "Literal['c']" - else: - reveal_type(x) # N: Revealed type is 
"Literal['a'] | Literal['b']" - -def narrow_tuple_expression(x: Literal['a', 'b', 'c']): - # TODO: this should match narrow_tuple_exact - if x in ('a', 'b'): - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" - - if x not in ('a', 'b'): - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - -def narrow_tuple_union(x: Literal['a', 'b', 'c'], t: tuple[Literal['a', 'b']]): - if x in t: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" - - if x not in t: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - -def narrow_tuple_with_other_type(x: Literal['a', 'b', 'c'], t: tuple[Literal['a'], int]): - if x in t: - reveal_type(x) # N: Revealed type is "Literal['a']" - else: - reveal_type(x) # N: Revealed type is "Literal['b'] | Literal['c']" - -def narrow_homo_tuple(x: Literal['a', 'b', 'c'], t: tuple[Literal['a', 'b'], ...]): - if x in t: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" - -def narrow_list(x: Literal['a', 'b', 'c'], t: list[Literal['a', 'b']]): - if x in t: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" - -def narrow_set(x: Literal['a', 'b', 'c'], t: set[Literal['a', 'b']]): - if x in t: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b']" - else: - reveal_type(x) # N: Revealed type is "Literal['a'] | Literal['b'] | Literal['c']" -[builtins fixtures/primitives.pyi] - [case 
testNarrowingLiteralInLiteralContainer] # flags: --strict-equality --warn-unreachable diff --git a/test-data/unit/check-typeddict.test b/test-data/unit/check-typeddict.test index c74ae96d7763e..622004758364b 100644 --- a/test-data/unit/check-typeddict.test +++ b/test-data/unit/check-typeddict.test @@ -235,20 +235,6 @@ reveal_type(d) # N: Revealed type is "TypedDict('__main__.D', {})" [builtins fixtures/dict.pyi] [typing fixtures/typing-typeddict.pyi] -[case testTypedDictDecoratorUndefinedNames] -from typing import TypedDict - -@abc # E: Name "abc" is not defined -@efg.hij # E: Name "efg" is not defined -@klm[nop] # E: Name "klm" is not defined \ - # E: Name "nop" is not defined -@qrs.tuv[wxy] # E: Name "qrs" is not defined \ - # E: Name "wxy" is not defined -class A(TypedDict): - x: int -[builtins fixtures/dict.pyi] -[typing fixtures/typing-typeddict.pyi] - [case testTypedDictWithClassmethodAlternativeConstructorDoesNotCrash] # https://github.com/python/mypy/issues/5653 from typing import TypedDict diff --git a/test-data/unit/check-typevar-tuple.test b/test-data/unit/check-typevar-tuple.test index 7ca21b280aad0..703653227e200 100644 --- a/test-data/unit/check-typevar-tuple.test +++ b/test-data/unit/check-typevar-tuple.test @@ -2145,13 +2145,14 @@ match(b) # E: Argument 1 to "match" has incompatible type "Bad"; expected "PC[U [builtins fixtures/tuple.pyi] [case testVariadicTupleCollectionCheck] +from typing import Tuple, Optional from typing_extensions import Unpack -allowed: tuple[int, Unpack[tuple[int, ...]]] +allowed: Tuple[int, Unpack[Tuple[int, ...]]] -def f(x: int | None): - if x in allowed: - reveal_type(x) # N: Revealed type is "builtins.int" +x: Optional[int] +if x in allowed: + reveal_type(x) # N: Revealed type is "builtins.int" [builtins fixtures/tuple.pyi] [case testJoinOfVariadicTupleCallablesNoCrash] diff --git a/test-data/unit/check-typevar-values.test b/test-data/unit/check-typevar-values.test index e0ebc6ddf13ae..72bf9f7f8083a 100644 --- 
a/test-data/unit/check-typevar-values.test +++ b/test-data/unit/check-typevar-values.test @@ -744,17 +744,3 @@ def fn(w: W) -> W: reveal_type(w) # N: Revealed type is "builtins.int" return w [builtins fixtures/isinstance.pyi] - -[case testTypeVarValuesSubtypeOfAll] -from typing import TypeVar - -class B: ... -class C(B): ... - -S = TypeVar("S", B, C) -b = B() -c = C() -def g(x: S = c): # OK - ... -def h(x: S = b): # E: Incompatible default for parameter "x" (default has type "B", parameter has type "S") - ... diff --git a/test-data/unit/cmdline.test b/test-data/unit/cmdline.test index cfba7a81e9285..eb8f4931fa2fe 100644 --- a/test-data/unit/cmdline.test +++ b/test-data/unit/cmdline.test @@ -363,11 +363,11 @@ mypy: error: Mypy no longer supports checking Python 2 code. Consider pinning to python_version = 3.10 [out] -[case testPythonVersionAccepted315] +[case testPythonVersionAccepted314] # cmd: mypy -c pass [file mypy.ini] \[mypy] -python_version = 3.15 +python_version = 3.14 [out] [case testPythonVersionFallback] diff --git a/test-data/unit/outputjson.test b/test-data/unit/outputjson.test index ec716d97b8f1e..89bcb99d3224c 100644 --- a/test-data/unit/outputjson.test +++ b/test-data/unit/outputjson.test @@ -43,25 +43,9 @@ bar('42') {"file": "main", "line": 14, "column": 0, "end_line": 14, "end_column": 9, "message": "No overload variant of \"foo\" matches argument type \"str\"", "hint": "Possible overload variants:\n def foo() -> None\n def foo(x: int) -> None", "code": "call-overload", "severity": "error"} {"file": "main", "line": 17, "column": 0, "end_line": 17, "end_column": 9, "message": "Too many arguments for \"bar\"", "hint": null, "code": "call-arg", "severity": "error"} -[case testOutputJsonParallel] -# flags: --output=json --num-workers=2 -def foo() -> None: - pass - -foo(1) -[out] -{"file": "main", "line": 5, "column": 0, "end_line": 5, "end_column": 6, "message": "Too many arguments for \"foo\"", "hint": null, "code": "call-arg", "severity": "error"} - 
[case testOutputJsonSyntaxError] # flags: --output=json klass foo [out] {"file": "main", "line": 2, "column": 7, "end_line": 2, "end_column": 8, "message": "Invalid syntax", "hint": null, "code": "syntax", "severity": "error"} !!! Mypy crashed !!! - -[case testOutputJsonSyntaxErrorParallel] -# flags: --output=json --num-workers=2 -break -[out] -{"file": "main", "line": 2, "column": 0, "end_line": 2, "end_column": 5, "message": "\"break\" outside loop", "hint": null, "code": null, "severity": "error"} -!!! Mypy crashed !!! diff --git a/test-requirements.txt b/test-requirements.txt index ec04e52e8a495..8ac31e0b34666 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,76 +5,71 @@ # pip-compile --allow-unsafe --output-file=test-requirements.txt --strip-extras test-requirements.in # ast-serialize==0.3.0 - # via -r mypy-requirements.txt -attrs==26.1.0 # via -r test-requirements.in -cfgv==3.5.0 +attrs==25.4.0 + # via -r test-requirements.in +cfgv==3.4.0 # via pre-commit -coverage==7.13.5 +coverage==7.10.7 # via pytest-cov distlib==0.4.0 # via virtualenv -execnet==2.1.2 +execnet==2.1.1 # via pytest-xdist -filelock==3.29.0 +filelock==3.20.0 # via # -r test-requirements.in - # python-discovery # virtualenv -identify==2.6.19 +identify==2.6.15 # via pre-commit -iniconfig==2.3.0 +iniconfig==2.1.0 # via pytest -librt==0.11.0 ; platform_python_implementation != "PyPy" +librt==0.10.0 ; platform_python_implementation != "PyPy" # via -r mypy-requirements.txt -lxml==6.1.0 ; python_version < "3.15" +lxml==6.0.2 ; python_version < "3.15" # via -r test-requirements.in mypy-extensions==1.1.0 # via -r mypy-requirements.txt -nodeenv==1.10.0 +nodeenv==1.9.1 # via pre-commit -packaging==26.2 +packaging==25.0 # via pytest -pathspec==1.1.1 +pathspec==1.0.0 # via -r mypy-requirements.txt -platformdirs==4.9.6 - # via - # python-discovery - # virtualenv +platformdirs==4.5.0 + # via virtualenv pluggy==1.6.0 # via # pytest # pytest-cov -pre-commit==4.6.0 +pre-commit==4.3.0 # via -r 
test-requirements.in -psutil==7.2.2 +psutil==7.1.0 # via -r test-requirements.in -pygments==2.20.0 +pygments==2.19.2 # via pytest -pytest==9.0.3 +pytest==8.4.2 # via # -r test-requirements.in # pytest-cov # pytest-xdist -pytest-cov==7.1.0 +pytest-cov==7.0.0 # via -r test-requirements.in pytest-xdist==3.8.0 # via -r test-requirements.in -python-discovery==1.3.0 - # via virtualenv pyyaml==6.0.3 # via pre-commit -tomli==2.4.1 +tomli==2.3.0 # via -r test-requirements.in -types-psutil==7.2.2.20260508 +types-psutil==7.0.0.20251001 # via -r build-requirements.txt -types-setuptools==82.0.0.20260508 +types-setuptools==80.9.0.20250822 # via -r build-requirements.txt typing-extensions==4.15.0 # via -r mypy-requirements.txt -virtualenv==21.3.1 +virtualenv==20.34.0 # via pre-commit # The following packages are considered to be unsafe in a requirements file: -setuptools==82.0.1 +setuptools==80.9.0 # via -r test-requirements.in diff --git a/tox.ini b/tox.ini index ab81b00d121f5..2126970afa991 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,6 @@ envlist = py312, py313, py314, - py315, docs, lint, type,