diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..fac757e --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,41 @@ +name: Tests + +on: + push: + branches: [ master, main, develop ] + pull_request: + branches: [ master, main, develop ] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest pytest-cov + pip install -e . + + - name: Download knots database + run: | + python -c "import pyknotid.catalogue; pyknotid.catalogue.download_database()" + + - name: Run tests + run: | + pytest -v --tb=short --cov=pyknotid --cov-report=xml --cov-report=term + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + file: ./coverage.xml + flags: unittests + name: codecov-umbrella + fail_ci_if_error: false diff --git a/.gitignore b/.gitignore index e956afb..44e91ad 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,28 @@ *.pyc *.so *.c -*.db \ No newline at end of file +*.db +dev/ + +# Build and distribution +*.egg-info/ +dist/ +build/ +__pycache__/ + +# Documentation +doc/_build/ +doc/_static/ + +# Testing +.pytest_cache/ +.coverage +coverage.xml +htmlcov/ + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ \ No newline at end of file diff --git a/.projectile b/.projectile deleted file mode 100644 index 02cb3ae..0000000 --- a/.projectile +++ /dev/null @@ -1,2 +0,0 @@ --/doc --*.png \ No newline at end of file diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..dc284fd --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,18 @@ +# Read the Docs configuration file for pyknotid +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +sphinx: + configuration: 
doc/conf.py + +python: + install: + - requirements: doc/requirements.txt + - method: pip + path: . diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 2d8376f..0000000 --- a/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ - -language: python - -python: - - "2.7" - - "3.4" - - "3.5" - - "3.6" - -install: - - pip install -r requirements.txt - - pip install planarity - - python setup.py install - - python -c "from pyknotid.catalogue import download_database; download_database()" - -script: - - pytest \ No newline at end of file diff --git a/CITATION.bib b/CITATION.bib new file mode 100644 index 0000000..9129965 --- /dev/null +++ b/CITATION.bib @@ -0,0 +1,7 @@ +@Misc{pyknotid, + author = {Alexander J Taylor and other SPOCK contributors}, + title = {pyknotid knot identification toolkit}, + howpublished = {\url{https://github.com/SPOCKnots/pyknotid}}, + note = {Accessed YYYY-MM-DD}, + year = 2017, +} diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000..0d58110 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,29 @@ +cff-version: 1.2.0 +message: "If you use pyknotid in your research, please cite it as below." +title: "pyknotid knot identification toolkit" +version: 0.5.4 +date-released: 2017-01-01 +url: "https://github.com/SPOCKnots/pyknotid" +repository-code: "https://github.com/SPOCKnots/pyknotid" +license: MIT +authors: + - family-names: "Taylor" + given-names: "Alexander J" + email: "alexander.taylor@bristol.ac.uk" + - name: "SPOCK contributors" +keywords: + - knot-theory + - topology + - mathematics + - knot-identification + - linking +abstract: "Python modules for detecting and measuring knotting and linking. pyknotid can analyse space-curves, i.e. sets of points in three-dimensions, or can parse standard topological representations of knot diagrams." 
+preferred-citation: + type: software + title: "pyknotid knot identification toolkit" + authors: + - family-names: "Taylor" + given-names: "Alexander J" + - name: "other SPOCK contributors" + year: 2017 + url: "https://github.com/SPOCKnots/pyknotid" diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9624d0e --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Alexander Taylor and other contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/LICENSE.txt b/LICENSE.txt deleted file mode 100644 index b348793..0000000 --- a/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright 2018 Alexander Taylor and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..3bcf6d1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include README.md +include LICENSE +include CITATION.cff +include CITATION.bib +recursive-include pyknotid *.tmpl *.pov *.py diff --git a/README.md b/README.md new file mode 100644 index 0000000..21f39d6 --- /dev/null +++ b/README.md @@ -0,0 +1,153 @@ +# Pyknotid + +[![PyPI version](https://img.shields.io/pypi/v/pyknotid.svg)](https://pypi.org/project/pyknotid/) +[![Documentation Status](https://readthedocs.org/projects/pyknotid/badge/?version=latest)](https://pyknotid.readthedocs.io/en/latest/?badge=latest) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) + +Python modules for detecting and measuring knotting and linking. pyknotid can analyse space-curves (sets of points in three dimensions) or parse standard topological representations of knot diagrams. + +

+ The knot 10_92, visualised by pyknotid +

+ +## Features + +- **Knot identification** from space curves or topological representations +- **Polynomial invariants** (Alexander, Jones, HOMFLY-PT, Kauffman) +- **Vassiliev invariants** computation +- **Knot simplification** using octree algorithms +- **Visualization** of knots in 3D +- **Database** of known knots for identification +- **High performance** with optional Numba JIT compilation (2-3x faster) + +## Installation + +pyknotid requires Python 3.8 or later. Install it with: + +```bash +pip install pyknotid +``` + +**For best performance**, install with Numba support: + +```bash +pip install pyknotid[performance] +``` + +This enables JIT-compiled high-performance numerical computations (2-3x faster). Without Numba, pyknotid will work but use slower pure Python fallbacks. + +### Development Installation + +To try the latest development version, clone this repository and run: + +```bash +git clone https://github.com/SPOCKnots/pyknotid.git +cd pyknotid +pip install -e . +``` + +For development with all optional dependencies: + +```bash +pip install -e ".[dev]" +``` + +## Quick Start + +```python +import pyknotid.spacecurves as sp +import pyknotid.make as mk + +# Create a figure-eight knot +k = sp.Knot(mk.figure_eight(num_points=100)) + +# Visualize it +k.plot() + +# Calculate the Alexander polynomial at t=-1 +k.alexander_polynomial(-1) +# Output: 6.9999999999999991 + +# Calculate the symbolic Alexander polynomial +import sympy as sym +t = sym.var('t') +k.alexander_polynomial(t) +# Output: 2/t - 3/t**2 + 2/t**3 + +# Simplify the knot representation +k.octree_simplify(5) +# Run 0 of 5, 100 points remain +# Run 1 of 5, 98 points remain +# ... +# Reduced to 77 points + +k.plot() +``` + +## Documentation + +Full documentation is available at [pyknotid.readthedocs.io](http://pyknotid.readthedocs.io). 
+ +## Requirements + +The following dependencies are automatically installed: + +- numpy >= 1.19 +- networkx >= 2.5 +- planarity >= 0.4 +- peewee >= 3.14 +- vispy >= 0.6 +- sympy >= 1.8 +- appdirs >= 1.4 +- requests >= 2.25 +- tqdm >= 4.60 + +### Optional Dependencies + +- **numba >= 0.55** (recommended for performance): Install with `pip install pyknotid[performance]` +- **pytest, pytest-cov, black, flake8, mypy** (for development): Install with `pip install pyknotid[dev]` +- **sphinx, sphinx-rtd-theme** (for documentation): Install with `pip install pyknotid[docs]` + +## About + +pyknotid was originally developed as part of the Leverhulme Trust Research Programme Grant RP2013-K-009: Scientific Properties of Complex Knots (SPOCK), a collaboration between the University of Bristol and Durham University in the UK. + +For more information, see the [SPOCK homepage](http://www.maths.dur.ac.uk/spock/index.html/). + +## Citation + +If you use pyknotid in your research, please cite it. See [CITATION.cff](CITATION.cff) for citation information, or use: + +```bibtex +@Misc{pyknotid, + author = {Alexander J Taylor and other SPOCK contributors}, + title = {pyknotid knot identification toolkit}, + howpublished = {\url{https://github.com/SPOCKnots/pyknotid}}, + year = 2017, +} +``` + +For more details, see our [citation guidelines](http://pyknotid.readthedocs.io/en/latest/sources/about.html#cite-us). + +## License + +pyknotid is released under the [MIT License](LICENSE). + +## Contributing + +Contributions are welcome! Please feel free to: + +- Report bugs or request features via [GitHub Issues](https://github.com/SPOCKnots/pyknotid/issues) +- Submit pull requests +- Improve documentation + +## Contact + +Questions or comments are welcome. Please do not hesitate to open an issue or pull request on GitHub. 
+ +## Links + +- **Homepage**: https://github.com/SPOCKnots/pyknotid +- **Documentation**: http://pyknotid.readthedocs.io +- **PyPI**: https://pypi.org/project/pyknotid/ +- **Bug Tracker**: https://github.com/SPOCKnots/pyknotid/issues diff --git a/README.rst b/README.rst deleted file mode 100644 index bbb3935..0000000 --- a/README.rst +++ /dev/null @@ -1,106 +0,0 @@ -Pyknotid -======== - -.. image:: https://travis-ci.org/SPOCKnots/pyknotid.svg?branch=master - :target: https://travis-ci.org/SPOCKnots/pyknotid - -Python (and optional Cython) modules for detecting and measuring -knotting and linking. pyknotid can analyse space-curves, i.e. sets of -points in three-dimensions, or can parse standard topological -representations of knot diagrams. - -pyknotid is released under the `MIT license `__. - -A graphical interface to some of these tools is available online at -`Knot ID `__. - -pyknotid was originally developed as part of the Leverhulme Trust -Research Programme Grant RP2013-K-009: Scientific Properties of -Complex Knots (SPOCK), a collaboration between the University of -Bristol and Durham University in the UK. For more information, see the -`SPOCK homepage `__. - -If you use pyknotid in your research, please `cite us -`__. - -Questions or comments are welcome, please email alexander.taylor@bristol.ac.uk. - -.. image:: doc/k10_92_ideal_small.png - :align: center - :scale: 25% - :alt: The knot 10_92, visualised by pyknotid. - -Documentation -------------- - -pyknotid is documented online at `readthedocs -`__. 
- -Installation ------------- - -pyknotid supports both Python 2.7 and Python 3.5+, you can install it with:: - - $ pip install pyknotid - -To try the latest development version, clone this repository and run:: - - $ python setup.py install - -Requirements -~~~~~~~~~~~~ - -If installing pyknotid without pip, the following dependencies are required: - -- cython (not essential, but strongly recommended) -- numpy -- sympy -- peewee -- networkx -- planarity - -Most of these are not hard requirements, but some functionality will -not be available if they are not present. - - -Example usage -------------- - -.. code:: python - - In [1]: import pyknotid.spacecurves as sp - - In [2]: import pyknotid.make as mk - - In [3]: k = sp.Knot(mk.three_twist(num_points=100)) - - In [4]: k.plot() - - In [5]: k.alexander_polynomial(-1) - Finding crossings - i = 0 / 97 - 7 crossings found - - Simplifying: initially 14 crossings - -> 10 crossings after 2 runs - Out[5]: 6.9999999999999991 - - In [6]: import sympy as sym - - In [7]: t = sym.var('t') - - In [8]: k.alexander_polynomial(t) - Simplifying: initially 10 crossings - -> 10 crossings after 1 runs - Out[8]: 2/t - 3/t**2 + 2/t**3 - - In [9]: k.octree_simplify(5) - Run 0 of 5, 100 points remain - Run 1 of 5, 98 points remain - Run 2 of 5, 104 points remain - Run 3 of 5, 92 points remain - Run 4 of 5, 77 points remain - - Reduced to 77 points - - In [10]: k.plot() diff --git a/doc/Makefile b/doc/Makefile index 2178198..b6227e4 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -3,11 +3,11 @@ # You can set these variables from the command line. SPHINXOPTS = -SPHINXBUILD = sphinx-build2 +SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build -# User-friendly check for sphinx-build2 +# User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. 
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif diff --git a/doc/conf.py b/doc/conf.py index 7212062..d1ffa4f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -122,7 +122,8 @@ #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] +# html_theme_path is no longer needed with modern sphinx_rtd_theme +# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". diff --git a/doc/index.rst b/doc/index.rst index 0582cde..6701abd 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,5 +1,5 @@ pyknotid -======= +======== .. image:: k10_51_ideal.png :scale: 50% @@ -30,7 +30,6 @@ Contents sources/invariants sources/representations/index sources/catalogue/index - sources/make/index sources/visualise sources/about diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000..d53954d --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,3 @@ +# Documentation build requirements for ReadTheDocs +sphinx>=4.0 +sphinx-rtd-theme diff --git a/doc/sources/overview.rst b/doc/sources/overview.rst index 02de17d..3e0ca87 100644 --- a/doc/sources/overview.rst +++ b/doc/sources/overview.rst @@ -113,8 +113,7 @@ Example knots ------------- pyknotid includes several functions for creating example knotted space -curves. See the :doc:`example knots documentation ` for -more details. +curves in the :mod:`pyknotid.make` module. 
Example:: diff --git a/pyknotid/catalogue/database.py b/pyknotid/catalogue/database.py index 73d8038..c0ac5a8 100644 --- a/pyknotid/catalogue/database.py +++ b/pyknotid/catalogue/database.py @@ -21,7 +21,10 @@ ''' -from peewee import * +from peewee import ( + Model, SqliteDatabase, CharField, IntegerField, + TextField, BooleanField +) import json import os from os.path import realpath, dirname, exists, join diff --git a/pyknotid/catalogue/getdb.py b/pyknotid/catalogue/getdb.py index ac29fe4..61263e3 100644 --- a/pyknotid/catalogue/getdb.py +++ b/pyknotid/catalogue/getdb.py @@ -15,7 +15,7 @@ from functools import wraps from os.path import realpath, dirname, exists, join, abspath -from os import mkdir, remove +from os import makedirs, remove def find_database(db_version=None): '''Returns the path to the knots.db file. @@ -70,7 +70,7 @@ def download_database(): ''' dirn = download_target_dir() if not exists(dirn): - mkdir(dirn) + makedirs(dirn, exist_ok=True) from pyknotid.catalogue import db_version db_name = 'knots_{}.db'.format(db_version) diff --git a/pyknotid/catalogue/improve.py b/pyknotid/catalogue/improve.py index 87203c8..f625c0e 100644 --- a/pyknotid/catalogue/improve.py +++ b/pyknotid/catalogue/improve.py @@ -13,14 +13,12 @@ ''' -from __future__ import print_function - from pyknotid.catalogue.database import Knot, db from pyknotid.catalogue.converters import homfly_to_jones, db2py_homfly, py2db_jones import csv import json -import numpy as n +import numpy as np def planar_writhe_from_dt_code(max_crossings=12): @@ -122,17 +120,17 @@ def alexander_imags_from_alexander(min_crossings=None): array = json.loads(knot.alexander) exp_2s = [(-1.)**i for i in range(len(array))] - alexander_imag_2 = int(n.round(n.abs(n.sum([ + alexander_imag_2 = int(np.round(np.abs(np.sum([ coefficient * exponential for coefficient, exponential in zip(array, exp_2s)])))) - exp_3s = [n.exp(i*2*n.pi*1j/3.) 
for i in range(len(array))] - alexander_imag_3 = int(n.round(n.abs(n.sum([ + exp_3s = [np.exp(i*2*np.pi*1j/3.) for i in range(len(array))] + alexander_imag_3 = int(np.round(np.abs(np.sum([ coefficient * exponential for coefficient, exponential in zip(array, exp_3s)])))) exp_4s = [(1.j)**i for i in range(len(array))] - alexander_imag_4 = int(n.round(n.abs(n.sum([ + alexander_imag_4 = int(np.round(np.abs(np.sum([ coefficient * exponential for coefficient, exponential in zip(array, exp_4s)])))) diff --git a/pyknotid/cinvariants.pyx b/pyknotid/cinvariants.pyx deleted file mode 100644 index 088219c..0000000 --- a/pyknotid/cinvariants.pyx +++ /dev/null @@ -1,71 +0,0 @@ - -import numpy as n -cimport numpy as n -cimport cython - -from pyknotid.utils import vprint - -cdef long crude_modulus(long val, long modulo): - if val < 0: - return val + modulo - return val - - -@cython.wraparound(False) -@cython.boundscheck(False) -cpdef vassiliev_degree_3(long [:, :] arrows): - cdef long num_arrows = len(arrows) - cdef long num_crossings = len(arrows) * 2 - - cdef long a1s, a1e, a2s, a2e, a3s, a3e - cdef long [:] arrow1, arrow2, arrow3 - cdef long i1, i2, i3 - cdef long sign1, sign2, sign3 - - cdef set used_sets = set() - cdef long representations_sum_1 = 0 - cdef long representations_sum_2 = 0 - cdef tuple ordered_indices - for i1 in range(num_arrows): - arrow1 = arrows[i1] - a1s = arrow1[0] - a1e = arrow1[1] - sign1 = arrow1[2] - - a1e = crude_modulus(a1e - a1s, num_crossings) - - for i2 in range(num_arrows): - arrow2 = arrows[i2] - a2s = arrow2[0] - a2e = arrow2[1] - sign2 = arrow2[2] - - a2s = crude_modulus(a2s - a1s, num_crossings) - a2e = crude_modulus(a2e - a1s, num_crossings) - - for i3 in range(num_arrows): - arrow3 = arrows[i3] - a3s = arrow3[0] - a3e = arrow3[1] - sign3 = arrow3[2] - - a3s = crude_modulus(a3s - a1s, num_crossings) - a3e = crude_modulus(a3e - a1s, num_crossings) - - ordered_indices = tuple(sorted((i1, i2, i3))) - if ordered_indices in used_sets: - continue 
- - if (a2s < a1e and a3e < a1e and a3e > a2s and - a3s > a1e and a2e > a3s): - representations_sum_1 += sign1 * sign2 * sign3 - used_sets.add(ordered_indices) - if (a2e < a1e and a3s < a1e and a3s > a2e and - a2s > a1e and a3e > a2s): - representations_sum_2 += sign1 * sign2 * sign3 - used_sets.add(ordered_indices) - - - return representations_sum_1 / 2. + representations_sum_2 - - diff --git a/pyknotid/cli/analyse_knot_file.py b/pyknotid/cli/analyse_knot_file.py index c4e4485..6f90413 100755 --- a/pyknotid/cli/analyse_knot_file.py +++ b/pyknotid/cli/analyse_knot_file.py @@ -1,6 +1,4 @@ -#!/usr/bin/env python2 - -from __future__ import print_function, division +#!/usr/bin/env python3 import argparse import sys diff --git a/pyknotid/cli/plot_knot.py b/pyknotid/cli/plot_knot.py index 4b565a2..d04e081 100755 --- a/pyknotid/cli/plot_knot.py +++ b/pyknotid/cli/plot_knot.py @@ -1,6 +1,4 @@ -#!/usr/bin/env python2 - -from __future__ import print_function, division +#!/usr/bin/env python3 import argparse import sys diff --git a/pyknotid/invariants.py b/pyknotid/invariants.py index 7976dbb..c5346d0 100644 --- a/pyknotid/invariants.py +++ b/pyknotid/invariants.py @@ -24,13 +24,15 @@ ----------------- ''' -from __future__ import print_function import subprocess import re import sympy as sym import numpy as np from pyknotid.utils import vprint +from pyknotid.logger import get_logger + +logger = get_logger(__name__) def alexander(representation, variable=-1, quadrant='lr', simplify=True, mode='python'): @@ -95,7 +97,7 @@ def alexander(representation, variable=-1, quadrant='lr', simplify=True, if len(code_list) == 0: return 1 elif len(code_list) > 1: - raise Exception('tried to calculate alexander polynomial' + raise ValueError('tried to calculate alexander polynomial ' 'for something with more than 1 component') crossings = code_list[0] @@ -104,7 +106,7 @@ def alexander(representation, variable=-1, quadrant='lr', simplify=True, return 1 if quadrant not in ['lr', 'ur', 'ul', 
'll']: - raise Exception('invalid quadrant') + raise ValueError('invalid quadrant') mode = mode.lower() if mode == 'maxima': @@ -132,7 +134,6 @@ def _alexander_numpy(crossings, variable=-1.0, quadrant='lr'): at a float), assuming the input has been sanitised by :func:`alexander`. ''' - import numpy as n num_crossings = int(len(crossings)/2) dtype = complex if isinstance(variable, complex) else float matrix = np.zeros((num_crossings, num_crossings), dtype=dtype) @@ -287,7 +288,7 @@ def alexander_maxima(representation, quadrant='ul', verbose=False, result = subprocess.check_output( ['maxima', '-b', 'maxima_batch.maxima']) - print('Maxima output is:\n', result) + logger.info(f'Maxima computation completed, processing output ({len(result)} bytes)') result = result.decode('utf-8').split('\n')[-3][6:] t = sym.var('t') @@ -640,7 +641,7 @@ def _mathematica_matrix(cs, quadrant='lr', verbose=False): for i, crossing in enumerate(cs): if verbose and (i+1) % 100 == 0: sys.stdout.write('\ri = {0} / {1}'.format(i, len(cs))) - print('crossing is', crossing) + logger.info(f'Processing crossing {i+1}/{len(cs)}: identifier={crossing[0]}') identifier, upper, direc = crossing for entry in crossing_dict: if entry[0] == identifier: @@ -665,7 +666,7 @@ def _mathematica_matrix(cs, quadrant='lr', verbose=False): matrix_element)) spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -678,7 +679,7 @@ def _mathematica_matrix(cs, quadrant='lr', verbose=False): line_num % num_crossings, '-1')) spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -688,7 +689,7 @@ def 
_mathematica_matrix(cs, quadrant='lr', verbose=False): new_matrix_element)) spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -792,7 +793,7 @@ def _maxima_matrix(cs, quadrant='lr', verbose=False): (crossing_num, line_num % num_crossings)] = matrix_element spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -804,7 +805,7 @@ def _maxima_matrix(cs, quadrant='lr', verbose=False): mathmat_entries[(crossing_num, line_num % num_crossings)] = '-1' spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -813,7 +814,7 @@ def _maxima_matrix(cs, quadrant='lr', verbose=False): (crossing_num, line_num % num_crossings)] = new_matrix_element spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -891,7 +892,7 @@ def _cypari_matrix(cs, quadrant='lr', verbose=False): (crossing_num, line_num % num_crossings)] = matrix_element spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -903,7 +904,7 @@ def 
_cypari_matrix(cs, quadrant='lr', verbose=False): mathmat_entries[(crossing_num, line_num % num_crossings)] = '-1' spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) @@ -912,15 +913,15 @@ def _cypari_matrix(cs, quadrant='lr', verbose=False): (crossing_num, line_num % num_crossings)] = new_matrix_element spec = (crossing_num, line_num % num_crossings) if spec in written_indices: - print(spec) + logger.info(f'Duplicate matrix entry detected: crossing={crossing_num}, line={line_num % num_crossings}') else: written_indices.append((crossing_num, line_num % num_crossings)) if verbose: - print + logger.info('Matrix construction completed') - print('Warning: quadrant ignored!') + logger.info('Note: quadrant parameter is not used in this calculation mode') outstrs = ['['] num_crossings = int(len(cs) / 2) @@ -1217,8 +1218,8 @@ def _vassiliev_degree_3_python(representation): signs[i3]) used_sets.add(ordered_indices) - print() - + logger.info(f'Vassiliev degree 3 calculation completed: sum1={representations_sum_1}, sum2={representations_sum_2}') + return int(round(representations_sum_1 / 2.)) + representations_sum_2 @@ -1240,11 +1241,11 @@ def _vassiliev_degree_3_numpy(representation): gc, representation.crossing_numbers) try: - from pyknotid import cinvariants + from pyknotid import ninvariants except ImportError: - print('Failed to import cinvariants. 
Using *slow* python numpy method.') + logger.info('Numba extension ninvariants not available, using Python/NumPy implementation (slower performance)') else: - return int(round(cinvariants.vassiliev_degree_3(arrows))) + return int(round(ninvariants.vassiliev_degree_3(arrows))) num_crossings = len(arrows) * 2 @@ -1434,9 +1435,8 @@ def virtual_vassiliev_degree_3(representation): representations_sum -= (signs[i1] * signs[i2] * signs[i3]) used_sets.add(ordered_indices) - # print('6') - print() + logger.info(f'Virtual Vassiliev degree 3 calculation completed: result={representations_sum}') return representations_sum return int(round(representations_sum)) diff --git a/pyknotid/io.py b/pyknotid/io.py index 4d56bbb..dc6a07f 100644 --- a/pyknotid/io.py +++ b/pyknotid/io.py @@ -6,7 +6,7 @@ ''' import json -import numpy as n +import numpy as np def to_json_file(points, filen): @@ -21,7 +21,7 @@ def to_json_file(points, filen): filen : str The (relative) filename to save in ''' - points = n.array(points).tolist() + points = np.array(points).tolist() with open(filen, 'w') as fileh: json.dump(points, fileh) @@ -38,7 +38,7 @@ def from_json_file(filen): with open(filen, 'r') as fileh: points = json.load(fileh) - return n.array(points) + return np.array(points) def from_csv(filen, index_col=None, header_row=None, **kwargs): diff --git a/pyknotid/logger.py b/pyknotid/logger.py new file mode 100644 index 0000000..735c0ba --- /dev/null +++ b/pyknotid/logger.py @@ -0,0 +1,145 @@ +""" +Logging configuration for pyknotid. + +This module provides a centralized logging setup for the pyknotid package. +Use this instead of print statements for debug and informational output. 
+ +Example usage: + from pyknotid.logger import get_logger + + logger = get_logger(__name__) + logger.debug("Debug information") + logger.info("Processing knot...") + logger.warning("Potential issue detected") + logger.error("Error occurred") +""" + +import logging +import sys +from typing import Optional + +# Default log format +DEFAULT_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' +SIMPLE_FORMAT = '%(levelname)s: %(message)s' + +# Global logger cache +_loggers = {} + + +def get_logger(name: str = 'pyknotid', level: Optional[int] = None) -> logging.Logger: + """ + Get or create a logger with the specified name. + + Parameters + ---------- + name : str + The name of the logger, typically __name__ from the calling module + level : int, optional + The logging level (e.g., logging.DEBUG, logging.INFO) + If None, uses the environment variable PYKNOTID_LOG_LEVEL or INFO + + Returns + ------- + logging.Logger + Configured logger instance + """ + if name in _loggers: + return _loggers[name] + + logger = logging.getLogger(name) + + # Only configure if no handlers exist + if not logger.handlers: + # Determine log level + if level is None: + import os + level_name = os.environ.get('PYKNOTID_LOG_LEVEL', 'INFO') + level = getattr(logging, level_name.upper(), logging.INFO) + + logger.setLevel(level) + + # Create console handler + handler = logging.StreamHandler(sys.stdout) + handler.setLevel(level) + + # Create formatter + formatter = logging.Formatter(SIMPLE_FORMAT) + handler.setFormatter(formatter) + + # Add handler to logger + logger.addHandler(handler) + + _loggers[name] = logger + return logger + + +def set_log_level(level: int, logger_name: Optional[str] = None): + """ + Set the log level for a specific logger or all pyknotid loggers. + + Parameters + ---------- + level : int + The logging level (e.g., logging.DEBUG, logging.INFO) + logger_name : str, optional + The name of the logger to configure. 
If None, sets level for all + cached loggers + """ + if logger_name: + logger = logging.getLogger(logger_name) + logger.setLevel(level) + for handler in logger.handlers: + handler.setLevel(level) + else: + # Set level for all cached loggers + for logger in _loggers.values(): + logger.setLevel(level) + for handler in logger.handlers: + handler.setLevel(level) + + +def configure_logger( + name: str = 'pyknotid', + level: int = logging.INFO, + format_string: str = SIMPLE_FORMAT, + stream=sys.stdout +) -> logging.Logger: + """ + Configure a logger with custom settings. + + Parameters + ---------- + name : str + The name of the logger + level : int + The logging level + format_string : str + The format string for log messages + stream : file-like + The output stream (default: sys.stdout) + + Returns + ------- + logging.Logger + Configured logger instance + """ + logger = logging.getLogger(name) + logger.setLevel(level) + + # Remove existing handlers + for handler in logger.handlers[:]: + logger.removeHandler(handler) + + # Add new handler + handler = logging.StreamHandler(stream) + handler.setLevel(level) + formatter = logging.Formatter(format_string) + handler.setFormatter(formatter) + logger.addHandler(handler) + + _loggers[name] = logger + return logger + + +# Create a default logger for convenience +default_logger = get_logger('pyknotid') diff --git a/pyknotid/make/__init__.py b/pyknotid/make/__init__.py index 481b711..89d63f4 100644 --- a/pyknotid/make/__init__.py +++ b/pyknotid/make/__init__.py @@ -11,4 +11,13 @@ # from pyknotid.make.ideal import available_ideal_knots, ideal_knot from pyknotid.make.torus import torus_knot, torus_link -from pyknotid.make.named import * +from pyknotid.make.named import ( + unknot, k3_1, trefoil, k4_1, figure_eight, + lissajous, k5_2, k6_1, k3_1_composite_3_1, k8_21 +) + +__all__ = [ + 'torus_knot', 'torus_link', + 'unknot', 'k3_1', 'trefoil', 'k4_1', 'figure_eight', + 'lissajous', 'k5_2', 'k6_1', 'k3_1_composite_3_1', 'k8_21' +] 
diff --git a/pyknotid/make/named.py b/pyknotid/make/named.py index c836108..2b68053 100644 --- a/pyknotid/make/named.py +++ b/pyknotid/make/named.py @@ -13,38 +13,42 @@ ''' -import numpy as n import numpy as np from pyknotid.spacecurves.knot import Knot +__all__ = [ + 'unknot', 'k3_1', 'trefoil', 'k4_1', 'figure_eight', + 'lissajous', 'k5_2', 'k6_1', 'k3_1_composite_3_1', 'k8_21' +] + def unknot(num_points=100): '''Returns a simple circle.''' - data = n.zeros((num_points, 3), dtype=n.float64) - ts = n.linspace(0, 2*n.pi, num_points) - data[:, 0] = 3*n.sin(ts) - data[:, 1] = 3*n.cos(ts) - data[:, 2] = n.sin(3*ts) + data = np.zeros((num_points, 3), dtype=np.float64) + ts = np.linspace(0, 2*np.pi, num_points) + data[:, 0] = 3*np.sin(ts) + data[:, 1] = 3*np.cos(ts) + data[:, 2] = np.sin(3*ts) return Knot(data) def k3_1(num_points=100): '''Returns a particular trefoil knot conformation.''' - data = n.zeros((num_points, 3), dtype=n.float64) - ts = n.linspace(0, 2*n.pi, num_points) - data[:, 0] = (2+n.cos(3*ts))*n.cos(2*ts) - data[:, 1] = (2+n.cos(3*ts))*n.sin(2*ts) - data[:, 2] = n.sin(3*ts) + data = np.zeros((num_points, 3), dtype=np.float64) + ts = np.linspace(0, 2*np.pi, num_points) + data[:, 0] = (2+np.cos(3*ts))*np.cos(2*ts) + data[:, 1] = (2+np.cos(3*ts))*np.sin(2*ts) + data[:, 2] = np.sin(3*ts) return Knot(data) trefoil = k3_1 def k4_1(num_points=100): '''Returns a particular figure eight knot conformation.''' - data = n.zeros((num_points, 3), dtype=n.float64) - ts = n.linspace(0, 2*n.pi, num_points) - data[:, 0] = (2+n.cos(2*ts))*n.cos(3*ts) - data[:, 1] = (2+n.cos(2*ts))*n.sin(3*ts) - data[:, 2] = n.sin(4*ts) + data = np.zeros((num_points, 3), dtype=np.float64) + ts = np.linspace(0, 2*np.pi, num_points) + data[:, 0] = (2+np.cos(2*ts))*np.cos(3*ts) + data[:, 1] = (2+np.cos(2*ts))*np.sin(3*ts) + data[:, 2] = np.sin(4*ts) return Knot(data) figure_eight = k4_1 @@ -52,11 +56,11 @@ def lissajous(nx=3, ny=2, nz=7, px=0.7, py=0.2, pz=0., num_points=100): '''Returns a 
`Lissajous knot `__ with the given parameters.''' - data = n.zeros((num_points, 3), dtype=n.float64) - ts = n.linspace(0, 2*n.pi, num_points) - data[:, 0] = n.cos(nx*ts+px) - data[:, 1] = n.cos(ny*ts+py) - data[:, 2] = n.cos(nz*ts+pz) + data = np.zeros((num_points, 3), dtype=np.float64) + ts = np.linspace(0, 2*np.pi, num_points) + data[:, 0] = np.cos(nx*ts+px) + data[:, 1] = np.cos(ny*ts+py) + data[:, 2] = np.cos(nz*ts+pz) return Knot(data) diff --git a/pyknotid/make/randomwalks/quaternionic.py b/pyknotid/make/randomwalks/quaternionic.py index d3d2a03..f896cde 100644 --- a/pyknotid/make/randomwalks/quaternionic.py +++ b/pyknotid/make/randomwalks/quaternionic.py @@ -5,7 +5,7 @@ [1] These polygons are generated following the algorithms of (Cantarella, Deguchi and Shonkwiler. "Probability Theory of Random Polygons from the Quaternionic Viewpoint". Communications on Pure and Applied Mathematics 67 (2014).) ''' -import numpy as n +import numpy as np def get_closed_loop(length, seed=0, normalisation=7.5): ''' @@ -52,22 +52,22 @@ def get_open_by_distance_line(length, distance=0., seed=0, normalisation=7.5): def open_line_segments(num, seed=0): if seed == 0: - seed = n.random.randint(1000000) + seed = np.random.randint(1000000) - generator = n.random.RandomState() + generator = np.random.RandomState() generator.seed(seed) # wjs is a vector uniformly distributed on the (4*num)-sphere # Such a vector is given by (4*num) normally distributed numbers in a # normalised vector wjs = generator.normal(size=4 * num) - normfac = 1./n.sqrt(n.sum(wjs*wjs)) + normfac = 1./np.sqrt(np.sum(wjs*wjs)) wjs *= normfac # Redistribute wjs as sets of 4-points points = wjs.reshape((num, 4)) - edges = n.zeros((num, 3)) + edges = np.zeros((num, 3)) # Create each edge as a transformation of the 4-points edges[:, 0] = (points[:, 0]**2 + points[:, 1]**2 - points[:, 2]**2 - @@ -81,21 +81,21 @@ def open_line_segments(num, seed=0): def closed_loop_segments(num, seed=0): if seed == 0: - seed = 
n.random.randint(1000000) + seed = np.random.randint(1000000) - generator = n.random.RandomState() + generator = np.random.RandomState() generator.seed(seed) uu = generator.normal(size=num) + 1j*generator.normal(size=num) vv = generator.normal(size=num) + 1j*generator.normal(size=num) - u = uu / n.sqrt(uu.dot(uu.conj())) + u = uu / np.sqrt(uu.dot(uu.conj())) uconj = u.conj() vc = vv - u.conj().dot(vv)*u - v = vc / n.sqrt(vc.dot(vc.conj())) + v = vc / np.sqrt(vc.dot(vc.conj())) vconj = v.conj() - edges = n.zeros((num, 3),dtype=float) + edges = np.zeros((num, 3),dtype=float) edges[:, 0] = (u * uconj - v * vconj).real edges[:, 1] = (-1j * (u * vconj - v * uconj)).real @@ -117,43 +117,43 @@ def open_by_distance_segments(num, distance=0., seed=0): The random seed. ''' if seed == 0: - seed = n.random.randint(1000000) + seed = np.random.randint(1000000) if distance > 1. or distance < 0.: raise ValueError('distance must be between 0 and 1') distance *= 2. - generator = n.random.RandomState() + generator = np.random.RandomState() generator.seed(seed) uu = generator.normal(size=num) + 1j*generator.normal(size=num) vv = generator.normal(size=num) + 1j*generator.normal(size=num) - u = uu / n.sqrt(uu.dot(uu.conj())) + u = uu / np.sqrt(uu.dot(uu.conj())) uconj = u.conj() vc = vv - u.conj().dot(vv)*u - v = vc / n.sqrt(vc.dot(vc.conj())) + v = vc / np.sqrt(vc.dot(vc.conj())) vconj = v.conj() - # modu = n.sqrt(n.sum(u**2 + uconj**2)) + # modu = np.sqrt(np.sum(u**2 + uconj**2)) # print 'modu is', modu # u /= modu # uconj /= modu - u *= n.sqrt(1 + distance / 2.) - uconj *= n.sqrt(1 + distance / 2.) + u *= np.sqrt(1 + distance / 2.) + uconj *= np.sqrt(1 + distance / 2.) - # modv = n.sqrt(n.sum(v**2 + vconj**2)) + # modv = np.sqrt(np.sum(v**2 + vconj**2)) # print 'modv is', modv # v /= modv # vconj /= modv - v *= n.sqrt(1 - distance / 2.) - vconj *= n.sqrt(1 - distance / 2.) + v *= np.sqrt(1 - distance / 2.) + vconj *= np.sqrt(1 - distance / 2.) 
- # print 'mod u', n.sqrt(n.sum(u**2 + uconj**2)), n.sqrt(1 + distance/2.) - # print 'mod v', n.sqrt(n.sum(v**2 + vconj**2)), n.sqrt(1 - distance/2.) + # print 'mod u', np.sqrt(np.sum(u**2 + uconj**2)), np.sqrt(1 + distance/2.) + # print 'mod v', np.sqrt(np.sum(v**2 + vconj**2)), np.sqrt(1 - distance/2.) - edges = n.zeros((num, 3),dtype=float) + edges = np.zeros((num, 3),dtype=float) edges[:, 0] = (u * uconj - v * vconj).real edges[:, 1] = (-1j * (u * vconj - v * uconj)).real @@ -163,7 +163,7 @@ def open_by_distance_segments(num, distance=0., seed=0): def edges_to_path(edges): num = len(edges) - path = n.zeros((num + 1, 3)) + path = np.zeros((num + 1, 3)) for i in range(num): path[i+1] = path[i] + edges[i] return path diff --git a/pyknotid/make/torus.py b/pyknotid/make/torus.py index bcfc3c4..f7e84a5 100644 --- a/pyknotid/make/torus.py +++ b/pyknotid/make/torus.py @@ -8,14 +8,12 @@ ----------------- ''' -from __future__ import division - import numpy as np try: from fractions import gcd -except: - from fractions import math +except ImportError: + import math gcd = math.gcd from pyknotid.spacecurves.knot import Knot @@ -60,7 +58,7 @@ def torus_link(p=3, q=6, num=100): return Link(TorusKnot(p, q, num).components) -class TorusKnot(object): +class TorusKnot: """Representation of a torus knot or link. A torus knot is one that can be drawn on the surface of a diff --git a/pyknotid/ninvariants.py b/pyknotid/ninvariants.py new file mode 100644 index 0000000..f10fe86 --- /dev/null +++ b/pyknotid/ninvariants.py @@ -0,0 +1,106 @@ +""" +Numba implementation of invariants (replacing cinvariants.pyx). + +This module requires numba for performance. If numba is not available, +import will fail and pure Python fallbacks will be used. +""" +import numpy as np + +try: + import numba +except ImportError: + raise ImportError("numba is required for ninvariants. 
Install with: pip install numba") + + +@numba.jit(nopython=True) +def _crude_modulus(val, modulo): + """Helper function for modulus operation.""" + if val < 0: + return val + modulo + return val + +@numba.jit(nopython=True) +def _vassiliev_degree_3_inner(arrows): + """ + Inner computation for vassiliev_degree_3 (JIT compiled). + + Note: Uses a simple array-based approach instead of Python sets + to track used index combinations in nopython mode. + """ + num_arrows = len(arrows) + num_crossings = len(arrows) * 2 + + representations_sum_1 = 0 + representations_sum_2 = 0 + + # Track used combinations with a fixed-size array + # We use a simple approach: mark combinations by a unique index + # Max possible combinations is num_arrows^3, but we only track what we've seen + max_combinations = num_arrows * num_arrows * num_arrows + used_combinations = np.zeros(max_combinations, dtype=np.int8) + + for i1 in range(num_arrows): + arrow1 = arrows[i1] + a1s = arrow1[0] + a1e = arrow1[1] + sign1 = arrow1[2] + + a1e = _crude_modulus(a1e - a1s, num_crossings) + + for i2 in range(num_arrows): + arrow2 = arrows[i2] + a2s = arrow2[0] + a2e = arrow2[1] + sign2 = arrow2[2] + + a2s = _crude_modulus(a2s - a1s, num_crossings) + a2e = _crude_modulus(a2e - a1s, num_crossings) + + for i3 in range(num_arrows): + arrow3 = arrows[i3] + a3s = arrow3[0] + a3e = arrow3[1] + sign3 = arrow3[2] + + a3s = _crude_modulus(a3s - a1s, num_crossings) + a3e = _crude_modulus(a3e - a1s, num_crossings) + + # Create ordered indices (sorted tuple equivalent) + indices = np.array([i1, i2, i3], dtype=np.int64) + indices.sort() + + # Map to unique index for tracking + combo_index = indices[0] * num_arrows * num_arrows + indices[1] * num_arrows + indices[2] + + if used_combinations[combo_index]: + continue + + if (a2s < a1e and a3e < a1e and a3e > a2s and + a3s > a1e and a2e > a3s): + representations_sum_1 += sign1 * sign2 * sign3 + used_combinations[combo_index] = 1 + if (a2e < a1e and a3s < a1e and a3s > a2e and + 
a2s > a1e and a3e > a2s): + representations_sum_2 += sign1 * sign2 * sign3 + used_combinations[combo_index] = 1 + + return representations_sum_1 / 2. + representations_sum_2 + + +def vassiliev_degree_3(arrows): + """ + Calculate the Vassiliev invariant of degree 3. + + Now uses Numba for performance instead of Cython. + + Parameters + ---------- + arrows : ndarray + Array of arrow data with shape (n, 3) where each row is [start, end, sign] + + Returns + ------- + float + The Vassiliev degree 3 invariant value + """ + return _vassiliev_degree_3_inner(arrows) diff --git a/pyknotid/representations/dtnotation.py b/pyknotid/representations/dtnotation.py index c36922d..a20d327 100644 --- a/pyknotid/representations/dtnotation.py +++ b/pyknotid/representations/dtnotation.py @@ -18,7 +18,7 @@ else: string_types = str -class DTNotation(object): +class DTNotation: ''' Class for containing and manipulation DT notation. diff --git a/pyknotid/representations/gausscode.py b/pyknotid/representations/gausscode.py index cdcc78f..7799143 100644 --- a/pyknotid/representations/gausscode.py +++ b/pyknotid/representations/gausscode.py @@ -11,13 +11,12 @@ ~~~~~~~~~~~~~~~~~ ''' -from __future__ import print_function -import numpy as n +import numpy as np import re import sys -class GaussCode(object): +class GaussCode: '''Class for containing and manipulating Gauss codes. 
By default you must pass an extended Gauss code that includes the @@ -57,7 +56,7 @@ def __init__(self, crossings='', verbose=True): raise NotImplementedError( 'planar diagram -> gauss code not implemented') else: - if isinstance(crossings, n.ndarray): + if isinstance(crossings, np.ndarray): crossings = [crossings] self._init_from_raw_crossings_array(crossings) @@ -108,7 +107,7 @@ def calculating_orientations(cls, code): def contains_virtual(self): for row in self._gauss_code: - if n.any(row[:, 0] < 0): + if np.any(row[:, 0] < 0): return True return False @@ -120,8 +119,8 @@ def without_virtual(self): new_rows = [] for row in gc._gauss_code: - keep = n.ones(len(row), dtype=n.bool) - virtual_cs = n.argwhere(row[:, 0] < 0) + keep = np.ones(len(row), dtype=np.bool) + virtual_cs = np.argwhere(row[:, 0] < 0) indices = virtual_cs.flatten() for index in indices: keep[index] = False @@ -161,7 +160,7 @@ def _init_from_raw_crossings_array(self, crossings): else: index = assigned_indices.pop(ident) line_gauss_code.append([index, over, clockwise]) - gauss_code.append(n.array(line_gauss_code)) + gauss_code.append(np.array(line_gauss_code)) self._gauss_code = gauss_code @@ -183,7 +182,7 @@ def _init_from_string(self, crossings): line_gauss_code.append([int(line_crossing[:-2]), over_under[line_crossing[-2]], signs[line_crossing[-1]]]) - gauss_code.append(n.array(line_gauss_code)) + gauss_code.append(np.array(line_gauss_code)) self._gauss_code = gauss_code @@ -236,7 +235,7 @@ def _do_reidemeister_moves(self, one=True, two=True, one_extended=True): # These lists keep track of which crossings have been removed, to # avoid modifying the arrays every time an RM is performed - keeps = [n.ones(l.shape[0], dtype=bool) for l in code] + keeps = [np.ones(l.shape[0], dtype=bool) for l in code] # First do RM1 and RM2 for line_index, line in enumerate(code): @@ -273,7 +272,7 @@ def _do_reidemeister_moves(self, one=True, two=True, one_extended=True): # Do extended RM1 as a separate step print 
code = [(line[keep] if len(line) > 0 else line) for (line, keep) in zip(code, keeps)] - keeps = [n.ones(l.shape[0], dtype=bool) for l in code] + keeps = [np.ones(l.shape[0], dtype=bool) for l in code] crossing_indices = {} for line_index, line in enumerate(code): @@ -302,7 +301,7 @@ def _do_reidemeister_moves(self, one=True, two=True, one_extended=True): if len(in_between) > 0: in_between = in_between[in_between_keeps] - if n.abs(n.sum(in_between[:, 1])) == len(in_between): + if np.abs(np.sum(in_between[:, 1])) == len(in_between): # all crossings over or under keeps[line_index][first_index] = False keeps[line_index][second_index] = False @@ -318,14 +317,14 @@ def _do_reidemeister_moves(self, one=True, two=True, one_extended=True): # Second, wrap around the list if a big rm1 wasn't performed if number not in crossing_numbers: continue - in_between = n.vstack((code[line_index][second_index+1:], + in_between = np.vstack((code[line_index][second_index+1:], code[line_index][:first_index])) - in_between_keeps = n.hstack((keeps[line_index][second_index+1:], + in_between_keeps = np.hstack((keeps[line_index][second_index+1:], keeps[line_index][:first_index])) if len(in_between) > 0: in_between = in_between[in_between_keeps] - if n.abs(n.sum(in_between[:, 1])) == len(in_between): + if np.abs(np.sum(in_between[:, 1])) == len(in_between): keeps[line_index][first_index] = False keeps[line_index][second_index] = False for entry in in_between: @@ -366,19 +365,19 @@ def simplify(self, one=True, two=True, one_extended=True): if self.verbose: print('Simplifying: initially {} crossings'.format( - n.sum([len(line) for line in self._gauss_code]))) + np.sum([len(line) for line in self._gauss_code]))) number_of_runs = 0 while True: original_gc = self._gauss_code - original_len = n.sum([len(line) for line in original_gc]) + original_len = np.sum([len(line) for line in original_gc]) self._do_reidemeister_moves(one, two) new_gc = self._gauss_code - new_len = n.sum([len(line) for line in 
new_gc]) + new_len = np.sum([len(line) for line in new_gc]) number_of_runs += 1 if self.verbose: sys.stdout.write('\r-> {} crossings after {} runs'.format( - n.sum([len(line) for line in new_gc]), number_of_runs)) + np.sum([len(line) for line in new_gc]), number_of_runs)) sys.stdout.flush() if new_len == original_len: break diff --git a/pyknotid/representations/gaussdiagram.py b/pyknotid/representations/gaussdiagram.py index e3819f3..55c676b 100644 --- a/pyknotid/representations/gaussdiagram.py +++ b/pyknotid/representations/gaussdiagram.py @@ -14,11 +14,10 @@ ~~~~~~~~~~~~~~~~~ ''' -from __future__ import print_function -import numpy as n +import numpy as np -class GaussDiagram(object): +class GaussDiagram: ''' Class for containing and manipulating Gauss diagrams. @@ -65,20 +64,20 @@ def plot(self, fig_ax=None): gc = self._gauss_code[0] num_points = len(gc) - angles = n.linspace(0, 2*n.pi, num_points+1)[:-1] + angles = np.linspace(0, 2*np.pi, num_points+1)[:-1] other_angles = {} for i, angle in enumerate(angles): gc_entry = gc[i, 0] - circ_pos = 100*n.array([n.cos(angle), n.sin(angle)]) + circ_pos = 100*np.array([np.cos(angle), np.sin(angle)]) rad_pos = 1.2*circ_pos ax.annotate(str(gc_entry), xy=circ_pos, xytext=rad_pos) if gc_entry in other_angles: other_angle = other_angles.pop(gc_entry) - other_pos = 100*n.array([n.cos(other_angle), - n.sin(other_angle)]) + other_pos = 100*np.array([np.cos(other_angle), + np.sin(other_angle)]) over = gc[i, 1] sign = gc[i, 2] @@ -94,7 +93,7 @@ def plot(self, fig_ax=None): ax.add_artist(c1) - ax.plot(100*n.cos(angles), 100*n.sin(angles), 'o', markersize=8, + ax.plot(100*np.cos(angles), 100*np.sin(angles), 'o', markersize=8, color='black') ax.set_xlim(-140, 140) diff --git a/pyknotid/representations/planardiagram.py b/pyknotid/representations/planardiagram.py index 66a87f8..6935fd1 100644 --- a/pyknotid/representations/planardiagram.py +++ b/pyknotid/representations/planardiagram.py @@ -10,9 +10,7 @@ ~~~~~~~~~~~~~~~~~ ''' -from 
__future__ import print_function - -import numpy as n +import numpy as np class PlanarDiagram(list): @@ -323,7 +321,7 @@ def is_outgoing(self, index): def shorthand_to_crossings(s): ''' Takes a planar diagram shorthand string, and returns a list of - :class:`Crossing`s. + :class:`Crossing` objects. ''' crossings = [] cs = s.split(' ') @@ -344,7 +342,7 @@ def gausscode_to_crossings(gc): incomplete_crossings = {} line_lengths = [len(line) for line in cl] total_lines = sum(line_lengths) - line_indices = [1] + list(n.cumsum(line_lengths)[:-1] + 1) + line_indices = [1] + list(np.cumsum(line_lengths)[:-1] + 1) curline = 1 for i, line in enumerate(cl): diff --git a/pyknotid/representations/representation.py b/pyknotid/representations/representation.py index 229b483..4a0eb9a 100644 --- a/pyknotid/representations/representation.py +++ b/pyknotid/representations/representation.py @@ -9,7 +9,6 @@ ~~~~~~~~~~~~~~~~~ ''' -from __future__ import print_function, division from pyknotid.representations.gausscode import GaussCode from collections import defaultdict import numpy as np @@ -530,7 +529,7 @@ def space_curve(self, **kwargs): return k -class CrossingLine(object): +class CrossingLine: def __init__(self, start, end, points): self.start = start self.end = end diff --git a/pyknotid/simplify/coctree.pyx b/pyknotid/simplify/coctree.pyx deleted file mode 100644 index e65fd1e..0000000 --- a/pyknotid/simplify/coctree.pyx +++ /dev/null @@ -1,233 +0,0 @@ -''' -Cython functions for octree calculations. -''' - - -import numpy as n -cimport numpy as n - -cimport cython - -from libc.math cimport abs, pow, sqrt as csqrt, floor, acos - -cpdef angle_exceeds(double [:, :] ps, double val=2*n.pi, - long include_closure=1): - '''Returns True if the sum of angles along ps exceeds - val, else False. - - If include_closure, includes the angles with the line closing - the end and start points. - ''' - cdef double angle = 0. 
- cdef double [:] nex = ps[0] - cdef double [:] nex2 = ps[1] - cdef double [:] dv2 = n.zeros(3, dtype=n.double) - diff(dv2, nex, nex2) - divide(dv2, mag(dv2)) - cdef double [:] cur - cdef double increment - cdef long lenps = len(ps) - cdef long [:] checks = n.arange(len(ps)) if include_closure else n.arange(len(ps)-2) - cdef int i - for i in checks: - cur = nex - nex = nex2 - nex2 = ps[(i+2) % lenps] - dv = dv2 - diff(dv2, nex, nex2) - divide(dv2, mag(dv2)) - increment = angle_between(dv, dv2) - if n.isnan(increment): - return True - angle += increment - if angle > val: - return True - assert not n.isnan(angle) - return False - -cdef void diff(double [:] dv2, double [:] nex, double [:] nex2): - dv2[0] = nex2[0] - nex[0] - dv2[1] = nex2[1] - nex[1] - dv2[2] = nex2[2] - nex[2] - -cdef double angle_between(double [:] v1, double [:] v2): - '''Returns angle between v1 and v2, assuming they are normalised to 1.''' - # clip becaus v1.dot(v2) may exceed 1 due to floating point - cdef double value = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2] - if value > 1.: - value = 1. - elif value < 0.: - value = 0. - return value - -cdef void divide(double [:] arr, double val): - arr[0] = arr[0] / val - arr[1] = arr[1] / val - arr[2] = arr[2] / val - -cdef void multiply(double [:] arr, double val): - arr[0] = arr[0] * val - arr[1] = arr[1] * val - arr[2] = arr[2] * val - -cdef double mag(double [:] v): - return pow(v[0], 2) + pow(v[1], 2) + pow(v[2], 2) - - -#@cython.boundscheck(False) -#@cython.wraparound(False) -cpdef line_to_segments(line, cuts=None, join_ends=True): - '''Takes a line (set of points), a list of cut planes in - x, y, z, and a parameter to decide whether the line - joining the first and last point should also be cut. 
- - Returns a list of shorter lines resulting from cutting at - all these cut planes.''' - - cdef double [:, :] cy_line = line - - cdef double cut_x, cut_y, cut_z - if cuts is None: - xmin, ymin, zmin = n.min(line, axis=0) - 1 - xmax, ymax, zmax = n.max(line, axis=0) + 1 - cut_x = (xmax + xmin) / 2. - cut_y = (ymax + ymin) / 2. - cut_z = (zmin + zmax) / 2. - else: - cut_x, cut_y, cut_z = cuts - - - cdef double [:] cy_dv = n.zeros(3, dtype=n.double) - cdef double [:] cy_nex - cdef double [:] cy_cur - cdef double dx, dy, dz - cdef double x_cut_pos, y_cut_pos, z_cut_pos - - # Cut the line wherever it passes through a quad cell boundary - cdef list segments = [] - cdef long cut_i = 0 - cdef long i - for i in range(len(line)-1): - cy_cur = cy_line[i] - cy_nex = cy_line[i+1] - - diff(cy_dv, cy_cur, cy_nex) - dx = cy_dv[0] - dy = cy_dv[1] - dz = cy_dv[2] - - cross_cut_x = sign(cy_cur[0] - cut_x) != sign(cy_nex[0] - cut_x) - cross_cut_y = sign(cy_cur[1] - cut_y) != sign(cy_nex[1] - cut_y) - cross_cut_z = sign(cy_cur[2] - cut_z) != sign(cy_nex[2] - cut_z) - - if (not cross_cut_x and not cross_cut_y and not cross_cut_z): - continue - - cur = line[i] - nex = line[i+1] - dv = cur - nex - cur = line[i] - nex = line[i+1] - - if cross_cut_x and cross_cut_y and cross_cut_z: - x_cut_pos = -1 * (cur[0]-cut_x)/dx - y_cut_pos = -1 * (cur[1]-cut_y)/dy - z_cut_pos = -1 * (cur[2]-cut_z)/dz - order = n.sort((x_cut_pos, y_cut_pos, z_cut_pos)) - # assert 0 < x_cut_pos < 1 and 0 < y_cut_pos < 1 and 0 < z_cut_pos < 1 - join_point_1 = cur + order[0]*dv - join_point_2 = cur + order[1]*dv - join_point_3 = cur + order[2]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) - third_seg = n.vstack((join_point_2, join_point_3)) - line[i] = join_point_3 - cut_i = i - segments.append(first_seg) - segments.append(second_seg) - segments.append(third_seg) - elif cross_cut_x and cross_cut_y: - x_cut_pos = -1 * (cur[0]-cut_x)/dx - 
y_cut_pos = -1 * (cur[1]-cut_y)/dy - order = n.sort((x_cut_pos, y_cut_pos)) - join_point_1 = cur + order[0]*dv - join_point_2 = cur + order[1]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) - line[i] = join_point_2 - cut_i = i - segments.append(first_seg) - segments.append(second_seg) - elif cross_cut_x and cross_cut_z: - x_cut_pos = -1 * (cur[0]-cut_x)/dx - z_cut_pos = -1 * (cur[2]-cut_z)/dz - order = n.sort((x_cut_pos, z_cut_pos)) - join_point_1 = cur + order[0]*dv - join_point_2 = cur + order[1]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) - line[i] = join_point_2 - cut_i = i - segments.append(first_seg) - segments.append(second_seg) - elif cross_cut_y and cross_cut_z: - y_cut_pos = -1 * (cur[1]-cut_y)/dy - z_cut_pos = -1 * (cur[2]-cut_z)/dz - order = n.sort((y_cut_pos, z_cut_pos)) - join_point_1 = cur + order[0]*dv - join_point_2 = cur + order[1]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) - line[i] = join_point_2 - cut_i = i - segments.append(first_seg) - segments.append(second_seg) - elif cross_cut_x: - cut_pos = -1 * (cur[0]-cut_x)/dx - assert 0. <= cut_pos <= 1. - join_point = cur + cut_pos*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point)) - line[i] = join_point - cut_i = i - segments.append(first_seg) - elif cross_cut_y: - cut_pos = -1 * (cur[1]-cut_y)/dy - assert 0. <= cut_pos <= 1. - join_point = cur + cut_pos*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point)) - line[i] = join_point - cut_i = i - segments.append(first_seg) - elif cross_cut_z: - cut_pos = -1 * (cur[2]-cut_z)/dz - assert 0. <= cut_pos <= 1. 
- join_point = cur + cut_pos*dv - - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point)) - line[i] = join_point - # second_seg = n.vstack((join_point, line[(i+1):])) - - cut_i = i - segments.append(first_seg) - - final_seg = line[cut_i:] - if cut_i > 0: - if join_ends: - first_seg = segments.pop(0) - segments.append(n.vstack((final_seg, first_seg))) - else: - segments.append(final_seg) - else: - segments.append(final_seg) - - return segments - - - -cdef double sign(double v): - if v > 0: - return 1.0 - elif v < 0: - return -1.0 - return 0.0 - diff --git a/pyknotid/simplify/noctree.py b/pyknotid/simplify/noctree.py new file mode 100644 index 0000000..3c2d82b --- /dev/null +++ b/pyknotid/simplify/noctree.py @@ -0,0 +1,284 @@ +""" +Numba implementation of octree functions (replacing coctree.pyx). + +This module requires numba for performance. If numba is not available, +import will fail and pure Python fallbacks will be used. +""" +import numpy as np + +try: + import numba +except ImportError: + raise ImportError("numba is required for noctree. Install with: pip install numba") + + +@numba.jit(nopython=True) +def _diff(dv2, nex, nex2): + """Calculate difference vector.""" + dv2[0] = nex2[0] - nex[0] + dv2[1] = nex2[1] - nex[1] + dv2[2] = nex2[2] - nex[2] + +@numba.jit(nopython=True) +def _divide(arr, val): + """Divide array elements by scalar.""" + arr[0] = arr[0] / val + arr[1] = arr[1] / val + arr[2] = arr[2] / val + +@numba.jit(nopython=True) +def _mag(v): + """Magnitude squared of vector.""" + return v[0]**2 + v[1]**2 + v[2]**2 + +@numba.jit(nopython=True) +def _angle_between(v1, v2): + """ + Returns angle between v1 and v2, assuming they are normalised to 1. + Clips value to handle floating point errors. + """ + value = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2] + if value > 1.: + value = 1. + elif value < 0.: + value = 0. 
+ return value + +@numba.jit(nopython=True) +def _angle_exceeds_inner(ps, val, include_closure): + """ + Inner function for angle_exceeds (JIT compiled). + + Returns True if the sum of angles along ps exceeds val, else False. + """ + angle = 0. + nex = ps[0].copy() + nex2 = ps[1].copy() + dv2 = np.zeros(3, dtype=np.float64) + _diff(dv2, nex, nex2) + mag_val = np.sqrt(_mag(dv2)) + _divide(dv2, mag_val) + + lenps = len(ps) + + if include_closure: + num_checks = lenps + else: + num_checks = lenps - 2 + + for i in range(num_checks): + cur = nex.copy() + nex = nex2.copy() + nex2 = ps[(i + 2) % lenps].copy() + dv = dv2.copy() + _diff(dv2, nex, nex2) + mag_val = np.sqrt(_mag(dv2)) + _divide(dv2, mag_val) + increment = _angle_between(dv, dv2) + if np.isnan(increment): + return True + angle += increment + if angle > val: + return True + + if np.isnan(angle): + # Should match the assert behavior + return True + return False + +@numba.jit(nopython=True) +def _sign(v): + """Return sign of value.""" + if v > 0: + return 1.0 + elif v < 0: + return -1.0 + return 0.0 + +@numba.jit(nopython=True) +def _line_to_segments_inner(line, cut_x, cut_y, cut_z): + """ + Inner function for line cutting (JIT compiled). + + Returns indices where cuts occur and cut positions. + We'll do the actual array building in Python. + """ + n = len(line) + # Store cut information: (index, cut_type, positions...) 
+ # cut_type: 0=none, 1=x, 2=y, 3=z, 4=xy, 5=xz, 6=yz, 7=xyz + max_cuts = n * 3 # Maximum possible cuts + cut_indices = np.zeros(max_cuts, dtype=np.int64) + cut_types = np.zeros(max_cuts, dtype=np.int64) + cut_positions = np.zeros((max_cuts, 3), dtype=np.float64) + num_cuts = 0 + + for i in range(n - 1): + cur = line[i] + nex = line[i + 1] + + dx = nex[0] - cur[0] + dy = nex[1] - cur[1] + dz = nex[2] - cur[2] + + cross_cut_x = _sign(cur[0] - cut_x) != _sign(nex[0] - cut_x) + cross_cut_y = _sign(cur[1] - cut_y) != _sign(nex[1] - cut_y) + cross_cut_z = _sign(cur[2] - cut_z) != _sign(nex[2] - cut_z) + + if not cross_cut_x and not cross_cut_y and not cross_cut_z: + continue + + cut_indices[num_cuts] = i + + # Determine cut type and calculate positions + if cross_cut_x and cross_cut_y and cross_cut_z: + cut_types[num_cuts] = 7 + x_cut_pos = -1 * (cur[0] - cut_x) / dx + y_cut_pos = -1 * (cur[1] - cut_y) / dy + z_cut_pos = -1 * (cur[2] - cut_z) / dz + cut_positions[num_cuts, 0] = x_cut_pos + cut_positions[num_cuts, 1] = y_cut_pos + cut_positions[num_cuts, 2] = z_cut_pos + elif cross_cut_x and cross_cut_y: + cut_types[num_cuts] = 4 + x_cut_pos = -1 * (cur[0] - cut_x) / dx + y_cut_pos = -1 * (cur[1] - cut_y) / dy + cut_positions[num_cuts, 0] = x_cut_pos + cut_positions[num_cuts, 1] = y_cut_pos + elif cross_cut_x and cross_cut_z: + cut_types[num_cuts] = 5 + x_cut_pos = -1 * (cur[0] - cut_x) / dx + z_cut_pos = -1 * (cur[2] - cut_z) / dz + cut_positions[num_cuts, 0] = x_cut_pos + cut_positions[num_cuts, 1] = z_cut_pos + elif cross_cut_y and cross_cut_z: + cut_types[num_cuts] = 6 + y_cut_pos = -1 * (cur[1] - cut_y) / dy + z_cut_pos = -1 * (cur[2] - cut_z) / dz + cut_positions[num_cuts, 0] = y_cut_pos + cut_positions[num_cuts, 1] = z_cut_pos + elif cross_cut_x: + cut_types[num_cuts] = 1 + cut_positions[num_cuts, 0] = -1 * (cur[0] - cut_x) / dx + elif cross_cut_y: + cut_types[num_cuts] = 2 + cut_positions[num_cuts, 0] = -1 * (cur[1] - cut_y) / dy + elif cross_cut_z: + 
cut_types[num_cuts] = 3 + cut_positions[num_cuts, 0] = -1 * (cur[2] - cut_z) / dz + + num_cuts += 1 + + return cut_indices[:num_cuts], cut_types[:num_cuts], cut_positions[:num_cuts] + + +def angle_exceeds(ps, val=2*np.pi, include_closure=1): + """ + Returns True if the sum of angles along ps exceeds val, else False. + + If include_closure, includes the angles with the line closing + the end and start points. + + Uses Numba for performance. + """ + ps = np.asarray(ps, dtype=np.float64) + return _angle_exceeds_inner(ps, val, include_closure) + + +def line_to_segments(line, cuts=None, join_ends=True): + """ + Takes a line (set of points), a list of cut planes in + x, y, z, and a parameter to decide whether the line + joining the first and last point should also be cut. + + Returns a list of shorter lines resulting from cutting at + all these cut planes. + + Uses Numba for performance-critical parts. + """ + line = np.asarray(line, dtype=np.float64) + + if cuts is None: + xmin, ymin, zmin = np.min(line, axis=0) - 1 + xmax, ymax, zmax = np.max(line, axis=0) + 1 + cut_x = (xmax + xmin) / 2. + cut_y = (ymax + ymin) / 2. + cut_z = (zmin + zmax) / 2. 
+ else: + cut_x, cut_y, cut_z = cuts + + # Get cut information from numba + cut_indices, cut_types, cut_positions = _line_to_segments_inner( + line, cut_x, cut_y, cut_z + ) + + if len(cut_indices) == 0: + return [line] + + # Build segments from cut information + segments = [] + cut_i = 0 + line_copy = line.copy() + + for idx, (i, cut_type, positions) in enumerate(zip(cut_indices, cut_types, cut_positions)): + cur = line_copy[i] + nex = line_copy[i + 1] + dv = cur - nex + + if cut_type == 7: # xyz + order = np.sort(positions) + join_point_1 = cur + order[0] * dv + join_point_2 = cur + order[1] * dv + join_point_3 = cur + order[2] * dv + first_seg = np.vstack((line_copy[cut_i:(i+1)], join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) + third_seg = np.vstack((join_point_2, join_point_3)) + line_copy[i] = join_point_3 + cut_i = i + segments.extend([first_seg, second_seg, third_seg]) + elif cut_type == 4: # xy + order = np.sort(positions[:2]) + join_point_1 = cur + order[0] * dv + join_point_2 = cur + order[1] * dv + first_seg = np.vstack((line_copy[cut_i:(i+1)], join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) + line_copy[i] = join_point_2 + cut_i = i + segments.extend([first_seg, second_seg]) + elif cut_type == 5: # xz + order = np.sort(positions[:2]) + join_point_1 = cur + order[0] * dv + join_point_2 = cur + order[1] * dv + first_seg = np.vstack((line_copy[cut_i:(i+1)], join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) + line_copy[i] = join_point_2 + cut_i = i + segments.extend([first_seg, second_seg]) + elif cut_type == 6: # yz + order = np.sort(positions[:2]) + join_point_1 = cur + order[0] * dv + join_point_2 = cur + order[1] * dv + first_seg = np.vstack((line_copy[cut_i:(i+1)], join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) + line_copy[i] = join_point_2 + cut_i = i + segments.extend([first_seg, second_seg]) + elif cut_type in [1, 2, 3]: # x, y, or z only + cut_pos = positions[0] + 
join_point = cur + cut_pos * dv + first_seg = np.vstack((line_copy[cut_i:(i+1)], join_point)) + line_copy[i] = join_point + cut_i = i + segments.append(first_seg) + + # Handle final segment + final_seg = line_copy[cut_i:] + if cut_i > 0: + if join_ends: + first_seg = segments.pop(0) + segments.append(np.vstack((final_seg, first_seg))) + else: + segments.append(final_seg) + else: + segments.append(final_seg) + + return segments diff --git a/pyknotid/simplify/octree.py b/pyknotid/simplify/octree.py index 38cc6dd..df7b23e 100644 --- a/pyknotid/simplify/octree.py +++ b/pyknotid/simplify/octree.py @@ -5,10 +5,10 @@ Module for simplifying lines with an octree decomposition. ''' -import numpy as n +import numpy as np try: - from coctree import (angle_exceeds as cangle_exceeds, - line_to_segments as cline_to_segments) + from pyknotid.simplify.noctree import (angle_exceeds as cangle_exceeds, + line_to_segments as cline_to_segments) except ImportError: cangle_exceeds = None cline_to_segments = None @@ -18,7 +18,7 @@ class OctreeError(Exception): pass -class OctreeCell(object): +class OctreeCell: '''Stores line segments, and is capable of splitting and simplifying them. @@ -100,9 +100,9 @@ def from_single_line(cls, line, shape=None, **kwargs): Other kwargs are passed directly to the OctreeCell. ''' if shape is None: - shape = [n.min(line[:, 0]), n.max(line[:, 0]), - n.min(line[:, 1]), n.max(line[:, 1]), - n.min(line[:, 2]), n.max(line[:, 2])] + shape = [np.min(line[:, 0]), np.max(line[:, 0]), + np.min(line[:, 1]), np.max(line[:, 1]), + np.min(line[:, 2]), np.max(line[:, 2])] lineseg = LineSegment(line, identifier=1) lineseg.next = lineseg lineseg.prev = lineseg @@ -119,14 +119,14 @@ def from_lines(cls, lines, shape=None, **kwargs): Other kwargs are passed directly to the OctreeCell. 
''' if shape is None: - extremes = [[n.min(line[:, 0]), n.max(line[:, 0]), - n.min(line[:, 1]), n.max(line[:, 1]), - n.min(line[:, 2]), n.max(line[:, 2])] for + extremes = [[np.min(line[:, 0]), np.max(line[:, 0]), + np.min(line[:, 1]), np.max(line[:, 1]), + np.min(line[:, 2]), np.max(line[:, 2])] for line in lines] - extremes = n.array(extremes) - shape = [n.min(extremes[:, 0]), n.max(extremes[:, 1]), - n.min(extremes[:, 2]), n.max(extremes[:, 3]), - n.min(extremes[:, 4]), n.max(extremes[:, 5])] + extremes = np.array(extremes) + shape = [np.min(extremes[:, 0]), np.max(extremes[:, 1]), + np.min(extremes[:, 2]), np.max(extremes[:, 3]), + np.min(extremes[:, 4]), np.max(extremes[:, 5])] segments = [] for identifier, line in enumerate(lines): lineseg = LineSegment(line, identifier=identifier) @@ -173,7 +173,7 @@ def from_cell_lines(cls, lines, shape, **kwargs): # The CellJumpSegments isn't in self.segments, so it # won't be simplified jump_segment = CellJumpSegment( - n.vstack((seg_class.points[-1], nex.points[0]))) + np.vstack((seg_class.points[-1], nex.points[0]))) seg_class.next = jump_segment nex.prev = jump_segment jump_segment.prev = seg_class @@ -213,7 +213,7 @@ def simplify(self, obey_knotting=True): if s1.next == s2.prev: if isinstance(s1.next, Handle): handle = s1.next - points = n.vstack((s1.points, s2.points)) + points = np.vstack((s1.points, s2.points)) new_segment = LineSegment(points, identifier=s1.identifier) handle.prev = s1.prev @@ -226,7 +226,7 @@ def simplify(self, obey_knotting=True): elif s2.next == s1.prev: if isinstance(s2.next, Handle): handle = s2.next - points = n.vstack((s2.points, s1.points)) + points = np.vstack((s2.points, s1.points)) new_segment = LineSegment(points, identifier=s2.identifier) handle.prev = s2.prev @@ -255,7 +255,7 @@ def simplify(self, obey_knotting=True): if not obey_knotting: segment.replace_with_straight_line() return - if not angle_exceeds_func(segment.points, 2.*n.pi, False): + if not 
angle_exceeds_func(segment.points, 2.*np.pi, False): segment.replace_with_straight_line() return if (self.is_knotted_func is not None and @@ -331,9 +331,9 @@ def get_uniform_random_planes(self): '''Returns x, y and z values each uniformly randomly distributed through self.shape.''' xmin, xmax, ymin, ymax, zmin, zmax = self.shape - return (n.array([xmin, ymin, zmin]) + - (0.1 + 0.8*n.random.random(3)) * - n.array(self.get_cell_size())) + return (np.array([xmin, ymin, zmin]) + + (0.1 + 0.8*np.random.random(3)) * + np.array(self.get_cell_size())) def get_cell_size(self): return (self.shape[1] - self.shape[0], @@ -347,7 +347,7 @@ def boundary_lines(self): later.''' xmin, xmax, ymin, ymax, zmin, zmax = self.shape - return n.array([[xmin, ymin, zmin], + return np.array([[xmin, ymin, zmin], [xmax, ymin, zmin], [xmax, ymax, zmin], [xmin, ymax, zmin], @@ -384,7 +384,7 @@ def get_single_line(self, remove_unnecessary_jumps=True): -class Handle(object): +class Handle: '''Simply stores a next and previous LineSegment, with a method to reconstruct the line by walking along the segments. 
@@ -428,7 +428,7 @@ def reconstruct_line(self, remove_unnecessary_jumps=True): comp_segs = [seg for i, seg in enumerate(comp_segs) if i not in invalid_indices] - return resample(n.vstack([seg.points[:-1] for + return resample(np.vstack([seg.points[:-1] for seg in comp_segs] + [comp_segs[0].points[:1] + 0.00001])) @@ -445,7 +445,7 @@ def get_line_components(self): -class LineSegment(object): +class LineSegment: '''Stores a section of line, and can split at boundaries.''' def __init__(self, points, next=None, prev=None, identifier=None): @@ -522,7 +522,7 @@ def replace_with_straight_line(self): about any other parts of the full curve.''' if self.next is not self.prev: - self.points = n.vstack((self.points[0], self.points[-1])) + self.points = np.vstack((self.points[0], self.points[-1])) else: # if the line is a loop, just cut out most points self.points = self.points[::int(len(self.points)/3.)] @@ -545,12 +545,12 @@ def line_to_segments(line, cuts=None, join_ends=True): line = line.copy() if cuts is None: - xmin = n.min(line[:,0]) - 1 - xmax = n.max(line[:,0]) + 1 - ymin = n.min(line[:,1]) - 1 - ymax = n.max(line[:,1]) + 1 - zmin = n.min(line[:,2]) - 1 - zmax = n.max(line[:,2]) + 1 + xmin = np.min(line[:,0]) - 1 + xmax = np.max(line[:,0]) + 1 + ymin = np.min(line[:,1]) - 1 + ymax = np.max(line[:,1]) + 1 + zmin = np.min(line[:,2]) - 1 + zmax = np.max(line[:,2]) + 1 cut_x = (xmax + xmin) / 2. cut_y = (ymax + ymin) / 2. 
@@ -568,22 +568,22 @@ def line_to_segments(line, cuts=None, join_ends=True): dv = nex - cur dx, dy, dz = dv - cross_cut_x = n.sign(cur[0] - cut_x) != n.sign(nex[0] - cut_x) - cross_cut_y = n.sign(cur[1] - cut_y) != n.sign(nex[1] - cut_y) - cross_cut_z = n.sign(cur[2] - cut_z) != n.sign(nex[2] - cut_z) + cross_cut_x = np.sign(cur[0] - cut_x) != np.sign(nex[0] - cut_x) + cross_cut_y = np.sign(cur[1] - cut_y) != np.sign(nex[1] - cut_y) + cross_cut_z = np.sign(cur[2] - cut_z) != np.sign(nex[2] - cut_z) if cross_cut_x and cross_cut_y and cross_cut_z: x_cut_pos = -1 * (cur[0]-cut_x)/dx y_cut_pos = -1 * (cur[1]-cut_y)/dy z_cut_pos = -1 * (cur[2]-cut_z)/dz - order = n.sort((x_cut_pos, y_cut_pos, z_cut_pos)) + order = np.sort((x_cut_pos, y_cut_pos, z_cut_pos)) # assert 0 < x_cut_pos < 1 and 0 < y_cut_pos < 1 and 0 < z_cut_pos < 1 join_point_1 = cur + order[0]*dv join_point_2 = cur + order[1]*dv join_point_3 = cur + order[2]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) - third_seg = n.vstack((join_point_2, join_point_3)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) + third_seg = np.vstack((join_point_2, join_point_3)) line[i] = join_point_3 cut_i = i segments.append(first_seg) @@ -592,11 +592,11 @@ def line_to_segments(line, cuts=None, join_ends=True): elif cross_cut_x and cross_cut_y: x_cut_pos = -1 * (cur[0]-cut_x)/dx y_cut_pos = -1 * (cur[1]-cut_y)/dy - order = n.sort((x_cut_pos, y_cut_pos)) + order = np.sort((x_cut_pos, y_cut_pos)) join_point_1 = cur + order[0]*dv join_point_2 = cur + order[1]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) line[i] = join_point_2 cut_i = i segments.append(first_seg) @@ -604,11 +604,11 @@ def 
line_to_segments(line, cuts=None, join_ends=True): elif cross_cut_x and cross_cut_z: x_cut_pos = -1 * (cur[0]-cut_x)/dx z_cut_pos = -1 * (cur[2]-cut_z)/dz - order = n.sort((x_cut_pos, z_cut_pos)) + order = np.sort((x_cut_pos, z_cut_pos)) join_point_1 = cur + order[0]*dv join_point_2 = cur + order[1]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) line[i] = join_point_2 cut_i = i segments.append(first_seg) @@ -616,11 +616,11 @@ def line_to_segments(line, cuts=None, join_ends=True): elif cross_cut_y and cross_cut_z: y_cut_pos = -1 * (cur[1]-cut_y)/dy z_cut_pos = -1 * (cur[2]-cut_z)/dz - order = n.sort((y_cut_pos, z_cut_pos)) + order = np.sort((y_cut_pos, z_cut_pos)) join_point_1 = cur + order[0]*dv join_point_2 = cur + order[1]*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point_1)) - second_seg = n.vstack((join_point_1, join_point_2)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point_1)) + second_seg = np.vstack((join_point_1, join_point_2)) line[i] = join_point_2 cut_i = i segments.append(first_seg) @@ -629,7 +629,7 @@ def line_to_segments(line, cuts=None, join_ends=True): cut_pos = -1 * (cur[0]-cut_x)/dx assert 0. <= cut_pos <= 1. join_point = cur + cut_pos*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point)) line[i] = join_point cut_i = i segments.append(first_seg) @@ -637,7 +637,7 @@ def line_to_segments(line, cuts=None, join_ends=True): cut_pos = -1 * (cur[1]-cut_y)/dy assert 0. <= cut_pos <= 1. 
join_point = cur + cut_pos*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point)) line[i] = join_point cut_i = i segments.append(first_seg) @@ -646,9 +646,9 @@ def line_to_segments(line, cuts=None, join_ends=True): assert 0. <= cut_pos <= 1. join_point = cur + cut_pos*dv - first_seg = n.vstack((line[cut_i:(i+1)].copy(), join_point)) + first_seg = np.vstack((line[cut_i:(i+1)].copy(), join_point)) line[i] = join_point - # second_seg = n.vstack((join_point, line[(i+1):])) + # second_seg = np.vstack((join_point, line[(i+1):])) cut_i = i segments.append(first_seg) @@ -657,7 +657,7 @@ def line_to_segments(line, cuts=None, join_ends=True): if cut_i > 0: if join_ends: first_seg = segments.pop(0) - segments.append(n.vstack((final_seg, first_seg))) + segments.append(np.vstack((final_seg, first_seg))) else: segments.append(final_seg) else: @@ -684,7 +684,7 @@ def crude_segs_to_segs(csegs, identifier=None): return segs -def angle_exceeds(ps, val=2*n.pi, include_closure=True): +def angle_exceeds(ps, val=2*np.pi, include_closure=True): '''Returns True if the sum of angles along ps exceeds val, else False. 
@@ -705,22 +705,22 @@ def line_to_segments(line, cuts=None, join_ends=True): dv2 = nex2-nex dv2 /= mag(dv2) increment = angle_between(dv, dv2) - if n.isnan(increment): + if np.isnan(increment): return True angle += increment if angle > val: return True - assert not n.isnan(angle) + assert not np.isnan(angle) return False def angle_between(v1, v2): '''Returns angle between v1 and v2, assuming they are normalised to 1.''' # clip becaus v1.dot(v2) may exceed 1 due to floating point - return n.arccos(n.clip(n.abs(v1.dot(v2)), 0., 1.)) + return np.arccos(np.clip(np.abs(v1.dot(v2)), 0., 1.)) def mag(v): - return n.sqrt(v.dot(v)) + return np.sqrt(v.dot(v)) def find_octants_of_segments(segments, cuts): '''For each LineSegment in the passed segments, returns an @@ -767,7 +767,7 @@ def resample(points): changing = False length = len(points) points = points.copy() - keep = n.ones(len(points), dtype=bool) + keep = np.ones(len(points), dtype=bool) cur = points[0] nex = points[1] dv2 = nex-cur @@ -777,14 +777,14 @@ def resample(points): prev = cur cur = nex nex = points[i+1] - if n.all(n.less(n.abs(cur-nex), 0.0001)): + if np.all(np.less(np.abs(cur-nex), 0.0001)): keep[i] = False else: dv1 = cur - prev dv1 /= mag(dv1) dv2 = nex - cur dv2 /= mag(dv2) - if n.abs(dv1.dot(dv2) - 1) < 0.0001: + if np.abs(dv1.dot(dv2) - 1) < 0.0001: keep[i] = False points = points[keep] new_length = len(points) @@ -796,12 +796,12 @@ def remove_nearby_points(points): '''Takes a set of points, and removes those that are no distance from the previous one.''' points = points.copy() - keep = n.ones(len(points), dtype=n.bool) + keep = np.ones(len(points), dtype=bool) comparator = points[0] for i, point in enumerate(points): nex = points[(i+1) % len(points)] - if n.all(n.abs((nex - point)) < 0.00001): + if np.all(np.abs((nex - point)) < 0.00001): keep[i] = False keep[-1] = True @@ -811,14 +811,14 @@ def split_cell_line(line, shape=(10, 10, 10.)): '''Takes a cell lines, 
and cuts at the periodic boundaries (in this represenation, this is where it jumps)''' - shape = n.array(shape) + shape = np.array(shape) line = line.copy() i = 0 out = [] while i < (len(line)-1): cur = line[i] nex = line[i+1] - if n.any(n.abs(nex-cur) > shape): + if np.any(np.abs(nex-cur) > shape): firsthalf = line[:(i+1)] secondhalf = line[(i+1):] out.append(firsthalf) diff --git a/pyknotid/spacecurves/__init__.py b/pyknotid/spacecurves/__init__.py index 42c22da..5bf8c26 100644 --- a/pyknotid/spacecurves/__init__.py +++ b/pyknotid/spacecurves/__init__.py @@ -64,7 +64,7 @@ :alt: A trefoil knot specified by vertex points :scale: 50% -The :doc:`pyknotid.make module<../make/index>` provides functions for +The :mod:`pyknotid.make` module provides functions for creating many types of example knots, such as torus knots or some specific knot types:: diff --git a/pyknotid/spacecurves/ccomplexity.pyx b/pyknotid/spacecurves/ccomplexity.pyx deleted file mode 100644 index ea8dfed..0000000 --- a/pyknotid/spacecurves/ccomplexity.pyx +++ /dev/null @@ -1,179 +0,0 @@ -from __future__ import print_function -import sys - -import numpy as np -cimport numpy as np - -cimport cython - -from libc.math cimport abs, pow, sqrt as csqrt, floor - -cpdef cython_higher_order_writhe(double [:, :] points, - double [:, :] contributions, - long [:] order): - - cdef long i1, i2, i3, i4 - cdef long [:] indices = np.zeros(4, dtype=int) - - cdef double writhe = 0.0 - - for i1 in range(len(points) - 3): - print('\rcython i1', i1, len(points) - 4, end='') - sys.stdout.flush() - indices[0] = i1 - for i2 in range(i1 + 1, len(points) - 1): - indices[1] = i2 - for i3 in range(i2 + 1, len(points) - 1): - indices[2] = i3 - for i4 in range(i3 + 1, len(points) - 1): - indices[3] = i4 - - writhe += (contributions[indices[order[0]], - indices[order[1]]] * - contributions[indices[order[2]], - indices[order[3]]]) - print() - - return writhe - - -cpdef cython_second_order_writhes(double [:, :] points, - double [:, :] 
contributions): - - cdef long i1, i2, i3, i4 - cdef long [:] indices = np.zeros(4, dtype=int) - - cdef double writhe_1 = 0.0 - cdef double writhe_2 = 0.0 - cdef double writhe_3 = 0.0 - - for i1 in range(len(points) - 3): - if i1 % 5 == 0: - print('\rcython i1', i1, len(points) - 4, end='') - sys.stdout.flush() - indices[0] = i1 - for i2 in range(i1 + 1, len(points) - 1): - indices[1] = i2 - for i3 in range(i2 + 1, len(points) - 1): - indices[2] = i3 - for i4 in range(i3 + 1, len(points) - 1): - indices[3] = i4 - - writhe_1 += contributions[i1, i2] * contributions[i3, i4] - writhe_2 += contributions[i1, i3] * contributions[i2, i4] - writhe_3 += contributions[i1, i4] * contributions[i2, i3] - print() - - return (writhe_1 / (2*np.pi)**2, - writhe_2 / (2*np.pi)**2, - writhe_3 / (2*np.pi)**2) - -cpdef cython_second_order_writhes_no_basepoint(double [:, :] points, - double [:, :] contributions): - - cdef long i1, i2, i3, i4 - cdef long [:] indices = np.zeros(4, dtype=np.int) - - cdef double writhe_1 = 0.0 - cdef double writhe_2 = 0.0 - cdef double writhe_3 = 0.0 - - for i1 in range(len(points) - 1): - if i1 % 5 == 0: - print('\rnbp cython i1', i1, len(points) - 4, end='') - sys.stdout.flush() - indices[0] = i1 - possible_i2s = list(range(i1 + 1, len(points) - 1)) + list(range(i1 )) - for i2 in possible_i2s: - indices[1] = i2 - if i2 > i1: - possible_i3s = list(range(i2 + 1, len(points) - 1)) + list(range(i1 )) - else: - possible_i3s = list(range(i2 + 1, i1 )) - for i3 in possible_i3s: - indices[2] = i3 - if i3 > i1: - possible_i4s = list(range(i3 + 1, len(points) - 1)) + list(range(i1 )) - else: - possible_i4s = list(range(i3 + 1, i1 )) - for i4 in possible_i4s: - # print('i1, i2, i3, i4 = {}, {}, {}, {}'.format(i1, i2, i3, i4)) - indices[3] = i4 - - writhe_1 += contributions[i1, i2] * contributions[i3, i4] - writhe_2 += contributions[i1, i3] * contributions[i2, i4] - writhe_3 += contributions[i1, i4] * contributions[i2, i3] - print() - - return (writhe_1 / (2*np.pi)**2, 
- writhe_2 / (2*np.pi)**2, - writhe_3 / (2*np.pi)**2) - - -# cpdef writhing_matrix(double [:, :] points): -# for i1 in range(len(points) - 3): -# print('\ri = {} / {}'.format(i1, len(points) - 4), end='') -# sys.stdout.flush() -# p1 = points[i1] -# for i2 in range(i1 + 2, len(points) - 1): -# p2 = points[i2] - -# p4 = points[i2 + 1] - -# r12 = p2 - p1 -# r13 = p3 - p1 -# r14 = p4 - p1 -# r23 = p3 - p2 -# r24 = p4 - p2 -# r34 = p4 - p3 - -# n1 = np.cross(r13, r14) -# n1 /= np.sqrt(np.sum(n1**2)) - -# n2 = np.cross(r14, r24) -# n2 /= np.sqrt(np.sum(n2**2)) - -# n3 = np.cross(r24, r23) -# if np.any(np.abs(n3) > 0.): -# n3 /= np.sqrt(np.sum(n3**2)) - -# n4 = np.cross(r23, r13) -# if np.any(np.abs(n4) > 0.): -# n4 /= np.sqrt(np.sum(n4**2)) - -# if np.any(np.isnan(n1)): -# print('!!! nan') -# print(i1, i2) -# print(p1, p2, p3, p4) -# print('nan', r23, r13, np.cross(r23, r13)) - -# # When the vectors are nearly the same, floating point -# # errors can sometimes make the output a tiny bit higher -# # than 1 -# t1, t2, t3, t4 = np.clip([n1.dot(n2), -# n2.dot(n3), -# n3.dot(n4), -# n4.dot(n1)], -# -1, 1) - -# writhe_contribution = (np.arcsin(t1) + -# np.arcsin(t2) + -# np.arcsin(t3) + -# np.arcsin(t4)) - -# if np.isnan(writhe_contribution): -# print() -# print('nan!') -# print(i1, i2, n1, n2, n3, n4, writhe_contribution) -# print(n1.dot(n2) > 1, n2.dot(n3) > 1, n3.dot(n4) > 1, n4.dot(n1) > 1) -# print(n1.dot(n2), np.arcsin(n1.dot(n2))) -# print(n2.dot(n3), np.arcsin(n2.dot(n3))) -# print(n3.dot(n4), np.arcsin(n3.dot(n4))) -# print(n4.dot(n1), np.arcsin(n4.dot(n1))) - -# writhe_contribution *= np.sign(np.cross(r34, r12).dot(r13)) - -# contributions[i1, i2] = writhe_contribution -# contributions[i2, i1] = writhe_contribution - -# return contributions diff --git a/pyknotid/spacecurves/chelpers.pyx b/pyknotid/spacecurves/chelpers.pyx deleted file mode 100644 index 5832057..0000000 --- a/pyknotid/spacecurves/chelpers.pyx +++ /dev/null @@ -1,165 +0,0 @@ -''' -Cython functions for 
space curve analysis. -''' - -import numpy as n -cimport numpy as n - -cimport cython - -from libc.math cimport abs, pow, sqrt as csqrt, floor - -cpdef find_crossings(double [:] v, double [:] dv, - double [:, :] points, - double [:] segment_lengths, - long current_index, - long comparison_index, - double max_segment_length, - long jump_mode=1 - ): - ''' - Searches for crossings between the given vector and any other - vector in the - list of points, returning all of them as a list. - - Parameters - ---------- - v0 : ndarray - The current point, a 1D vector. - dv : ndarray - The vector connecting the current point to the next one - points : ndarray - The array or (x, y) values of all the other points - segment_lengths : ndarray - The length of each segment joining a point to the - next one. - current_index : long - The index of the point currently being tested. - comparison_index : long - The index of the first comparison point - jump_mode : int - 1 to check every jump distance, 2 to jump based on - the maximum one, 3 to never jump and check the length - of every step. 
- ''' - - cdef list crossings = [] - cdef double vx = v[0] - cdef double vy = v[1] - cdef double vz = v[2] - cdef double dvx = dv[0] - cdef double dvy = dv[1] - cdef double dvz = dv[2] - - cdef double twice_max_segment_length = 2*max_segment_length - - cdef int i = 0 - cdef double distance, distance_travelled - cdef double [:] point - cdef double [:] next_point - cdef double jump_x, jump_y, jump_z - cdef double pz - cdef double dpz - cdef long intersect - cdef double intersect_i, intersect_j - cdef double crossing_sign - cdef double crossing_direction - cdef long jumps - cdef long num_jumps - cdef int already_jumped = 0 - - while i < len(points) - 1: - point = points[i] - distance = csqrt(pow(vx - point[0], 2) + pow(vy - point[1], 2)) - if distance < twice_max_segment_length or already_jumped: - already_jumped = 0 - next_point = points[i+1] - jump_x = next_point[0] - point[0] - jump_y = next_point[1] - point[1] - jump_z = next_point[2] - point[2] - - - intersect, intersect_i, intersect_j = do_vectors_intersect( - vx, vy, dvx, dvy, point[0], point[1], - jump_x, jump_y) - - if intersect: - pz = point[2] - dpz = jump_z - - crossing_sign = sign((vz + intersect_i * dvz) - - (pz + intersect_j * dpz)) - - crossing_direction = sign(cross_product( - dvx, dvy, jump_x, jump_y)) - - crossings.append([current_index + intersect_i, - (comparison_index + intersect_j + - i), - crossing_sign, - crossing_sign * crossing_direction]) - crossings.append([(comparison_index + intersect_j + - i), - current_index + intersect_i, - -1. * crossing_sign, - crossing_sign * crossing_direction]) - i += 1 - - elif jump_mode == 3: - i += 1 # naive mode - check everything - already_jumped = 1 - elif jump_mode == 2: - num_jumps = (floor(distance / max_segment_length)) - 1 - if num_jumps < 1: - num_jumps = 1 - i += num_jumps - already_jumped = 1 - else: # Catch all other jump modes - distance_travelled = 0. 
- jumps = 0 - while (distance_travelled < (distance - max_segment_length) and - i < len(points)): - jumps += 1 - distance_travelled += segment_lengths[i] - i += 1 - if jumps > 1: - i -= 2 - already_jumped = 1 - # This keeps jumping until we might be close enough to intersect, - # without doing vector arithmetic at every step - - return crossings - - - - -cdef tuple do_vectors_intersect(double px, double py, double dpx, double dpy, - double qx, double qy, double dqx, double dqy): - """Takes four vectors p, dp and q, dq, then tests whether they cross in - the dp/dq region. Returns this boolean, and the (fractional) point where - the crossing actually occurs. - """ - cdef double t, u - - if abs(cross_product(dpx, dpy, dqx, dqy)) < 0.000001: - return (0, 0., 0.) - - t = cross_product(qx - px, qy - py, dqx, dqy) / cross_product(dpx, dpy, - dqx, dqy) - if t < 1.0 and t > 0.0: - u = cross_product(qx - px, qy - py, dpx, dpy) / cross_product(dpx, dpy, - dqx, dqy) - if u < 1.0 and u > 0.0: - return (1, t, u) - return (0, -1., -1.) - -cpdef double cross_product(double px, double py, double qx, double qy): - '''Simple cython cross product for 2D vectors.''' - return px * qy - py * qx - -cpdef double sign(double a): - return (1. if a > 0. else (-1. if a < 0. else 0.)) - -cpdef double mag_difference(double [:] a, double [:] b): - '''The magnitude of the vector joining a and b''' - return csqrt((b[0] - a[0])**2 + (b[1] - a[1])**2) diff --git a/pyknotid/spacecurves/complexity.py b/pyknotid/spacecurves/complexity.py index cb493d7..9e452e1 100644 --- a/pyknotid/spacecurves/complexity.py +++ b/pyknotid/spacecurves/complexity.py @@ -6,14 +6,11 @@ curves. ''' -from __future__ import print_function - from . import Knot from . 
import SpaceCurve from pyknotid.spacecurves.rotation import (get_rotation_angles, rotate_to_top) from pyknotid.utils import vprint -import numpy as n import numpy as np import sys @@ -56,9 +53,9 @@ def writhe_and_crossing_number(points, number_of_samples=10, k._apply_matrix(rotate_to_top(theta, phi)) crossings = k.raw_crossings(include_closure=include_closure, **kwargs) crossing_numbers.append(len(crossings) / 2) - writhes.append(n.sum(crossings[:, 3]) / 2. if len(crossings) else 0) + writhes.append(np.sum(crossings[:, 3]) / 2. if len(crossings) else 0) - return n.average(crossing_numbers), n.average(writhes) + return np.average(crossing_numbers), np.average(writhes) # def writhe_integral(points, closed=False): @@ -242,11 +239,11 @@ def higher_order_writhe_integral(points, order=(1, 3, 2, 4), try_cython=True): if try_cython: try: - from pyknotid.spacecurves.ccomplexity import cython_higher_order_writhe + from pyknotid.spacecurves.ncomplexity import higher_order_writhe order = np.array(order) - how_func = cython_higher_order_writhe + how_func = higher_order_writhe except ImportError: - print('Failed to import ccomplexity, using pure python instead') + print('Failed to import ncomplexity (numba), using pure python instead') writhe = how_func(points, contributions, order) @@ -440,11 +437,11 @@ def second_order_writhes(points, try_cython=True, basepoint=True): if basepoint: - from pyknotid.spacecurves.ccomplexity import cython_second_order_writhes + from pyknotid.spacecurves.ncomplexity import second_order_writhes else: - from pyknotid.spacecurves.ccomplexity import cython_second_order_writhes_no_basepoint as cython_second_order_writhes + from pyknotid.spacecurves.ncomplexity import second_order_writhes_no_basepoint as second_order_writhes - return cython_second_order_writhes(points, contributions) + return second_order_writhes(points, contributions) def second_order_twist(points, z): diff --git a/pyknotid/spacecurves/geometry.py b/pyknotid/spacecurves/geometry.py 
index d9059e8..eccddb4 100644 --- a/pyknotid/spacecurves/geometry.py +++ b/pyknotid/spacecurves/geometry.py @@ -5,7 +5,7 @@ Functions for evaluating geometrical properties of space curves. ''' -import numpy as n +import numpy as np def arclength(points, include_closure=True): ''' @@ -24,9 +24,9 @@ def arclength(points, include_closure=True): if len(points) < 2: return 0. - lengths = n.roll(points, -1, axis=0) - points - length_mags = n.sqrt(n.sum(lengths*lengths, axis=1)) - arclength = n.sum(length_mags[:-1]) + lengths = np.roll(points, -1, axis=0) - points + length_mags = np.sqrt(np.sum(lengths*lengths, axis=1)) + arclength = np.sum(length_mags[:-1]) if include_closure: arclength += length_mags[-1] return arclength @@ -42,11 +42,11 @@ def radius_of_gyration(points): points : array-like Nx3 array of points in the line. ''' - av_pos = n.average(points, axis=0) + av_pos = np.average(points, axis=0) diffs = (points - av_pos)**2 - rogs = n.sum(diffs, 1) - rog = n.average(rogs) - return n.sqrt(rog) + rogs = np.sum(diffs, 1) + rog = np.average(rogs) + return np.sqrt(rog) # def persistences(points, step=None): @@ -56,4 +56,4 @@ def radius_of_gyration(points): # if step is None: # import pyknot. -# step = n.average +# step = np.average diff --git a/pyknotid/spacecurves/helpers.py b/pyknotid/spacecurves/helpers.py index 5256496..e74446a 100644 --- a/pyknotid/spacecurves/helpers.py +++ b/pyknotid/spacecurves/helpers.py @@ -2,10 +2,10 @@ Python version of cython functions for space curve analysis. 
''' -import numpy as n +import numpy as np -csqrt = n.sqrt -floor = n.floor +csqrt = np.sqrt +floor = np.floor def find_crossings(v, dv, diff --git a/pyknotid/spacecurves/knot.py b/pyknotid/spacecurves/knot.py index 7c7603f..3655d13 100644 --- a/pyknotid/spacecurves/knot.py +++ b/pyknotid/spacecurves/knot.py @@ -10,9 +10,6 @@ ''' -from __future__ import division - -import numpy as n import numpy as np from pyknotid.spacecurves.spacecurve import SpaceCurve @@ -106,11 +103,11 @@ def alexander_at_root(self, root, round=True, **kwargs): ''' if hasattr(root, '__contains__'): return [self.alexander_at_root(r, round=round, **kwargs) for r in root] - variable = n.exp(2 * n.pi * 1.j / root) + variable = np.exp(2 * np.pi * 1.j / root) value = self.alexander_polynomial(variable, **kwargs) - value = n.abs(value) + value = np.abs(value) if round and root in (1, 2, 3, 4): - value = int(n.round(value)) + value = int(np.round(value)) return value def determinant(self): @@ -180,9 +177,9 @@ def hyperbolic_volume(self): digits, e.g. 0.00021 with 1 digit precision = 2E-4. solution_type : str The solution type of the manifold. Normally one of: + - 'contains degenerate tetrahedra' => may not be a valid result - - 'all tetrahedra positively oriented' => - really probably hyperbolic + - 'all tetrahedra positively oriented' => really probably hyperbolic ''' from ..invariants import hyperbolic_volume m = self.exterior_manifold() @@ -307,16 +304,16 @@ def slipknot_alexander(self, num_samples=0, **kwargs): means to sample at all indices. **kwargs : Keyword arguments, passed directly to - :meth:`pyknotid.spacecurves.openknot.OpenKnot.alexander_fractions. + :meth:`pyknotid.spacecurves.openknot.OpenKnot.alexander_fractions`. 
''' points = self.points if num_samples == 0: num_samples = len(points) - indices = n.linspace(0, len(points), num_samples).astype(n.int) + indices = np.linspace(0, len(points), num_samples).astype(np.int_) from pyknotid.spacecurves.openknot import OpenKnot - arr = n.ones((num_samples, num_samples)) + arr = np.ones((num_samples, num_samples)) for index, points_index in enumerate(indices): self._vprint('\rindex = {} / {}'.format(index, len(indices)), False) @@ -334,8 +331,8 @@ def slipknot_alexander(self, num_samples=0, **kwargs): fig, ax = plt.subplots() ax.imshow(arr, interpolation='none') - ax.plot(n.linspace(0, num_samples, 100) - 0.5, - n.linspace(num_samples, 0, 100) - 0.5, + ax.plot(np.linspace(0, num_samples, 100) - 0.5, + np.linspace(num_samples, 0, 100) - 0.5, color='black', linewidth=3) @@ -366,7 +363,7 @@ def isolate_knot(self): return start, end roll_dist = int(0.25*len(self.points)) - k2 = OpenKnot(n.roll(self.points, roll_dist, axis=0), verbose=False) + k2 = OpenKnot(np.roll(self.points, roll_dist, axis=0), verbose=False) start, end = _isolate_open_knot(k2, determinant, 0, len(k2))[1:] print('se', start, end) start -= roll_dist @@ -391,7 +388,7 @@ def plot_isolated(self, **kwargs): start, end = self.isolate_knot() if end < start: end, start = start, end - mus = n.zeros(len(self.points)) + mus = np.zeros(len(self.points)) mus[start:end+1] = 0.4 if end - start > 0.6*len(self) or end == start: mus = 0.4 - mus @@ -434,11 +431,11 @@ def slip_triangle(self, func): new_cs = new_cs[new_cs[:, 0] != new_cs[-1, 0]] new_r._remove_crossing(new_r._gauss_code[0][-1, 0]) - new_points = points[int(n.ceil(new_start)):int(new_end)] + new_points = points[int(np.ceil(new_start)):int(new_end)] new_start_remainder = new_start % 1 new_end_remainder = new_end % 1 - new_points = n.vstack((points[int(new_start)] + new_start_remainder * (points[int(new_start) + 1] - points[int(new_start)]), + new_points = np.vstack((points[int(new_start)] + new_start_remainder * 
(points[int(new_start) + 1] - points[int(new_start)]), new_points, points[int(new_end)] + new_end_remainder * (points[int(new_end) + 1] - points[int(new_end)]))) # results[(i, j)] = points[int(new_start):int(new_end)] @@ -449,8 +446,8 @@ def slip_triangle(self, func): from matplotlib.gridspec import GridSpec points = self.points - mins = n.min(self.points, axis=0) - maxs = n.max(self.points, axis=0) + mins = np.min(self.points, axis=0) + maxs = np.max(self.points, axis=0) span = max((maxs - mins)[:2]) size = span * 1.16 @@ -726,7 +723,7 @@ def plot_secant_crossings(self, radius=None, **kwargs): def mag(v): - return n.sqrt(v.dot(v)) + return np.sqrt(v.dot(v)) def _isolate_open_knot(k, det, start, end): diff --git a/pyknotid/spacecurves/link.py b/pyknotid/spacecurves/link.py index 563b749..b217a4f 100644 --- a/pyknotid/spacecurves/link.py +++ b/pyknotid/spacecurves/link.py @@ -16,26 +16,25 @@ ~~~~~~~~~~~~~~~~~ ''' -import numpy as n +import numpy as np try: - from pyknotid.spacecurves import chelpers -except: - from pyknotid.spacecurves import helpers as chelpers -from pyknotid.spacecurves import helpers + from pyknotid.spacecurves import nhelpers +except ImportError: + from pyknotid.spacecurves import helpers as nhelpers from pyknotid.spacecurves.knot import Knot from pyknotid.visualise import plot_projection from pyknotid.utils import (vprint, get_rotation_matrix, ensure_shape_tuple) -class Link(object): +class Link: ''' Class for holding the vertices of multiple lines, providing helper methods for convenient manipulation and analysis. The data is stored - internally as multiple :class:`Knot`s. + internally as multiple :class:`Knot` objects. 
Parameters ---------- @@ -94,7 +93,7 @@ def from_periodic_lines(cls, lines, shape, perturb=True): for line in lines] link = cls(lines) if perturb: - link.translate(n.array([0.00123, 0.00231, 0.00321])) + link.translate(np.array([0.00123, 0.00231, 0.00321])) link.rotate() return link @@ -154,17 +153,14 @@ def raw_crossings(self, mode='use_max_jump', only_with_other_lines=True, self._crossings[0] == only_with_other_lines): return self._crossings[1] - if try_cython: - helpers_module = chelpers - else: - helpers_module = helpers + helpers_module = nhelpers lines = self.lines # Get length of each line line_lengths = [0.] line_lengths.extend([line.arclength() for line in lines]) - cumulative_lengths = n.cumsum(line_lengths)[:-1] + cumulative_lengths = np.cumsum(line_lengths)[:-1] if only_with_other_lines: crossings = [[] for _ in lines] @@ -183,16 +179,16 @@ def raw_crossings(self, mode='use_max_jump', only_with_other_lines=True, 'naive': 3}[mode] segment_lengths = [ - n.roll(line.points[:, :2], -1, axis=0) - line.points[:, :2] for + np.roll(line.points[:, :2], -1, axis=0) - line.points[:, :2] for line in lines] segment_lengths = [ - n.sqrt(n.sum(lengths * lengths, axis=1)) for + np.sqrt(np.sum(lengths * lengths, axis=1)) for lengths in segment_lengths] if include_closures: - max_segment_length = n.max(n.hstack(segment_lengths)) + max_segment_length = np.max(np.hstack(segment_lengths)) else: - max_segment_length = n.max(n.hstack([ + max_segment_length = np.max(np.hstack([ lengths[:-1] for lengths in segment_lengths])) for line_index, line in enumerate(lines): @@ -206,7 +202,7 @@ def raw_crossings(self, mode='use_max_jump', only_with_other_lines=True, points = line.points comparison_points = other_line.points if include_closures: - comparison_points = n.vstack((comparison_points, + comparison_points = np.vstack((comparison_points, comparison_points[:1])) other_seg_lengths = segment_lengths[other_index] @@ -236,10 +232,10 @@ def raw_crossings(self, mode='use_max_jump', 
only_with_other_lines=True, if not len(new_crossings): continue - first_crossings = n.array(new_crossings[::2]) + first_crossings = np.array(new_crossings[::2]) first_crossings[:, 0] += cumulative_lengths[line_index] first_crossings[:, 1] += cumulative_lengths[other_index] - sec_crossings = n.array(new_crossings[1::2]) + sec_crossings = np.array(new_crossings[1::2]) sec_crossings[:, 0] += cumulative_lengths[other_index] sec_crossings[:, 1] += cumulative_lengths[line_index] @@ -249,7 +245,7 @@ def raw_crossings(self, mode='use_max_jump', only_with_other_lines=True, self._vprint('\n{} crossings found\n'.format( [len(cs) / 2 for cs in crossings])) [cs.sort(key=lambda s: s[0]) for cs in crossings] - crossings = [n.array(cs) for cs in crossings] + crossings = [np.array(cs) for cs in crossings] self._crossings = (only_with_other_lines, crossings) return crossings @@ -287,7 +283,7 @@ def rotate(self, angles=None): are used. Defaults to None. ''' if angles is None: - angles = n.random.random(3) + angles = np.random.random(3) for line in self.lines: line.rotate(angles) self._reset_cache() @@ -321,7 +317,7 @@ def plot_projection(self, with_crossings=True, mark_start=False, include_self_crossings=False): all_points = [line.points for line in self.lines] lengths = [k.arclength() for k in self.lines] - cum_lengths = n.hstack([[0], n.cumsum(lengths)]) + cum_lengths = np.hstack([[0], np.cumsum(lengths)]) crossings = None plot_crossings = [] @@ -336,7 +332,7 @@ def plot_projection(self, with_crossings=True, mark_start=False, x, y, over, orientation = crossing # Work out which line the crossing is on - # next_x_start = n.argmax(cum_lengths > x) + # next_x_start = np.argmax(cum_lengths > x) # x_line = next_x_start - 1 # x -= cum_lengths[x_line] x -= remove_length @@ -346,11 +342,11 @@ def plot_projection(self, with_crossings=True, mark_start=False, dr = points[(xint+1) % len(points)] - r plot_crossings.append(r + (x-xint) * dr) fig, ax = plot_projection(all_points[0], - 
crossings=n.array(plot_crossings), + crossings=np.array(plot_crossings), mark_start=mark_start) for line in all_points[1:]: plot_projection(line, - crossings=n.array(plot_crossings), + crossings=np.array(plot_crossings), mark_start=mark_start, fig_ax=(fig, ax)) @@ -361,7 +357,7 @@ def octree_simplify(self, runs=1, plot=False, rotate=True, obey_knotting=False, **kwargs): ''' Simplifies the curves via the octree reduction of - :module:`pyknotid.simplify.octree`. + :mod:`pyknotid.simplify.octree`. Parameters ---------- @@ -377,20 +373,20 @@ def octree_simplify(self, runs=1, plot=False, rotate=True, Whether to not let the line pass through itself. Defaults to False - knotting of individual components will be ignored! This is *much* faster than the alternative. - - kwargs are passed to the :class:`pyknotid.simplify.octree.OctreeCell` - constructor. + **kwargs + Additional keyword arguments are passed to the + :class:`pyknotid.simplify.octree.OctreeCell` constructor. ''' from ..simplify.octree import OctreeCell, remove_nearby_points for line in self.lines: line.points = remove_nearby_points(line.points) for i in range(runs): - if n.sum([len(knot.points) for knot in self.lines]) > 30: + if np.sum([len(knot.points) for knot in self.lines]) > 30: vprint('\rRun {} of {}, {} points remain'.format( i, runs, len(self)), False, self.verbose) if rotate: - rot_mat = get_rotation_matrix(n.random.random(3)) + rot_mat = get_rotation_matrix(np.random.random(3)) for line in self.lines: line._apply_matrix(rot_mat) @@ -412,7 +408,7 @@ def octree_simplify(self, runs=1, plot=False, rotate=True, vprint('\nReduced to {} points'.format(len(self))) def __len__(self): - return n.sum(map(len, self.lines)) + return np.sum(map(len, self.lines)) def arclength(self, include_closures=True): ''' @@ -424,7 +420,7 @@ def arclength(self, include_closures=True): Whether to include the distance between the final and first points of each line. Defaults to True. 
''' - return n.sum(k.arclength(include_closures) for k in self.lines) + return np.sum(k.arclength(include_closures) for k in self.lines) def _vprint(self, s, newline=True): '''Prints s, with optional newline. Intended for internal use @@ -466,8 +462,8 @@ def linking_number(self, **kwargs): number = 0 for line in crossings: if len(line): - number += n.sum(line[:, 3]) - return int(n.abs(number / 2)) + number += np.sum(line[:, 3]) + return int(np.abs(number / 2)) def smooth(self, *args, **kwargs): ''' diff --git a/pyknotid/spacecurves/ncomplexity.py b/pyknotid/spacecurves/ncomplexity.py new file mode 100644 index 0000000..9bcee15 --- /dev/null +++ b/pyknotid/spacecurves/ncomplexity.py @@ -0,0 +1,165 @@ +""" +Numba implementation of complexity functions (replacing ccomplexity.pyx). + +This module requires numba for performance. If numba is not available, +import will fail and pure Python fallbacks will be used. +""" +import sys +import numpy as np + +try: + import numba +except ImportError: + raise ImportError("numba is required for ncomplexity. 
Install with: pip install numba") + + +@numba.jit(nopython=True) +def _higher_order_writhe_inner(points, contributions, order): + """Inner loop for higher_order_writhe (JIT compiled).""" + indices = np.zeros(4, dtype=np.int64) + writhe = 0.0 + n = len(points) + + for i1 in range(n - 3): + indices[0] = i1 + for i2 in range(i1 + 1, n - 1): + indices[1] = i2 + for i3 in range(i2 + 1, n - 1): + indices[2] = i3 + for i4 in range(i3 + 1, n - 1): + indices[3] = i4 + writhe += (contributions[indices[order[0]], indices[order[1]]] * + contributions[indices[order[2]], indices[order[3]]]) + return writhe + +@numba.jit(nopython=True) +def _second_order_writhes_inner(points, contributions): + """Inner loop for second_order_writhes (JIT compiled).""" + writhe_1 = 0.0 + writhe_2 = 0.0 + writhe_3 = 0.0 + n = len(points) + + for i1 in range(n - 3): + for i2 in range(i1 + 1, n - 1): + for i3 in range(i2 + 1, n - 1): + for i4 in range(i3 + 1, n - 1): + writhe_1 += contributions[i1, i2] * contributions[i3, i4] + writhe_2 += contributions[i1, i3] * contributions[i2, i4] + writhe_3 += contributions[i1, i4] * contributions[i2, i3] + + pi2_squared = (2 * np.pi) ** 2 + return (writhe_1 / pi2_squared, + writhe_2 / pi2_squared, + writhe_3 / pi2_squared) + +@numba.jit(nopython=True) +def _second_order_writhes_no_basepoint_inner(points, contributions): + """Inner loop for second_order_writhes_no_basepoint (JIT compiled).""" + writhe_1 = 0.0 + writhe_2 = 0.0 + writhe_3 = 0.0 + n = len(points) + + for i1 in range(n - 1): + # Build possible_i2s: list(range(i1 + 1, n - 1)) + list(range(i1)) + for i2_pass in range(2): + if i2_pass == 0: + i2_start, i2_end = i1 + 1, n - 1 + else: + i2_start, i2_end = 0, i1 + + for i2 in range(i2_start, i2_end): + # Build possible_i3s based on i2 + if i2 > i1: + # list(range(i2 + 1, n - 1)) + list(range(i1)) + for i3_pass in range(2): + if i3_pass == 0: + i3_start, i3_end = i2 + 1, n - 1 + else: + i3_start, i3_end = 0, i1 + + for i3 in range(i3_start, i3_end): + # 
Build possible_i4s based on i3 + if i3 > i1: + # list(range(i3 + 1, n - 1)) + list(range(i1)) + for i4_pass in range(2): + if i4_pass == 0: + i4_start, i4_end = i3 + 1, n - 1 + else: + i4_start, i4_end = 0, i1 + + for i4 in range(i4_start, i4_end): + writhe_1 += contributions[i1, i2] * contributions[i3, i4] + writhe_2 += contributions[i1, i3] * contributions[i2, i4] + writhe_3 += contributions[i1, i4] * contributions[i2, i3] + else: + # list(range(i3 + 1, i1)) + for i4 in range(i3 + 1, i1): + writhe_1 += contributions[i1, i2] * contributions[i3, i4] + writhe_2 += contributions[i1, i3] * contributions[i2, i4] + writhe_3 += contributions[i1, i4] * contributions[i2, i3] + else: + # list(range(i2 + 1, i1)) + for i3 in range(i2 + 1, i1): + # i3 is in range(i2 + 1, i1), so i3 < i1 + # possible_i4s = list(range(i3 + 1, i1)) + for i4 in range(i3 + 1, i1): + writhe_1 += contributions[i1, i2] * contributions[i3, i4] + writhe_2 += contributions[i1, i3] * contributions[i2, i4] + writhe_3 += contributions[i1, i4] * contributions[i2, i3] + + pi2_squared = (2 * np.pi) ** 2 + return (writhe_1 / pi2_squared, + writhe_2 / pi2_squared, + writhe_3 / pi2_squared) + + +def higher_order_writhe(points, contributions, order): + """ + Calculate higher order writhe. + + Uses Numba for performance. + """ + # Print progress (can't do this inside numba nopython mode) + for i1 in range(len(points) - 3): + print('\rcython i1', i1, len(points) - 4, end='') + sys.stdout.flush() + + result = _higher_order_writhe_inner(points, contributions, order) + print() + return result + + +def second_order_writhes(points, contributions): + """ + Calculate second order writhes. + + Uses Numba for performance. 
+ """ + # Print progress + for i1 in range(len(points) - 3): + if i1 % 5 == 0: + print('\rcython i1', i1, len(points) - 4, end='') + sys.stdout.flush() + + result = _second_order_writhes_inner(points, contributions) + print() + return result + + +def second_order_writhes_no_basepoint(points, contributions): + """ + Calculate second order writhes without basepoint. + + Uses Numba for performance. + """ + # Print progress + for i1 in range(len(points) - 1): + if i1 % 5 == 0: + print('\rnbp cython i1', i1, len(points) - 4, end='') + sys.stdout.flush() + + result = _second_order_writhes_no_basepoint_inner(points, contributions) + print() + return result diff --git a/pyknotid/spacecurves/nhelpers.py b/pyknotid/spacecurves/nhelpers.py new file mode 100644 index 0000000..cf7d895 --- /dev/null +++ b/pyknotid/spacecurves/nhelpers.py @@ -0,0 +1,187 @@ +""" +Numba implementation of space curve helper functions (replacing chelpers.pyx). + +This module requires numba for performance. If numba is not available, +import will fail and the pure Python 'helpers' module will be used instead. +""" +import numpy as np + +try: + import numba + from numba.typed import List +except ImportError: + raise ImportError("numba is required for nhelpers. Install with: pip install numba") + + +@numba.jit(nopython=True) +def cross_product(px, py, qx, qy): + """Simple 2D cross product.""" + return px * qy - py * qx + +@numba.jit(nopython=True) +def sign(a): + """Return sign of a number.""" + if a > 0.: + return 1. + elif a < 0.: + return -1. + return 0. + +@numba.jit(nopython=True) +def mag_difference(a, b): + """The magnitude of the vector joining a and b.""" + return np.sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2) + +@numba.jit(nopython=True) +def _do_vectors_intersect(px, py, dpx, dpy, qx, qy, dqx, dqy): + """ + Takes four vectors p, dp and q, dq, then tests whether they cross in + the dp/dq region. Returns this boolean, and the (fractional) point where + the crossing actually occurs. 
+ """ + cross_prod = cross_product(dpx, dpy, dqx, dqy) + if abs(cross_prod) < 0.000001: + return (0, 0., 0.) + + t = cross_product(qx - px, qy - py, dqx, dqy) / cross_prod + if t < 1.0 and t > 0.0: + u = cross_product(qx - px, qy - py, dpx, dpy) / cross_prod + if u < 1.0 and u > 0.0: + return (1, t, u) + return (0, -1., -1.) + +@numba.jit(nopython=True) +def _find_crossings_inner(v, dv, points, segment_lengths, + current_index, comparison_index, + max_segment_length, jump_mode): + """ + Inner function for finding crossings (JIT compiled). + + Returns crossings as a 2D array instead of list of lists. + """ + vx = v[0] + vy = v[1] + vz = v[2] + dvx = dv[0] + dvy = dv[1] + dvz = dv[2] + + twice_max_segment_length = 2 * max_segment_length + + # Use a pre-allocated array to store crossings (max possible) + # Each crossing creates 2 entries, max is len(points) crossings + max_crossings = len(points) * 2 + crossings_data = np.zeros((max_crossings, 4), dtype=np.float64) + crossing_count = 0 + + i = 0 + already_jumped = 0 + + while i < len(points) - 1: + point = points[i] + distance = np.sqrt((vx - point[0])**2 + (vy - point[1])**2) + + if distance < twice_max_segment_length or already_jumped: + already_jumped = 0 + next_point = points[i + 1] + jump_x = next_point[0] - point[0] + jump_y = next_point[1] - point[1] + jump_z = next_point[2] - point[2] + + intersect, intersect_i, intersect_j = _do_vectors_intersect( + vx, vy, dvx, dvy, point[0], point[1], + jump_x, jump_y) + + if intersect: + pz = point[2] + dpz = jump_z + + crossing_sign = sign((vz + intersect_i * dvz) - + (pz + intersect_j * dpz)) + + crossing_direction = sign(cross_product( + dvx, dvy, jump_x, jump_y)) + + # Add first crossing + crossings_data[crossing_count, 0] = current_index + intersect_i + crossings_data[crossing_count, 1] = comparison_index + intersect_j + i + crossings_data[crossing_count, 2] = crossing_sign + crossings_data[crossing_count, 3] = crossing_sign * crossing_direction + crossing_count += 1 
+ + # Add second crossing (reverse) + crossings_data[crossing_count, 0] = comparison_index + intersect_j + i + crossings_data[crossing_count, 1] = current_index + intersect_i + crossings_data[crossing_count, 2] = -1. * crossing_sign + crossings_data[crossing_count, 3] = crossing_sign * crossing_direction + crossing_count += 1 + + i += 1 + + elif jump_mode == 3: + i += 1 # naive mode - check everything + already_jumped = 1 + elif jump_mode == 2: + num_jumps = int(np.floor(distance / max_segment_length)) - 1 + if num_jumps < 1: + num_jumps = 1 + i += num_jumps + already_jumped = 1 + else: # Catch all other jump modes + distance_travelled = 0. + jumps = 0 + while (distance_travelled < (distance - max_segment_length) and + i < len(points)): + jumps += 1 + distance_travelled += segment_lengths[i] + i += 1 + if jumps > 1: + i -= 2 + already_jumped = 1 + + # Return only the filled part of the array + return crossings_data[:crossing_count] + + +def find_crossings(v, dv, points, segment_lengths, current_index, + comparison_index, max_segment_length, jump_mode=1): + """ + Searches for crossings between the given vector and any other + vector in the list of points, returning all of them as a list. + + Now uses Numba for performance instead of Cython. + + Parameters + ---------- + v0 : ndarray + The current point, a 1D vector. + dv : ndarray + The vector connecting the current point to the next one + points : ndarray + The array or (x, y) values of all the other points + segment_lengths : ndarray + The length of each segment joining a point to the + next one. + current_index : long + The index of the point currently being tested. + comparison_index : long + The index of the first comparison point + jump_mode : int + 1 to check every jump distance, 2 to jump based on + the maximum one, 3 to never jump and check the length + of every step. 
+ + Returns + ------- + list + List of crossing data, each entry is [index1, index2, sign, direction] + """ + # Get crossings as array + crossings_array = _find_crossings_inner( + v, dv, points, segment_lengths, + current_index, comparison_index, + max_segment_length, jump_mode + ) + + # Convert to list of lists for backward compatibility + return [list(row) for row in crossings_array] diff --git a/pyknotid/spacecurves/openknot.py b/pyknotid/spacecurves/openknot.py index bfadb6c..25e2e74 100644 --- a/pyknotid/spacecurves/openknot.py +++ b/pyknotid/spacecurves/openknot.py @@ -11,8 +11,7 @@ ''' -from __future__ import print_function -import numpy as n +import numpy as np import sympy as sym from pyknotid.spacecurves.spacecurve import SpaceCurve @@ -55,7 +54,7 @@ def closing_distance(self): ''' Returns the distance between the first and last points. ''' - return n.linalg.norm(self.points[-1] - self.points[0]) + return np.linalg.norm(self.points[-1] - self.points[0]) def raw_crossings(self, mode='use_max_jump', virtual_closure=False, @@ -76,7 +75,7 @@ def raw_crossings(self, mode='use_max_jump', try_cython=try_cython) if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1)) | ((cs[:, 1] > len(self.points)-1))) indices = closure_cs.flatten() for index in indices: @@ -107,7 +106,7 @@ def closures(self, quantity, num_closures=10): k.zero_centroid() cs = k.raw_crossings() if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | ((cs[:, 1] > len(self.points)-1) & (cs[:, 2] > 0.))) indices = closure_cs.flatten() for index in indices: @@ -229,7 +228,7 @@ def alexander_polynomials(self, number_of_samples=10, radius=None, cache_radius = radius if radius is None: - radius = 100 * n.max(self.points) + radius = 100 * np.max(self.points) # Not guaranteed to give 10* the real radius, 
but good enough print_dist = int(max(1, 3000. / len(self.points))) @@ -245,7 +244,7 @@ def alexander_polynomials(self, number_of_samples=10, radius=None, if optimise_closure: cs = k.raw_crossings() if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | ((cs[:, 1] > len(self.points)-1) & (cs[:, 2] > 0.))) indices = closure_cs.flatten() for index in indices: @@ -258,16 +257,16 @@ def alexander_polynomials(self, number_of_samples=10, radius=None, points = k.points closure_point = points[-1] + points[0] / 2. closure_point[2] = radius - k.points = n.vstack([points, closure_point]) + k.points = np.vstack([points, closure_point]) polys.append([angs[0], angs[1], k.alexander_polynomial()]) except IndexError: self.failed_case = k self._cached_alexanders[ - (number_of_samples, cache_radius)] = n.array(polys) + (number_of_samples, cache_radius)] = np.array(polys) - return n.array(polys) + return np.array(polys) def closure_alexander_polynomial(self, theta=0, phi=0): '''Returns the Alexander polynomial of the knot, when projected in @@ -286,7 +285,7 @@ def closure_alexander_polynomial(self, theta=0, phi=0): cs = k.raw_crossings() if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | ((cs[:, 1] > len(self.points)-1) & (cs[:, 2] > 0.))) indices = closure_cs.flatten() for index in indices: @@ -302,12 +301,12 @@ def alexander_fractions(self, number_of_samples=10, **kwargs): ''' polys = self.alexander_polynomials( number_of_samples=number_of_samples, **kwargs) - alexs = n.round(polys[:, 2]).astype(n.int) + alexs = np.round(polys[:, 2]).astype(np.int) fracs = [] length = float(len(alexs)) - for alex in n.unique(alexs): - fracs.append((alex, n.sum(alexs == alex) / length)) + for alex in np.unique(alexs): + fracs.append((alex, np.sum(alexs == 
alex) / length)) return sorted(fracs, key=lambda j: j[1]) @@ -321,14 +320,14 @@ def _alexander_map_values(self, number_of_samples=10, interpolation=100, positions = [] for i, row in enumerate(polys): positions.append(gall_peters(row[0], row[1])) - positions = n.array(positions) + positions = np.array(positions) - interpolation_points = n.mgrid[0:2*n.pi:int(1.57*interpolation)*1j, + interpolation_points = np.mgrid[0:2*np.pi:int(1.57*interpolation)*1j, -2.:2.:interpolation*1j] - # interpolation_points = n.mgrid[0:2 * n.pi:157j, + # interpolation_points = np.mgrid[0:2 * np.pi:157j, # -2.:2.:100j] - # '''interpolation_points = n.mgrid[-n.pi:n.pi:157j, - # -n.pi/2:n.pi/2:100j]''' + # '''interpolation_points = np.mgrid[-np.pi:np.pi:157j, + # -np.pi/2:np.pi/2:100j]''' values = griddata(positions, polys[:, 2], tuple(interpolation_points), @@ -376,7 +375,7 @@ def plot_alexander_map(self, number_of_samples=10, fig.colorbar(cax) else: ax.contourf(values.T, cmap='jet', - levels=[0] + range(3, int(n.max(values) + 1.1), 2)) + levels=[0] + range(3, int(np.max(values) + 1.1), 2)) ax.set_xticks([]) ax.set_yticks([]) @@ -419,7 +418,7 @@ def virtual_check(self): virtual = False for crossing_number in self.gauss_code().crossing_numbers: - occurences = n.where(gauss_code == crossing_number)[0] + occurences = np.where(gauss_code == crossing_number)[0] first_occurence = occurences[0] second_occurence = occurences[1] crossing_difference = second_occurence - first_occurence @@ -469,7 +468,7 @@ def virtual_checks(self, number_of_samples=10, isvirtual = k.virtual_check() polys.append([angs[0], angs[1], isvirtual]) - return n.array(polys) + return np.array(polys) def virtual_fractions(self, number_of_samples=10, **kwargs): '''Returns each of the virtual booleans from @@ -477,12 +476,12 @@ def virtual_fractions(self, number_of_samples=10, **kwargs): ''' polys = self.virtual_checks( number_of_samples=number_of_samples, **kwargs) - alexs = n.round(polys[:, 2]).astype(n.int) + alexs = 
np.round(polys[:, 2]).astype(np.int_)
+ phis = np.linspace(0, 2 * np.pi, 157) - thetas, phis = n.meshgrid(thetas, phis) + thetas, phis = np.meshgrid(thetas, phis) - r = sphere_radius_factor * n.max(self.points) - zs = r * n.cos(thetas) - xs = r * n.sin(thetas) * n.cos(phis) - ys = r * n.sin(thetas) * n.sin(phis) + r = sphere_radius_factor * np.max(self.points) + zs = r * np.cos(thetas) + xs = r * np.sin(thetas) * np.cos(phis) + ys = r * np.sin(thetas) * np.sin(phis) import mayavi.mlab as may @@ -621,7 +620,7 @@ def closure_alexander(self, theta=0, phi=0): cs = k.raw_crossings() if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | ((cs[:, 1] > len(self.points)-1) & (cs[:, 2] > 0.))) indices = closure_cs.flatten() for index in indices: @@ -672,7 +671,7 @@ def self_linkings(self, number_of_samples=10, self_linking = k.self_linking() polys.append([angs[0], angs[1], self_linking]) - return n.array(polys) + return np.array(polys) def self_linking_fractions(self, number_of_samples=10, **kwargs): '''Returns each of the self linking numbers from @@ -680,16 +679,16 @@ def self_linking_fractions(self, number_of_samples=10, **kwargs): ''' self_linkings = self.self_linkings( number_of_samples=number_of_samples, **kwargs) - self_linkings = n.round(self_linkings[:, 2]).astype(n.int) + self_linkings = np.round(self_linkings[:, 2]).astype(np.int) fracs = [] length = float(len(self_linkings)) - for alex in n.unique(self_linkings): - fracs.append((alex, n.sum(self_linkings == alex) / length)) - # fracs = n.array(fracs) + for alex in np.unique(self_linkings): + fracs.append((alex, np.sum(self_linkings == alex) / length)) + # fracs = np.array(fracs) return sorted(fracs, key=lambda j: j[1]) - #return fracs[n.argsort(fracs[:, 1])] + #return fracs[np.argsort(fracs[:, 1])] def _self_linking_map_values(self, number_of_samples=10, **kwargs): polys = self.self_linkings( @@ -700,9 +699,9 @@ def 
_self_linking_map_values(self, number_of_samples=10, **kwargs): positions = [] for i, row in enumerate(polys): positions.append(gall_peters(row[0], row[1])) - positions = n.array(positions) + positions = np.array(positions) - interpolation_points = n.mgrid[0:2 * n.pi:157j, + interpolation_points = np.mgrid[0:2 * np.pi:157j, -2.:2.:100j] values = griddata(positions, polys[:, 2], tuple(interpolation_points), @@ -730,7 +729,7 @@ def plot_self_linking_map(self, number_of_samples=10, fig.colorbar(cax) else: ax.contourf(values.T, cmap='jet', - levels=[0] + range(3, int(n.max(values) + 1.1), 2)) + levels=[0] + range(3, int(np.max(values) + 1.1), 2)) ax.set_xticks([]) ax.set_yticks([]) @@ -784,8 +783,8 @@ def plot_alexander_shell(self, number_of_samples=100, mode='mesh', self.plot(**kwargs) if radius is None: - self_radii = n.sqrt(n.sum(self.points*self.points, axis=1)) - radius = n.max(self_radii) * 2 + self_radii = np.sqrt(np.sum(self.points*self.points, axis=1)) + radius = np.max(self_radii) * 2 print('radius is', radius) if mode == 'mesh': plot_sphere_shell_vispy(self.closure_alexander_polynomial, @@ -848,7 +847,7 @@ def alexander_polynomials_multiroots(self, number_of_samples=10, cache_radius = radius if radius is None: - radius = 100 * n.max(self.points) + radius = 100 * np.max(self.points) # Not guaranteed to give 10* the real radius, but good enough print_dist = int(max(1, 3000. / len(self.points))) @@ -862,7 +861,7 @@ def alexander_polynomials_multiroots(self, number_of_samples=10, points = k.points closure_point = points[-1] + points[0] / 2. 
closure_point[2] = radius - k.points = n.vstack([points, closure_point]) + k.points = np.vstack([points, closure_point]) root_at_two = k.alexander_at_root(2) root_at_three = k.alexander_at_root(3) root_at_four = k.alexander_at_root(4) @@ -879,7 +878,7 @@ def alexander_polynomials_multiroots(self, number_of_samples=10, knot_type]) # self._cached_alexanders[ - # (number_of_samples, cache_radius)] = n.array(polys) + # (number_of_samples, cache_radius)] = np.array(polys) return polys @@ -925,7 +924,7 @@ def generalised_alexander(self): counter = 0 for crossing_number in self.gauss_code().crossing_numbers: - occurrences = n.where(gauss_code_crossings == crossing_number)[0] + occurrences = np.where(gauss_code_crossings == crossing_number)[0] if gauss_code_orientations[occurrences[0]] == 1: m = m_plus else: @@ -999,7 +998,7 @@ def slip_vassiliev_degree_2_average(self, samples=10, recalculate=False, **kwarg v2 = k._slip_vassiliev_degree_2_projection() v2s.append(v2) - result = n.average(n.abs(v2s)) + result = np.average(np.abs(v2s)) return result def _slip_vassiliev_degree_2_projection(self): @@ -1039,7 +1038,7 @@ def vassiliev_degree_2_average(self, samples=10, recalculate=False, **kwargs): v2 = k._vassiliev_degree_2_projection() v2s.append(v2) - result = n.average(n.abs(v2s)) + result = np.average(np.abs(v2s)) self._cached_v2[samples] = result return result @@ -1068,11 +1067,11 @@ def vassiliev_degree_3_average(self, samples=10, recalculate=False, v3s.append(v3) if signed: - result = n.average(v3s) + result = np.average(v3s) elif signed is None: - result = (n.average(v3s), n.average(n.abs(v3s))) + result = (np.average(v3s), np.average(np.abs(v3s))) else: - result = n.average(n.abs(v3s)) + result = np.average(np.abs(v3s)) self._cached_v3[(samples, signed)] = result return result @@ -1100,7 +1099,7 @@ def _determinants_and_self_linkings(self, number_of_samples=10, radius=None, cache_radius = radius if radius is None: - radius = 100 * n.max(self.points) + radius = 100 * 
np.max(self.points) # Not guaranteed to give 10* the real radius, but good enough print_dist = int(max(1, 3000. / len(self.points))) @@ -1113,7 +1112,7 @@ def _determinants_and_self_linkings(self, number_of_samples=10, radius=None, k.zero_centroid() cs = k.raw_crossings() if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | ((cs[:, 1] > len(self.points)-1) & (cs[:, 2] > 0.))) indices = closure_cs.flatten() for index in indices: @@ -1123,8 +1122,8 @@ def _determinants_and_self_linkings(self, number_of_samples=10, radius=None, polys.append([angs[0], angs[1], alexander(gc, simplify=False)]) # Remove closing crossings to calculate self linking - xs, ys = n.where(cs[:, :2] > len(k.points) - 1) - keeps = n.ones(len(cs), dtype=n.bool) + xs, ys = np.where(cs[:, :2] > len(k.points) - 1) + keeps = np.ones(len(cs), dtype=np.bool) for x in xs: keeps[x] = False cs = cs[keeps] @@ -1142,29 +1141,29 @@ def _determinants_and_self_linkings(self, number_of_samples=10, radius=None, self_linkings.append((angs[0], angs[1], self_linking_counter)) - return n.array(polys), n.array(self_linkings) + return np.array(polys), np.array(self_linkings) def _determinant_and_self_linking_fractions(self, number_of_samples=10, **kwargs): polys, self_linkings = self._determinants_and_self_linkings( number_of_samples, **kwargs) - alexs = n.round(polys[:, 2]).astype(n.int) + alexs = np.round(polys[:, 2]).astype(np.int) fracs = [] length = float(len(alexs)) - for alex in n.unique(alexs): - fracs.append((alex, n.sum(alexs == alex) / length)) + for alex in np.unique(alexs): + fracs.append((alex, np.sum(alexs == alex) / length)) det_fracs = sorted(fracs, key=lambda j: j[1]) - self_linkings = n.round(self_linkings[:, 2]).astype(n.int) + self_linkings = np.round(self_linkings[:, 2]).astype(np.int) fracs = [] length = float(len(self_linkings)) - for linking in n.unique(self_linkings): - 
fracs.append((linking, n.sum(self_linkings == linking) / length)) + for linking in np.unique(self_linkings): + fracs.append((linking, np.sum(self_linkings == linking) / length)) self_linking_fracs = sorted(fracs, key=lambda j: j[1]) @@ -1186,7 +1185,7 @@ def _closure_and_projection_invariants(self, number_of_samples=10, radius=None, cache_radius = radius if radius is None: - radius = 100 * n.max(self.points) + radius = 100 * np.max(self.points) # Not guaranteed to give 10* the real radius, but good enough print_dist = int(max(1, 3000. / len(self.points))) @@ -1199,7 +1198,7 @@ def _closure_and_projection_invariants(self, number_of_samples=10, radius=None, k.zero_centroid() cs = k.raw_crossings() if len(cs) > 0: - closure_cs = n.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | + closure_cs = np.argwhere(((cs[:, 0] > len(self.points)-1) & (cs[:, 2] < 0.)) | ((cs[:, 1] > len(self.points)-1) & (cs[:, 2] > 0.))) indices = closure_cs.flatten() for index in indices: @@ -1210,8 +1209,8 @@ def _closure_and_projection_invariants(self, number_of_samples=10, radius=None, closure_knotted.append([angs[0], angs[1], is_knotted]) # Remove closing crossings to calculate self linking - xs, ys = n.where(cs[:, :2] > len(k.points) - 1) - keeps = n.ones(len(cs), dtype=n.bool) + xs, ys = np.where(cs[:, :2] > len(k.points) - 1) + keeps = np.ones(len(cs), dtype=np.bool) for x in xs: keeps[x] = False cs = cs[keeps] @@ -1226,8 +1225,8 @@ def _closure_and_projection_invariants(self, number_of_samples=10, radius=None, projection_knotted.append([angs[0], angs[1], False]) - return (n.array(closure_knotted), n.array(projection_virtual), - n.array(projection_knotted)) + return (np.array(closure_knotted), np.array(projection_virtual), + np.array(projection_knotted)) def _closure_and_projection_knotted_fractions(self, number_of_samples=10, **kwargs): @@ -1235,11 +1234,11 @@ def _closure_and_projection_knotted_fractions(self, number_of_samples=10, number_of_samples, **kwargs) ck, pv, pk 
= self._closure_and_projection_invariants(number_of_samples, **kwargs) - ck_fraction = n.average(ck[:, -1]) - pv_fraction = n.average(pv[:, -1]) + ck_fraction = np.average(ck[:, -1]) + pv_fraction = np.average(pv[:, -1]) - return ck_fraction, pv_fraction, n.average(pv[:, -1].astype(n.bool) | - pk[:, -1].astype(n.bool)) + return ck_fraction, pv_fraction, np.average(pv[:, -1].astype(np.bool) | + pk[:, -1].astype(np.bool)) def plot_vassiliev_spectrum(self, angles=100, max_crossings=8): v2 = self.vassiliev_degree_2_average() @@ -1280,8 +1279,8 @@ def gall_peters(theta, phi): phi : float The longitude, in radians. ''' - theta -= n.pi / 2 - return (phi, 2 * n.sin(theta)) + theta -= np.pi / 2 + return (phi, 2 * np.sin(theta)) def mollweide(phi, lambda_): ''' @@ -1298,17 +1297,17 @@ def mollweide(phi, lambda_): The longitude, in radians. ''' - if phi == n.pi/2 or phi == -n.pi/2: + if phi == np.pi/2 or phi == -np.pi/2: theta_m = phi else: theta_n = phi theta_m = 0 while abs(theta_m - theta_n) > 0.000001: theta_n = theta_m - theta_m = (theta_n - (2*theta_n + n.sin(2*theta_n) - n.pi * n.sin(phi)) / - (2 + 2*n.cos(2*theta_n))) - x = ((2 * n.sqrt(2)) / (n.pi)) * lambda_ * n.cos(theta_m) - y = n.sqrt(2) * n.sin(theta_m) + theta_m = (theta_n - (2*theta_n + np.sin(2*theta_n) - np.pi * np.sin(phi)) / + (2 + 2*np.cos(2*theta_n))) + x = ((2 * np.sqrt(2)) / (np.pi)) * lambda_ * np.cos(theta_m) + y = np.sqrt(2) * np.sin(theta_m) return(x,y) diff --git a/pyknotid/spacecurves/periodic.py b/pyknotid/spacecurves/periodic.py index cd21522..0a5a7f8 100644 --- a/pyknotid/spacecurves/periodic.py +++ b/pyknotid/spacecurves/periodic.py @@ -1,6 +1,4 @@ -from __future__ import print_function, division - -import numpy as n +import numpy as np from pyknotid.utils import ensure_shape_tuple, vprint from periodiccell import _cram_into_cell, _cut_line_at_jumps, _interpret_line @@ -8,14 +6,14 @@ from collections import defaultdict try: - from pyknotid.spacecurves import chelpers + from 
pyknotid.spacecurves import nhelpers except ImportError: - from pyknotid.spacecurves import helpers as chelpers + from pyknotid.spacecurves import helpers as nhelpers ROTATION_MAGIC_NUMBERS = (0.02, 1.1) ROTATION_MAGIC_NUMBERS = (0.53, 1.0) -class PeriodicKnot(object): +class PeriodicKnot: def __init__(self, points, period_vector=None, autorotate=True, repeats=1): if repeats < 1: @@ -25,9 +23,9 @@ def __init__(self, points, period_vector=None, autorotate=True, repeats=1): new_points = [points] for i in range(1, repeats): new_points.append(k.translated_points(i)) - points = n.vstack(new_points) + points = np.vstack(new_points) - self.points = n.array(points).copy() + self.points = np.array(points).copy() self._period_vector = period_vector from pyknotid.spacecurves.rotation import rotate_to_top @@ -56,7 +54,7 @@ def from_periodic_line(cls, line, shape, perturb=True, **kwargs): shape = ensure_shape_tuple(shape) points = line.copy() - points += n.array([0.00123, 0.00231, 0.00321]) + points += np.array([0.00123, 0.00231, 0.00321]) dx, dy, dz = shape for i in range(1, len(points)): @@ -87,7 +85,7 @@ def __len__(self): @property def period_vector(self): if self._period_vector is not None: - return n.array(self._period_vector) + return np.array(self._period_vector) return self.points[-1] - self.points[0] def rotate_period_vector_to_x(self): @@ -95,7 +93,7 @@ def rotate_period_vector_to_x(self): from pyknotid.spacecurves.rotation import ( rotate_vector_to_top, rotate_axis_angle) self._apply_matrix(rotate_vector_to_top(v)) - self._apply_matrix(rotate_axis_angle((0, 1., 0), n.pi/2.)) + self._apply_matrix(rotate_axis_angle((0, 1., 0), np.pi/2.)) def get_periodic_segments(self): '''Returns a list of line segments making up the curve, cut at the @@ -110,12 +108,12 @@ def interpolate(self, factor=2): for i in range(len(self.points) - 1): v1 = self.points[i] v2 = self.points[i+1] - interpolated_points = n.zeros((factor, 3)) - interpolated_points[:, 0] = n.linspace(v1[0], v2[0], 
factor + 1)[:-1] - interpolated_points[:, 1] = n.linspace(v1[1], v2[1], factor + 1)[:-1] - interpolated_points[:, 2] = n.linspace(v1[2], v2[2], factor + 1)[:-1] + interpolated_points = np.zeros((factor, 3)) + interpolated_points[:, 0] = np.linspace(v1[0], v2[0], factor + 1)[:-1] + interpolated_points[:, 1] = np.linspace(v1[1], v2[1], factor + 1)[:-1] + interpolated_points[:, 2] = np.linspace(v1[2], v2[2], factor + 1)[:-1] new_points.append(interpolated_points) - self.points = n.vstack(new_points) + self.points = np.vstack(new_points) def nearest_non_overlapping_translation(self): '''Returns the minimal number of translations that can be used in @@ -124,14 +122,14 @@ def nearest_non_overlapping_translation(self): points = self.points[:, :2] end_end_distance = mag(points[-1] - self.translated_points(1)[-1, :2]) - max_dist_from_start = n.max(n.apply_along_axis(mag, 1, points - points[0])) - max_dist_from_end = n.max(n.apply_along_axis(mag, 1, points - points[-1])) - largest_possible_distance = n.max([max_dist_from_start, max_dist_from_end]) + max_dist_from_start = np.max(np.apply_along_axis(mag, 1, points - points[0])) + max_dist_from_end = np.max(np.apply_along_axis(mag, 1, points - points[-1])) + largest_possible_distance = np.max([max_dist_from_start, max_dist_from_end]) # print('largest possible distance', largest_possible_distance) # print('end_end', end_end_distance) - return int(n.ceil(largest_possible_distance / end_end_distance) + 4) + return int(np.ceil(largest_possible_distance / end_end_distance) + 4) def roll(self, num): pv = self.period_vector @@ -139,9 +137,9 @@ def roll(self, num): for _ in range(abs(num)): if num > 0: - ps = n.vstack((ps[1:], [ps[1] + pv])) + ps = np.vstack((ps[1:], [ps[1] + pv])) elif num < 0: - ps = n.vstack(([ps[-2] - pv], ps[:-1])) + ps = np.vstack(([ps[-2] - pv], ps[:-1])) self.points = ps def plot_with_translation(self, with_translation=0, **kwargs): @@ -189,8 +187,8 @@ def plot_projection_with(self, num_translations=0, 
mat=None): points = self.points translated_points = self.translated_points(num_translations) if mat is not None: - points = n.apply_along_axis(mat.dot, 1, points) - translated_points = n.apply_along_axis( + points = np.apply_along_axis(mat.dot, 1, points) + translated_points = np.apply_along_axis( mat.dot, 1, translated_points) ax.plot(points[:, 0], points[:, 1], color='purple', linewidth=1.5) ax.plot(translated_points[:, 0], @@ -205,7 +203,7 @@ def points_with_translations(self, num_translations=3): for i in range(-num_translations, num_translations): new_points.append(self.translated_points(i)[:-1]) new_points.append(self.translated_points(num_translations)) - points = n.vstack(new_points) + points = np.vstack(new_points) return points def rotate(self, angles=None): @@ -221,7 +219,7 @@ def rotate(self, angles=None): ''' from pyknotid.utils import get_rotation_matrix if angles is None: - angles = n.random.random(3) * 2*n.pi + angles = np.random.random(3) * 2*np.pi phi, theta, psi = angles rot_mat = get_rotation_matrix(angles) self._apply_matrix(rot_mat) @@ -233,7 +231,7 @@ def _apply_matrix(self, mat): ''' Applies the given matrix to all of self.points. 
''' - self.points = n.apply_along_axis(mat.dot, 1, self.points) + self.points = np.apply_along_axis(mat.dot, 1, self.points) def plot_projection(self, num_translations=None): if num_translations is None: @@ -253,7 +251,7 @@ def plot_projection(self, num_translations=None): # prev_points = self.translated_points(-1) # ax.plot(prev_points[:, 0], prev_points[:, 1], color='orange', linewidth=1.5) fig, ax = plot_projection(points, - crossings=n.array(plot_crossings), + crossings=np.array(plot_crossings), mark_start=True, fig_ax = (fig, ax), show=True) @@ -274,9 +272,9 @@ def raw_crossings(self, num_translations=None): core_num = len(self.points) - 1 core_index = num_translations * core_num - segment_lengths = n.roll(points[:, :2], -1, axis=0) - points[:, :2] - segment_lengths = n.sqrt(n.sum(segment_lengths**2, axis=1)) - max_segment_length = n.max(segment_lengths[:-1]) + segment_lengths = np.roll(points[:, :2], -1, axis=0) - points[:, :2] + segment_lengths = np.sqrt(np.sum(segment_lengths**2, axis=1)) + max_segment_length = np.max(segment_lengths[:-1]) jump_mode = 2 core_points = points[(core_index):(core_index + core_num + 1)] @@ -296,28 +294,28 @@ def raw_crossings(self, num_translations=None): vnum = core_index + i compnum = 0 - crossings.extend(chelpers.find_crossings( + crossings.extend(nhelpers.find_crossings( v0, dv, first_block, first_block_lengths, vnum, compnum, max_segment_length, jump_mode)) compnum = core_index + core_num - crossings.extend(chelpers.find_crossings( + crossings.extend(nhelpers.find_crossings( v0, dv, second_block, second_block_lengths, vnum, compnum, max_segment_length, jump_mode)) compnum = core_index + i + 2 - crossings.extend(chelpers.find_crossings( + crossings.extend(nhelpers.find_crossings( v0, dv, core_points[i+2:], core_segment_lengths[i+2:], vnum, compnum, max_segment_length, jump_mode)) crossings.sort(key=lambda j: j[0]) - return n.array(crossings), core_num, core_index + return np.array(crossings), core_num, core_index def 
alternative_raw_crossings(self, num_translations=None): if num_translations is None: @@ -429,7 +427,7 @@ def vassiliev_degree_2s(self, number_of_samples=10): k._apply_matrix(rotate_to_top(*angs)) v2s.append([angs[0], angs[1], k.vassiliev_degree_2()]) - return n.array(v2s) + return np.array(v2s) def vassiliev_degree_3(self, num_translations=None): if num_translations is None: @@ -477,7 +475,7 @@ def vassiliev_degree_3s(self, number_of_samples=10): k._apply_matrix(rotate_to_top(*angs)) v3s.append([angs[0], angs[1], k.vassiliev_degree_3()]) - return n.array(v3s) + return np.array(v3s) def vassiliev_degree_2_integral(self, num_translations=3): ps = self.points_with_translations(num_translations) @@ -510,13 +508,13 @@ def vassiliev_degree_2_integral(self, num_translations=3): def mag(v): - return n.sqrt(v.dot(v)) + return np.sqrt(v.dot(v)) -prefactor = 1 / (4*n.pi) +prefactor = 1 / (4*np.pi) def writhe_contribution(v0, dv0, v1, dv1): diff = v1 - v0 return (mag(dv0) * mag(dv1) * prefactor * - n.cross(dv0, dv1).dot(diff) / + np.cross(dv0, dv1).dot(diff) / (mag(diff)**3)) @@ -524,10 +522,10 @@ def get_equivalent_crossing_indices(crossings, span): equivalencies = defaultdict(set) for i, row1 in enumerate(crossings): for j, row2 in enumerate(crossings[i+1:], i+1): - proximity = n.abs(row1[0] - row2[0]) % span + proximity = np.abs(row1[0] - row2[0]) % span if proximity < 0.00000001 or proximity > (span - 0.00000001): - # if ((n.abs((row1[0] - row2[0])) % span < 0.000001) or - # (span - (n.abs((row1[0] - row2[0])) % span) > (span - 0.000001))): + # if ((np.abs((row1[0] - row2[0])) % span < 0.000001) or + # (span - (np.abs((row1[0] - row2[0])) % span) > (span - 0.000001))): equivalencies[i].add(j) for val in equivalencies[i]: equivalencies[val].add(j) @@ -550,16 +548,16 @@ def get_equivalent_crossing_numbers(indices, gauss_code): return equivalent_crossing_numbers -class CellKnot(object): +class CellKnot: def __init__(self, points, shape, period=None): # points will 
automatically be folded points = _interpret_line(points) - self.shape = n.array(ensure_shape_tuple(shape)) + self.shape = np.array(ensure_shape_tuple(shape)) self.points = _cut_line_at_jumps(points, self.shape) if period is None: - period = (n.round((self.unfolded_points()[-1] - self.points[0][0]) / + period = (np.round((self.unfolded_points()[-1] - self.points[0][0]) / self.shape)) self.period = period @@ -567,7 +565,7 @@ def __init__(self, points, shape, period=None): def folding(cls, points, shape, origin=(0, 0, 0.), **kwargs): '''Return an instance of PeriodicKnot resulting from folding the points within the given shape.''' - origin = n.array(origin) + origin = np.array(origin) shape = ensure_shape_tuple(shape) points -= origin @@ -585,28 +583,28 @@ def plot(self, boundary=True, clf=True, tube_radius=1.0, def unfolded_points(self): points = [self.points[0]] - current_shift = n.array([0., 0., 0.]) + current_shift = np.array([0., 0., 0.]) for i, segment in enumerate(self.points[1:], 1): segment = segment - current_shift * self.shape diff = segment[0] - points[-1][-1] - diff = n.round(diff / self.shape) + diff = np.round(diff / self.shape) current_shift += diff points.append(segment - diff * self.shape) - return n.vstack(points) + return np.vstack(points) def unfolded_points_with_translations(self, num=3, mat=None): points = [] unfolded = self.unfolded_points() for i in range(-num, num+1): points.append(unfolded + self.period_vector() * i) - points = n.vstack(points) + points = np.vstack(points) if mat is not None: - points = n.apply_along_axis(mat.dot, 1, points) - return n.vstack(points) + points = np.apply_along_axis(mat.dot, 1, points) + return np.vstack(points) def translate(self, distance): - distance = n.array(ensure_shape_tuple(distance)) + distance = np.array(ensure_shape_tuple(distance)) points = self.unfolded_points() + distance points = _fold(points, self.shape) self.points = _cut_line_at_jumps(points, self.shape) @@ -616,11 +614,11 @@ def 
_fix_endpoints(self): if len(self.points) < 2: return closing_vector = self.points[-1][-1] - self.points[0][0] - if n.all(closing_vector < 0.9*self.shape): + if np.all(closing_vector < 0.9*self.shape): first = self.points[0] self.points = self.points[1:] last = self.points.pop() - self.points.append(n.vstack((last, first))) + self.points.append(np.vstack((last, first))) def period_vector(self): return self.period * self.shape @@ -634,9 +632,9 @@ def raw_crossings_by_unfolding(self, num_translations=0, shift=0): core_num = len(self.unfolded_points()) core_index = num_translations * core_num + shift - segment_lengths = n.roll(points[:, :2], -1, axis=0) - points[:, :2] - segment_lengths = n.sqrt(n.sum(segment_lengths**2, axis=1)) - max_segment_length = n.max(segment_lengths[:-1]) + segment_lengths = np.roll(points[:, :2], -1, axis=0) - points[:, :2] + segment_lengths = np.sqrt(np.sum(segment_lengths**2, axis=1)) + max_segment_length = np.max(segment_lengths[:-1]) jump_mode = 2 core_points = points[(core_index):(core_index + core_num + 1)] @@ -656,28 +654,28 @@ def raw_crossings_by_unfolding(self, num_translations=0, shift=0): vnum = core_index + i compnum = 0 - crossings.extend(chelpers.find_crossings( + crossings.extend(nhelpers.find_crossings( v0, dv, first_block, first_block_lengths, vnum, compnum, max_segment_length, jump_mode)) compnum = core_index + core_num - crossings.extend(chelpers.find_crossings( + crossings.extend(nhelpers.find_crossings( v0, dv, second_block, second_block_lengths, vnum, compnum, max_segment_length, jump_mode)) compnum = core_index + i + 2 - crossings.extend(chelpers.find_crossings( + crossings.extend(nhelpers.find_crossings( v0, dv, core_points[i+2:], core_segment_lengths[i+2:], vnum, compnum, max_segment_length, jump_mode)) crossings.sort(key=lambda j: j[0]) - return n.array(crossings), core_num, core_index + return np.array(crossings), core_num, core_index def plot_projection_by_unfolding(self, num_translations=0, shift=0): from 
pyknotid.visualise import plot_line, plot_projection @@ -693,7 +691,7 @@ def plot_projection_by_unfolding(self, num_translations=0, shift=0): dr = points[(xint+1) % len(points)] - r plot_crossings.append(r + (x-xint) * dr) fig, ax = plot_projection(points, - crossings=n.array(plot_crossings), + crossings=np.array(plot_crossings), mark_start=True, show=True) core_points = points[num_translations * len(self.unfolded_points()) + shift: @@ -712,8 +710,8 @@ def gauss_code_by_unfolding(self, num_translations=0, mat=None, shift=None): length = len(self.points) for i, row1 in enumerate(crossings): for j, row2 in enumerate(crossings[i+1:], i+1): - # print(i, j, n.abs((row1[0] - row2[0]) % 40)) - if n.abs((row1[0] - row2[0]) % len(self.unfolded_points())) < 0.000001: + # print(i, j, np.abs((row1[0] - row2[0]) % 40)) + if np.abs((row1[0] - row2[0]) % len(self.unfolded_points())) < 0.000001: equivalent_crossing_indices[i].add(j) for val in equivalent_crossing_indices[i]: equivalent_crossing_indices[val].add(j) @@ -745,10 +743,10 @@ def gauss_code_by_unfolding(self, num_translations=0, mat=None, shift=None): crossing_number = code[i][0] if crossing_number not in translation_numbers: position1 = row[0] - translation1 = int(n.floor((position1 - centre_start) / (len(self.unfolded_points() + 0)))) + translation1 = int(np.floor((position1 - centre_start) / (len(self.unfolded_points() + 0)))) position2 = row[1] - translation2 = int(n.floor((position2 - centre_start) / (len(self.unfolded_points() + 0)))) + translation2 = int(np.floor((position2 - centre_start) / (len(self.unfolded_points() + 0)))) translation_numbers[crossing_number] = (translation1, translation2) @@ -758,17 +756,17 @@ def gauss_code_by_unfolding(self, num_translations=0, mat=None, shift=None): def raw_crossings(self, num_translations=0, mat=None, shift=0): from pyknotid.spacecurves import OpenKnot points = self.unfolded_points() - points = n.vstack((points[shift:], points[:(shift+1)] + self.period_vector())) + points 
= np.vstack((points[shift:], points[:(shift+1)] + self.period_vector())) translated_points = points + num_translations * self.period_vector() if mat is not None: - points = n.apply_along_axis(mat.dot, 1, points) - translated_points = n.apply_along_axis(mat.dot, 1, translated_points) + points = np.apply_along_axis(mat.dot, 1, points) + translated_points = np.apply_along_axis(mat.dot, 1, translated_points) # 1) get crossings of self with self self_crossings = OpenKnot(points, verbose=False).raw_crossings().tolist() if num_translations == 0: - return n.array(self_crossings) + return np.array(self_crossings) # # 2) get crossings of other with other # other_crossings = OpenKnot(translated_points).raw_crossings().tolist() @@ -777,11 +775,11 @@ def raw_crossings(self, num_translations=0, mat=None, shift=0): # 3) get crossings of self with other inter_crossings = [] - segment_lengths = (n.roll(translated_points[:, :2], -1, axis=0) - + segment_lengths = (np.roll(translated_points[:, :2], -1, axis=0) - translated_points[:, :2]) - segment_lengths = n.sqrt(n.sum(segment_lengths * segment_lengths, + segment_lengths = np.sqrt(np.sum(segment_lengths * segment_lengths, axis=1)) - max_segment_length = n.max(segment_lengths[:-1]) + max_segment_length = np.max(segment_lengths[:-1]) jump_mode = 2 for i in range(len(points) - 1): v0 = points[i] @@ -795,27 +793,27 @@ def raw_crossings(self, num_translations=0, mat=None, shift=0): vnum = len(points) + i compnum = 0 - inter_crossings.extend(chelpers.find_crossings( + inter_crossings.extend(nhelpers.find_crossings( v0, dv, s, segment_lengths, vnum, compnum, max_segment_length, jump_mode )) # if len(inter_crossings): - # inter_crossings = n.array(inter_crossings) + # inter_crossings = np.array(inter_crossings) # inter_crossings[::2, 1] += len(points) # inter_crossings[1::2, 0] += len(points) # inter_crossings = inter_crossings.tolist() if num_translations < 0: - self_crossings = n.array(self_crossings) + self_crossings = 
np.array(self_crossings) self_crossings[:, :2] += len(points) self_crossings = self_crossings.tolist() all_crossings = self_crossings + inter_crossings # + other_crossings all_crossings.sort(key=lambda j: j[0]) - return n.array(all_crossings) + return np.array(all_crossings) def gauss_code(self, num_translations=0, mat=None, shift=0): '''Returns the Gauss code alongside a list of that are @@ -841,11 +839,11 @@ def plot_projection(self, num_translations=0, mat=None, shift=0): import matplotlib.pyplot as plt fig, ax = plt.subplots() points = self.unfolded_points() - points = n.vstack((points[shift:], points[:(shift+1)] + self.period_vector())) + points = np.vstack((points[shift:], points[:(shift+1)] + self.period_vector())) translated_points = points + num_translations * self.period_vector() if mat is not None: - points = n.apply_along_axis(mat.dot, 1, points) - translated_points = n.apply_along_axis(mat.dot, 1, translated_points) + points = np.apply_along_axis(mat.dot, 1, points) + translated_points = np.apply_along_axis(mat.dot, 1, translated_points) ax.plot(points[:, 0], points[:, 1]) ax.plot(translated_points[:, 0], translated_points[:, 1]) fig.show() @@ -854,11 +852,11 @@ def plot_projection(self, num_translations=0, mat=None, shift=0): def plot(self, num_translations=0, mat=None, shift=0): import matplotlib.pyplot as plt points = self.unfolded_points() - points = n.vstack((points[shift:], points[:(shift+1)] + self.period_vector())) + points = np.vstack((points[shift:], points[:(shift+1)] + self.period_vector())) translated_points = points + num_translations * self.period_vector() if mat is not None: - points = n.apply_along_axis(mat.dot, 1, points) - translated_points = n.apply_along_axis(mat.dot, 1, translated_points) + points = np.apply_along_axis(mat.dot, 1, points) + translated_points = np.apply_along_axis(mat.dot, 1, translated_points) from pyknotid.spacecurves import Link Link([points, translated_points]).plot() @@ -880,11 +878,11 @@ def 
periodic_vassiliev_degree_2(self, num_translations=3, # print(theta + 0.13, phi + 0.43, gc, numbers) print() print('translation {:03}, v2s_cur {}'.format(translations, v2s_cur)) - v2s.append(n.average(v2s_cur)) + v2s.append(np.average(v2s_cur)) v2_arrs.append(v2s_cur) vprint() - print('totals are', n.sum(v2_arrs, axis=0)) - return n.sum(v2s) + print('totals are', np.sum(v2_arrs, axis=0)) + return np.sum(v2s) def periodic_vassiliev_degree_2_by_unfolding(self, num_translations=3, shift=0): gc, equivalencies, translation_indices = self.gauss_code_by_unfolding(num_translations, shift=shift) @@ -1264,11 +1262,11 @@ def periodic_vassiliev_degree_2(representation, relevant_crossing_numbers=[]): def _fold(points, shape): '''Imposes the shape as periodic boundary conditions.''' - shape = n.array(shape) + shape = np.array(shape) dx, dy, dz = shape - points = n.vstack([[points[0] + 0.001], points]) - rest = n.array(points).copy() + points = np.vstack([[points[0] + 0.001], points]) + rest = np.array(points).copy() new_points = [] i = 0 while i < len(rest) - 1: @@ -1280,42 +1278,42 @@ def _fold(points, shape): new_points.append(rest[:i+1]) boundary_point = cur + cur[0] / (cur[0] - nex[0]) * (nex - cur) new_points.append([boundary_point]) - rest = n.vstack([[boundary_point], rest[i+1:]]) + rest = np.vstack([[boundary_point], rest[i+1:]]) rest[:, 0] += dx i = 0 elif nex[0] > dx: new_points.append(rest[:i+1]) boundary_point = cur + (dx - cur[0]) / (nex[0] - cur[0]) * (nex - cur) new_points.append([boundary_point]) - rest = n.vstack([[boundary_point], rest[i+1:]]) + rest = np.vstack([[boundary_point], rest[i+1:]]) rest[:, 0] -= dx i = 0 elif nex[1] < 0: new_points.append(rest[:i+1]) boundary_point = cur + cur[1] / (cur[1] - nex[1]) * (nex - cur) new_points.append([boundary_point]) - rest = n.vstack([[boundary_point], rest[i+1:]]) + rest = np.vstack([[boundary_point], rest[i+1:]]) rest[:, 1] += dy i = 0 elif nex[1] > dy: new_points.append(rest[:i+1]) boundary_point = cur + (dx - 
cur[1]) / (nex[1] - cur[1]) * (nex - cur) new_points.append([boundary_point]) - rest = n.vstack([[boundary_point], rest[i+1:]]) + rest = np.vstack([[boundary_point], rest[i+1:]]) rest[:, 1] -= dy i = 0 elif nex[2] < 0: new_points.append(rest[:i+1]) boundary_point = cur + cur[2] / (cur[2] - nex[2]) * (nex - cur) new_points.append([boundary_point]) - rest = n.vstack([[boundary_point], rest[i+1:]]) + rest = np.vstack([[boundary_point], rest[i+1:]]) rest[:, 2] += dz i = 0 elif nex[2] > dz: new_points.append(rest[:i+1]) boundary_point = cur + (dx - cur[2]) / (nex[2] - cur[2]) * (nex - cur) new_points.append([boundary_point]) - rest = n.vstack([[boundary_point], rest[i+1:]]) + rest = np.vstack([[boundary_point], rest[i+1:]]) rest[:, 2] -= dz i = 0 else: @@ -1324,7 +1322,7 @@ def _fold(points, shape): print('new points', new_points) - return n.vstack(new_points)[1:] + return np.vstack(new_points)[1:] def get_true_crossing_numbers(equivalent_cs, cores): output = {} diff --git a/pyknotid/spacecurves/periodiccell.py b/pyknotid/spacecurves/periodiccell.py index a99c629..21e64b5 100644 --- a/pyknotid/spacecurves/periodiccell.py +++ b/pyknotid/spacecurves/periodiccell.py @@ -11,7 +11,7 @@ from pyknotid.spacecurves import Knot, OpenKnot import numpy as np -class Cell(object): +class Cell: '''Class for holding the vertices of some number of lines with periodic boundary conditions. 
@@ -348,7 +348,7 @@ def _is_closed_loop(line, cutoff=5.): from pyknotid.spacecurves.link import Link -class BoundingBox(object): +class BoundingBox: def __init__(self, l): if hasattr(l, 'points'): # crudely get the points if l is a diff --git a/pyknotid/spacecurves/periodicline.py b/pyknotid/spacecurves/periodicline.py index 6e950f9..95020d5 100644 --- a/pyknotid/spacecurves/periodicline.py +++ b/pyknotid/spacecurves/periodicline.py @@ -1,4 +1,4 @@ -import numpy as n +import numpy as np class PeriodicKnot diff --git a/pyknotid/spacecurves/rotation.py b/pyknotid/spacecurves/rotation.py index fd8c558..fec339c 100644 --- a/pyknotid/spacecurves/rotation.py +++ b/pyknotid/spacecurves/rotation.py @@ -1,4 +1,4 @@ -import numpy as n +import numpy as np def get_rotation_angles(number): @@ -12,14 +12,14 @@ def get_rotation_angles(number): Mathematical Intelligencer 19(1) 1997. ''' - angles = n.zeros((number, 2)) - angles[0] = n.array([n.arccos(-1), 0]) + angles = np.zeros((number, 2)) + angles[0] = np.array([np.arccos(-1), 0]) for k in range(2, number+1): h_k = -1. + (2. * (k - 1)) / (number - 1) - theta = n.arccos(h_k) - phi = (angles[k-2, 1] + 3.6/n.sqrt(number) * - 1. / n.sqrt(1 - h_k**2)) % (2*n.pi) + theta = np.arccos(h_k) + phi = (angles[k-2, 1] + 3.6/np.sqrt(number) * + 1. / np.sqrt(1 - h_k**2)) % (2*np.pi) angles[k-1, 0] = theta angles[k-1, 1] = phi angles[-1, 1] = 0. # Last phi will be inf otherwise @@ -36,8 +36,8 @@ def rotate_vector_to_top(vector): The (3D) vector to rotate. 
''' - theta = n.arccos(vector[2] / n.linalg.norm(vector)) - phi = n.arctan2(vector[1], vector[0]) + theta = np.arccos(vector[2] / np.linalg.norm(vector)) + phi = np.arctan2(vector[1], vector[0]) return rotate_to_top(theta, phi) def rotate_to_top(theta, phi): @@ -57,27 +57,27 @@ def rotate_to_top(theta, phi): chi = -1 * phi alpha = theta - cc = n.cos(chi) - sc = n.sin(chi) - ca = n.cos(alpha) - sa = n.sin(alpha) - first_rotation = n.array([[cc, -sc, 0], + cc = np.cos(chi) + sc = np.sin(chi) + ca = np.cos(alpha) + sa = np.sin(alpha) + first_rotation = np.array([[cc, -sc, 0], [sc, cc, 0], [0, 0, 1]]) - second_rotation = n.array([[ca, 0, -sa], + second_rotation = np.array([[ca, 0, -sa], [0, 1, 0], [sa, 0, ca]]) return second_rotation.dot(first_rotation) def rotate_axis_angle(axis, angle): - axis = n.array(axis) / n.linalg.norm(axis) + axis = np.array(axis) / np.linalg.norm(axis) ux, uy, uz = axis - ct = n.cos(angle) - st = n.sin(angle) + ct = np.cos(angle) + st = np.sin(angle) - arr = n.array( + arr = np.array( [[ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st], [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*ux*(1-ct) - ux*st], [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct)]]) diff --git a/pyknotid/spacecurves/smooth.py b/pyknotid/spacecurves/smooth.py index 650ee92..5c6dbd8 100644 --- a/pyknotid/spacecurves/smooth.py +++ b/pyknotid/spacecurves/smooth.py @@ -3,7 +3,7 @@ Modified version of code from http://scipy.org/Cookbook/SignalSmooth """ -import numpy as n +import numpy as np def smooth(x, window_len=10, window='hanning'): """smooth the data using a window with requested size. 
@@ -25,8 +25,8 @@ def smooth(x, window_len=10, window='hanning'): example: import numpy as n - t = n.linspace(-2,2,0.1) - x = n.sin(t)+n.random.randn(len(t))*0.1 + t = np.linspace(-2,2,0.1) + x = np.sin(t)+np.random.randn(len(t))*0.1 y = smooth(x) see also: @@ -49,14 +49,14 @@ def smooth(x, window_len=10, window='hanning'): if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'myown']: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") - s=n.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]] + s=np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]] #print(len(s)) if window == 'flat': #moving average - w = n.ones(window_len,'d') + w = np.ones(window_len,'d') elif window == 'myown': w = my_smoothing_kernel(window_len) else: - w = getattr(n, window)(window_len) - y = n.convolve(w/w.sum(), s, mode='same') + w = getattr(np, window)(window_len) + y = np.convolve(w/w.sum(), s, mode='same') return y[window_len-1:-window_len+1] diff --git a/pyknotid/spacecurves/spacecurve.py b/pyknotid/spacecurves/spacecurve.py index 64f0700..ed9eaa5 100644 --- a/pyknotid/spacecurves/spacecurve.py +++ b/pyknotid/spacecurves/spacecurve.py @@ -22,14 +22,16 @@ import numpy as np import sys +from pyknotid.logger import get_logger + +logger = get_logger(__name__) + try: - from pyknotid.spacecurves import chelpers + from pyknotid.spacecurves import nhelpers except ImportError: - print('Could not import cythonised chelpers, using Python ' - 'alternative. This will ' - 'give the same result, but is slower.') - from pyknotid.spacecurves import helpers as chelpers -from pyknotid.spacecurves import helpers as helpers + logger.info('Numba extension not available, using Python implementation. 
' + 'Performance may be reduced but results will be identical.') + from pyknotid.spacecurves import helpers as nhelpers from pyknotid.spacecurves.geometry import arclength, radius_of_gyration from pyknotid.spacecurves.smooth import smooth @@ -41,7 +43,7 @@ ensure_shape_tuple) -class SpaceCurve(object): +class SpaceCurve: ''' Class for holding the vertices of a single line, providing helper methods for convenient manipulation and analysis. @@ -347,8 +349,7 @@ def from_braid_word(cls, word): for i in range(num_strands): cur_strand = strands_by_initial_index[index] line.extend(cur_strand) - print('index is', index) - print('cur strand is', cur_strand) + logger.info(f'Processing strand {i}/{num_strands}: index={index}, points={len(cur_strand)}') index = int(np.round(cur_strand[-1][0])) - 1 k = cls(np.array(line)*5.) @@ -415,7 +416,7 @@ def cuaps(self, include_closure=True): tangents = np.roll(points, -1, axis=0) - points angles = np.arctan2(tangents[:, 1], tangents[:, 0]) - print('angles', angles) + logger.info(f'Calculated {len(angles)} tangent angles for curve analysis') cuaps = [] @@ -499,10 +500,7 @@ def raw_crossings(self, mode='use_max_jump', include_closure=True, if not recalculate and self._crossings is not None: return self._crossings - if try_cython: - helpers_module = chelpers - else: - helpers_module = helpers + helpers_module = nhelpers self._vprint('Finding crossings') @@ -870,7 +868,7 @@ def octree_simplify(self, runs=1, plot=False, rotate=True, obey_knotting=True, **kwargs): ''' Simplifies the curve via the octree reduction of - :module:`pyknotid.simplify.octree`. + :mod:`pyknotid.simplify.octree`. Parameters ---------- diff --git a/pyknotid/utils.py b/pyknotid/utils.py index b003af2..e6c941e 100644 --- a/pyknotid/utils.py +++ b/pyknotid/utils.py @@ -4,7 +4,7 @@ ''' import sys -import numpy as n +import numpy as np def vprint(string='', newline=True, condition=True): ''' @@ -38,7 +38,7 @@ def mag(v): v : ndarray A vector of any dimension. 
''' - return n.sqrt(v.dot(v)) + return np.sqrt(v.dot(v)) def get_rotation_matrix(angles): ''' @@ -53,9 +53,9 @@ def get_rotation_matrix(angles): ''' phi, theta, psi = angles - sin = n.sin - cos = n.cos - rotmat = n.array([ + sin = np.sin + cos = np.cos + rotmat = np.array([ [cos(theta)*cos(psi), -1*cos(phi)*sin(psi) + sin(phi)*sin(theta)*cos(psi), sin(phi)*sin(psi) + cos(phi)*sin(theta)*cos(psi)], diff --git a/pyknotid/visualise.py b/pyknotid/visualise.py index 9843b48..31d1b82 100644 --- a/pyknotid/visualise.py +++ b/pyknotid/visualise.py @@ -18,17 +18,13 @@ ''' -from __future__ import division - import vispy # vispy.use('PyQt5') -import numpy as n import numpy as np from colorsys import hsv_to_rgb from pyknotid.utils import ensure_shape_tuple, vprint import random -from colorsys import hsv_to_rgb try: from vispy.visuals.transforms import MatrixTransform @@ -114,7 +110,7 @@ def plot_line_mayavi(points, clf=True, tube_radius=1., colormap='hsv', if clf: may.clf() if mus is None: - mus = n.linspace(0, 1, len(points)) + mus = np.linspace(0, 1, len(points)) may.plot3d(points[:, 0], points[:, 1], points[:, 2], mus, colormap=colormap, tube_radius=tube_radius, **kwargs) @@ -272,7 +268,7 @@ def plot_line_vispy(points, clf=True, tube_radius=1., tube_points=8, **kwargs): # Add an extra point to fix tube drawing bug last_tangent = points[-1] - points[-2] - points = n.vstack([points, points[-1] + 0.0001 * last_tangent]) + points = np.vstack([points, points[-1] + 0.0001 * last_tangent]) ensure_vispy_canvas() if clf: @@ -283,17 +279,17 @@ def plot_line_vispy(points, clf=True, tube_radius=1., if isinstance(cmap, str): from matplotlib.cm import get_cmap mpl_cmap = get_cmap(cmap) - cmap = lambda v: n.array(mpl_cmap(v)) + cmap = lambda v: np.array(mpl_cmap(v)) cmap = cmap or (lambda c: hsv_to_rgb(c, 1, 1)) if colour is None: - colours = n.linspace(0, 1, len(points)) - colours = n.array([cmap(c) for c in colours]) + colours = np.linspace(0, 1, len(points)) + colours = 
np.array([cmap(c) for c in colours]) else: colours = color.ColorArray(colour) if mus is not None: - colours = n.array([hsv_to_rgb(c, 1, 1) for c in mus]) + colours = np.array([hsv_to_rgb(c, 1, 1) for c in mus]) l = scene.visuals.Tube(points, color=colours, @@ -304,13 +300,13 @@ def plot_line_vispy(points, clf=True, tube_radius=1., canvas.view.add(l) # canvas.view.camera = 'arcball' - canvas.view.camera = scene.ArcballCamera(fov=30, distance=7.5*n.max( - n.abs(points))) + canvas.view.camera = scene.ArcballCamera(fov=30, distance=7.5*np.max( + np.abs(points))) #canvas.view.camera = scene.TurntableCamera(fov=30) if zero_centroid: l.transform = MatrixTransform() # l.transform = scene.transforms.AffineTransform() - l.transform.translate(-1*n.average(points, axis=0)) + l.transform.translate(-1*np.average(points, axis=0)) canvas.show() # import ipdb @@ -349,13 +345,13 @@ def plot_lines_vispy(lines, clf=True, tube_radius=1., canvas.view.camera = 'arcball' canvas.view.camera.fov = 30 # canvas.view.camera = scene.TurntableCamera( - # fov=90, up='z', distance=1.2*n.max(n.max( + # fov=90, up='z', distance=1.2*np.max(np.max( # points, axis=0))) if zero_centroid: l.transform = MatrixTransform() # l.transform = scene.transforms.AffineTransform() - l.transform.translate(-1*n.average(points, axis=0)) + l.transform.translate(-1*np.average(points, axis=0)) canvas.show() return canvas @@ -385,8 +381,8 @@ def plot_projection(points, crossings=None, mark_start=False, ax.set_xticks([]) ax.set_yticks([]) - xmin, ymin = n.min(points[:, :2], axis=0) - xmax, ymax = n.max(points[:, :2], axis=0) + xmin, ymin = np.min(points[:, :2], axis=0) + xmax, ymax = np.max(points[:, :2], axis=0) dx = (xmax - xmin) / 10. dy = (ymax - ymin) / 10. 
@@ -398,7 +394,7 @@ def plot_projection(points, crossings=None, mark_start=False, marker='o') if crossings is not None and len(crossings): - crossings = n.array(crossings) + crossings = np.array(crossings) ax.plot(crossings[:, 0], crossings[:, 1], 'ro', alpha=0.5) if show: fig.show() @@ -436,15 +432,15 @@ def plot_shell_mayavi(func, positions, values = func( number_of_samples, zero_centroid=zero_centroid) - thetas = n.arcsin(n.linspace(-1, 1, 100)) + n.pi / 2. - phis = n.linspace(0, 2 * n.pi, 157) + thetas = np.arcsin(np.linspace(-1, 1, 100)) + np.pi / 2. + phis = np.linspace(0, 2 * np.pi, 157) - thetas, phis = n.meshgrid(thetas, phis) + thetas, phis = np.meshgrid(thetas, phis) - r = sphere_radius_factor * n.max(points) - zs = r * n.cos(thetas) - xs = r * n.sin(thetas) * n.cos(phis) - ys = r * n.sin(thetas) * n.sin(phis) + r = sphere_radius_factor * np.max(points) + zs = r * np.cos(thetas) + xs = r * np.sin(thetas) * np.cos(phis) + ys = r * np.sin(thetas) * np.sin(phis) import mayavi.mlab as may @@ -472,20 +468,20 @@ def plot_sphere_shell_vispy(func, rows=100, cols=100, mesh = s.mesh md = mesh._meshdata vertices = md.get_vertices() - md.set_vertices(vertices + n.array(translation)) + md.set_vertices(vertices + np.array(translation)) - values = n.zeros(len(vertices)) - opacities = n.ones(len(vertices)) + values = np.zeros(len(vertices)) + opacities = np.ones(len(vertices)) print('pre') for i, vertex in enumerate(vertices): if i % 10 == 0: vprint('\ri = {} / {}'.format(i, len(vertices)), newline=False) - vertex = vertex / n.sqrt(n.sum(vertex*vertex)) - theta = n.arccos(vertex[2]) - phi = n.arctan2(vertex[1], vertex[0]) + vertex = vertex / np.sqrt(np.sum(vertex*vertex)) + theta = np.arccos(vertex[2]) + phi = np.arctan2(vertex[1], vertex[0]) - if n.isnan(theta): + if np.isnan(theta): theta = 0.0 values[i] = func(theta, phi) distance = vertex[1] - cutoff @@ -494,10 +490,10 @@ def plot_sphere_shell_vispy(func, rows=100, cols=100, opacities[i] = 1. 
if vertex[1] < cutoff else opacity vprint() - colours = n.zeros((len(values), 4)) - max_val = n.max(values) - min_val = n.min(values) - unique_values = n.unique(colours) + colours = np.zeros((len(values), 4)) + max_val = np.max(values) + min_val = np.min(values) + unique_values = np.unique(colours) # max_val += (1. + 1./len(unique_values))*(max_val - min_val) diff = (max_val - min_val) @@ -510,7 +506,7 @@ def plot_sphere_shell_vispy(func, rows=100, cols=100, faces = md.get_faces() for si in range(smooth): - new_colours = [[n.array(row) for _ in range(3)] + new_colours = [[np.array(row) for _ in range(3)] for row in colours] for i, face in enumerate(faces): new_colours[face[0]].append(colours[face[1]]) @@ -520,7 +516,7 @@ def plot_sphere_shell_vispy(func, rows=100, cols=100, new_colours[face[2]].append(colours[face[0]]) new_colours[face[2]].append(colours[face[1]]) - new_colours = n.array([n.average(cs, axis=0) for cs in new_colours]) + new_colours = np.array([np.average(cs, axis=0) for cs in new_colours]) colours = new_colours @@ -553,20 +549,20 @@ def plot_shell_vispy(func, number_of_samples, radius=radius, zero_centroid=zero_centroid) - thetas = n.arcsin(n.linspace(-1, 1, 100)) + n.pi/2. - phis = n.linspace(0, 2*n.pi, 157) + thetas = np.arcsin(np.linspace(-1, 1, 100)) + np.pi/2. 
+ phis = np.linspace(0, 2*np.pi, 157) - thetas, phis = n.meshgrid(thetas, phis) + thetas, phis = np.meshgrid(thetas, phis) - r = sphere_radius_factor*n.max(points) - zs = r*n.cos(thetas) - xs = r*n.sin(thetas)*n.cos(phis) - ys = r*n.sin(thetas)*n.sin(phis) + r = sphere_radius_factor*np.max(points) + zs = r*np.cos(thetas) + xs = r*np.sin(thetas)*np.cos(phis) + ys = r*np.sin(thetas)*np.sin(phis) - colours = n.zeros((values.shape[0], values.shape[1], 4)) - max_val = n.max(values) - min_val = n.min(values) - unique_values = n.unique(colours) + colours = np.zeros((values.shape[0], values.shape[1], 4)) + max_val = np.max(values) + min_val = np.min(values) + unique_values = np.unique(colours) max_val += (1. + 1./len(unique_values))*(max_val - min_val) diff = (max_val - min_val) @@ -610,7 +606,7 @@ def plot_cell_mayavi(lines, boundary=None, clf=True, smooth=True, import mayavi.mlab as may may.clf() - hues = n.linspace(0, 1, len(lines) + 1)[:-1] + hues = np.linspace(0, 1, len(lines) + 1)[:-1] colours = [hsv_to_rgb(hue, 1, 1) for hue in hues] random.shuffle(colours) i = 0 @@ -637,18 +633,18 @@ def draw_bounding_box_mayavi(shape, colour=(0, 0, 0), tube_radius=1, markz=False xmin, xmax, ymin, ymax, zmin, zmax = shape ls = [] - ls.append(n.array([[xmax, ymax, zmin],[xmax, ymax, zmax]])) - ls.append(n.array([[xmax, ymin, zmin],[xmax, ymin, zmax]])) - ls.append(n.array([[xmin, ymax, zmin],[xmin, ymax, zmax]])) - ls.append(n.array([[xmin, ymin, zmin],[xmin, ymin, zmax]])) - ls.append(n.array([[xmin, ymax, zmax],[xmax, ymax, zmax]])) - ls.append(n.array([[xmin, ymin, zmax],[xmax, ymin, zmax]])) - ls.append(n.array([[xmin, ymax, zmin],[xmax, ymax, zmin]])) - ls.append(n.array([[xmin, ymin, zmin],[xmax, ymin, zmin]])) - ls.append(n.array([[xmax, ymin, zmax],[xmax, ymax, zmax]])) - ls.append(n.array([[xmin, ymin, zmax],[xmin, ymax, zmax]])) - ls.append(n.array([[xmax, ymin, zmin],[xmax, ymax, zmin]])) - ls.append(n.array([[xmin, ymin, zmin],[xmin, ymax, zmin]])) + 
ls.append(np.array([[xmax, ymax, zmin],[xmax, ymax, zmax]])) + ls.append(np.array([[xmax, ymin, zmin],[xmax, ymin, zmax]])) + ls.append(np.array([[xmin, ymax, zmin],[xmin, ymax, zmax]])) + ls.append(np.array([[xmin, ymin, zmin],[xmin, ymin, zmax]])) + ls.append(np.array([[xmin, ymax, zmax],[xmax, ymax, zmax]])) + ls.append(np.array([[xmin, ymin, zmax],[xmax, ymin, zmax]])) + ls.append(np.array([[xmin, ymax, zmin],[xmax, ymax, zmin]])) + ls.append(np.array([[xmin, ymin, zmin],[xmax, ymin, zmin]])) + ls.append(np.array([[xmax, ymin, zmax],[xmax, ymax, zmax]])) + ls.append(np.array([[xmin, ymin, zmax],[xmin, ymax, zmax]])) + ls.append(np.array([[xmax, ymin, zmin],[xmax, ymax, zmin]])) + ls.append(np.array([[xmin, ymin, zmin],[xmin, ymax, zmin]])) ls = [interpolate(p) for p in ls] @@ -662,7 +658,7 @@ def plot_cell_vispy(lines, boundary=None, clf=True, colours=None, clear_vispy_canvas() if colours is None: - hues = n.linspace(0, 1, len(lines) + 1)[:-1] + hues = np.linspace(0, 1, len(lines) + 1)[:-1] colours = [hsv_to_rgb(hue, 1, 1) for hue in hues] if randomise_colours: random.shuffle(colours) @@ -704,18 +700,18 @@ def draw_bounding_box_vispy(shape, colour=(0, 0, 0), tube_radius=1.): xmin, xmax, ymin, ymax, zmin, zmax = shape ls = [] - ls.append(n.array([[xmax, ymax, zmin],[xmax, ymax, zmax]])) - ls.append(n.array([[xmax, ymin, zmin],[xmax, ymin, zmax]])) - ls.append(n.array([[xmin, ymax, zmin],[xmin, ymax, zmax]])) - ls.append(n.array([[xmin, ymin, zmin],[xmin, ymin, zmax]])) - ls.append(n.array([[xmin, ymax, zmax],[xmax, ymax, zmax]])) - ls.append(n.array([[xmin, ymin, zmax],[xmax, ymin, zmax]])) - ls.append(n.array([[xmin, ymax, zmin],[xmax, ymax, zmin]])) - ls.append(n.array([[xmin, ymin, zmin],[xmax, ymin, zmin]])) - ls.append(n.array([[xmax, ymin, zmax],[xmax, ymax, zmax]])) - ls.append(n.array([[xmin, ymin, zmax],[xmin, ymax, zmax]])) - ls.append(n.array([[xmax, ymin, zmin],[xmax, ymax, zmin]])) - ls.append(n.array([[xmin, ymin, zmin],[xmin, ymax, zmin]])) + 
ls.append(np.array([[xmax, ymax, zmin],[xmax, ymax, zmax]])) + ls.append(np.array([[xmax, ymin, zmin],[xmax, ymin, zmax]])) + ls.append(np.array([[xmin, ymax, zmin],[xmin, ymax, zmax]])) + ls.append(np.array([[xmin, ymin, zmin],[xmin, ymin, zmax]])) + ls.append(np.array([[xmin, ymax, zmax],[xmax, ymax, zmax]])) + ls.append(np.array([[xmin, ymin, zmax],[xmax, ymin, zmax]])) + ls.append(np.array([[xmin, ymax, zmin],[xmax, ymax, zmin]])) + ls.append(np.array([[xmin, ymin, zmin],[xmax, ymin, zmin]])) + ls.append(np.array([[xmax, ymin, zmax],[xmax, ymax, zmax]])) + ls.append(np.array([[xmin, ymin, zmax],[xmin, ymax, zmax]])) + ls.append(np.array([[xmax, ymin, zmin],[xmax, ymax, zmin]])) + ls.append(np.array([[xmin, ymin, zmin],[xmin, ymax, zmin]])) ls = [interpolate(p) for p in ls] @@ -734,7 +730,7 @@ def draw_bounding_box_vispy(shape, colour=(0, 0, 0), tube_radius=1.): def interpolate(p, num=10): p1, p2 = p - return n.array([n.linspace(i, j, num) for i, j in zip(p1, p2)]).T + return np.array([np.linspace(i, j, num) for i, j in zip(p1, p2)]).T def cell_to_povray(filen, lines, shape): from jinja2 import Environment, FileSystemLoader @@ -742,8 +738,8 @@ def cell_to_povray(filen, lines, shape): '/home/asandy/devel/pyknotid/pyknotid/templates')) template = env.get_template('cell.pov') - colours = n.linspace(0, 1, len(lines) + 1)[:-1] - colours = n.array([hsv_to_rgb(c, 1, 1) for c in colours]) + colours = np.linspace(0, 1, len(lines) + 1)[:-1] + colours = np.array([hsv_to_rgb(c, 1, 1) for c in colours]) coloured_segments = [] for line, colour in zip(lines, colours): @@ -775,14 +771,14 @@ def plot_sphere_mollweide_vispy(func, circle_points=50, depth=2, vertices, indices = circles_ellipse_mesh(circle_points, depth) else: vertices, indices = recursive_ellipse_mesh(circle_points, depth) - vertices[:, 0] *= 2*n.sqrt(2) - vertices[:, 1] *= n.sqrt(2) + vertices[:, 0] *= 2*np.sqrt(2) + vertices[:, 1] *= np.sqrt(2) mesh = Mesh(vertices, indices) md = mesh._meshdata vertices = 
md.get_vertices() - values = n.zeros(len(vertices)) + values = np.zeros(len(vertices)) print('pre') thetas = [] @@ -791,32 +787,32 @@ def plot_sphere_mollweide_vispy(func, circle_points=50, depth=2, if i % 10 == 0: vprint('\ri = {} / {}'.format(i, len(vertices)), newline=False) - intermediate = n.arcsin(vertex[1] / n.sqrt(2)) - theta = n.arcsin((2*intermediate + n.sin(2*intermediate)) / n.pi) - phi = n.pi * vertex[0] / (2*n.sqrt(2) * n.cos(intermediate)) + intermediate = np.arcsin(vertex[1] / np.sqrt(2)) + theta = np.arcsin((2*intermediate + np.sin(2*intermediate)) / np.pi) + phi = np.pi * vertex[0] / (2*np.sqrt(2) * np.cos(intermediate)) - # theta = n.arccos(vertex[2]) - # phi = n.arctan2(vertex[1], vertex[0]) + # theta = np.arccos(vertex[2]) + # phi = np.arctan2(vertex[1], vertex[0]) - if n.isnan(theta): + if np.isnan(theta): theta = 0.0 print('theta', vertex) - if n.isnan(phi): + if np.isnan(phi): phi = 0.0 print('phi', vertex) thetas.append(theta) phis.append(phi) - values[i] = func(theta + n.pi/2, phi + n.pi) + values[i] = func(theta + np.pi/2, phi + np.pi) vprint() - print('thetas', n.min(thetas), n.max(thetas)) - print('phis', n.min(phis), n.max(phis)) + print('thetas', np.min(thetas), np.max(thetas)) + print('phis', np.min(phis), np.max(phis)) - colours = n.zeros((len(values), 4)) - max_val = n.max(values) - min_val = n.min(values) - unique_values = n.unique(colours) + colours = np.zeros((len(values), 4)) + max_val = np.max(values) + min_val = np.min(values) + unique_values = np.unique(colours) max_val += (1. 
+ 1./len(unique_values))*(max_val - min_val) diff = (max_val - min_val) @@ -829,7 +825,7 @@ def plot_sphere_mollweide_vispy(func, circle_points=50, depth=2, faces = md.get_faces() for si in range(smooth): - new_colours = [[n.array(row) for _ in range(3)] + new_colours = [[np.array(row) for _ in range(3)] for row in colours] for i, face in enumerate(faces): new_colours[face[0]].append(colours[face[1]]) @@ -839,7 +835,7 @@ def plot_sphere_mollweide_vispy(func, circle_points=50, depth=2, new_colours[face[2]].append(colours[face[0]]) new_colours[face[2]].append(colours[face[1]]) - new_colours = n.array([n.average(cs, axis=0) for cs in new_colours]) + new_colours = np.array([np.average(cs, axis=0) for cs in new_colours]) colours = new_colours @@ -852,25 +848,25 @@ def plot_sphere_mollweide_vispy(func, circle_points=50, depth=2, vispy_canvas.show() def circles_ellipse_mesh(radial=5, azimuthal=100): - angles = n.linspace(0, 2*n.pi, azimuthal + 1)[:-1] - radii = n.linspace(0, 1., radial) + angles = np.linspace(0, 2*np.pi, azimuthal + 1)[:-1] + radii = np.linspace(0, 1., radial) - offsets = n.zeros(len(angles)) - offsets[::2] += 2*n.pi / azimuthal + offsets = np.zeros(len(angles)) + offsets[::2] += 2*np.pi / azimuthal vertices = [] for i, radius in enumerate(radii): cur_angles = angles if i % 2 == 0: - cur_angles += 0.5* (2*n.pi) / azimuthal - points = n.zeros((len(angles), 3)) - points[:, 0] = n.cos(angles) * radius - points[:, 1] = n.sin(angles) * radius + cur_angles += 0.5* (2*np.pi) / azimuthal + points = np.zeros((len(angles), 3)) + points[:, 0] = np.cos(angles) * radius + points[:, 1] = np.sin(angles) * radius vertices.append(points) - vertices = n.vstack(vertices) + vertices = np.vstack(vertices) indices = [] num_angles = len(angles) @@ -885,7 +881,7 @@ def circles_ellipse_mesh(radial=5, azimuthal=100): indices.append((cur_index, next_r_index_1, next_r_index_2)) indices.append((cur_index, next_r_index_2, next_index)) - return vertices, n.array(indices) + return 
vertices, np.array(indices) def plot_vertices_indices(vertices, indices): import matplotlib.pyplot as plt @@ -907,21 +903,21 @@ def plot_vertices_indices(vertices, indices): def recursive_ellipse_mesh(init_num_points=10, depth=2): - triangle = n.array([[0., 0.], + triangle = np.array([[0., 0.], [1., 0.], [0.5, 1.5]]) - angles = n.linspace(0, 2*n.pi, init_num_points + 1)[:-1] - xs = n.sin(angles) - ys = n.cos(angles) - centre = n.array([0., 0.]) + angles = np.linspace(0, 2*np.pi, init_num_points + 1)[:-1] + xs = np.sin(angles) + ys = np.cos(angles) + centre = np.array([0., 0.]) vertices = [] indices = [] vertices_dict = {} for i in range(len(angles)): - triangle = n.array([centre, (xs[i], ys[i]), + triangle = np.array([centre, (xs[i], ys[i]), (xs[(i+1) % len(angles)], ys[(i+1) % len(angles)])]) indices.extend(get_subtriangles(triangle, vertices, @@ -931,7 +927,7 @@ def recursive_ellipse_mesh(init_num_points=10, depth=2): # indices = get_subtriangles(triangle, vertices, {}, 0, depth) - return (n.array(vertices), n.array(indices)) + return (np.array(vertices), np.array(indices)) @@ -947,12 +943,12 @@ def get_subtriangles(points, vertices, vertices_dict, depth, max_depth): return [indices] - arr_points = n.array(points) + arr_points = np.array(points) - centre = n.average(arr_points, axis=0) - half_edges = arr_points + 0.5*(n.roll(arr_points, -1, axis=0) - arr_points) + centre = np.average(arr_points, axis=0) + half_edges = arr_points + 0.5*(np.roll(arr_points, -1, axis=0) - arr_points) - new_points = n.array([arr_points[0], + new_points = np.array([arr_points[0], half_edges[0], arr_points[1], half_edges[1], @@ -961,7 +957,7 @@ def get_subtriangles(points, vertices, vertices_dict, depth, max_depth): subtriangles = [] for i in range(6): - new_triangle = n.array([centre, new_points[i], new_points[(i+1) % 6]]) + new_triangle = np.array([centre, new_points[i], new_points[(i+1) % 6]]) subtriangles.extend(get_subtriangles(new_triangle, vertices, vertices_dict, depth+1, 
max_depth)) @@ -984,8 +980,8 @@ def plot_sphere_lambert_vispy(func, circle_points=50, depth=2, vertices, indices = circles_ellipse_mesh(circle_points, depth) else: vertices, indices = recursive_ellipse_mesh(circle_points, depth) - # vertices[:, 0] *= 2*n.sqrt(2) - # vertices[:, 1] *= n.sqrt(2) + # vertices[:, 0] *= 2*np.sqrt(2) + # vertices[:, 1] *= np.sqrt(2) vertices[:, 0] *= 2 vertices[:, 1] *= 2 mesh = Mesh(vertices, indices) @@ -993,7 +989,7 @@ def plot_sphere_lambert_vispy(func, circle_points=50, depth=2, md = mesh._meshdata vertices = md.get_vertices() - values = n.zeros(len(vertices)) + values = np.zeros(len(vertices)) print('pre') thetas = [] @@ -1006,35 +1002,35 @@ def plot_sphere_lambert_vispy(func, circle_points=50, depth=2, # print('radius is', np.sqrt(vertex[0]**2 + vertex[1]**2)) # print('angle is', np.arctan2(vertex[1], vertex[0])) - # intermediate = n.arcsin(vertex[1] / n.sqrt(2)) - # theta = n.arcsin((2*intermediate + n.sin(2*intermediate)) / n.pi) - # phi = n.pi * vertex[0] / (2*n.sqrt(2) * n.cos(intermediate)) + # intermediate = np.arcsin(vertex[1] / np.sqrt(2)) + # theta = np.arcsin((2*intermediate + np.sin(2*intermediate)) / np.pi) + # phi = np.pi * vertex[0] / (2*np.sqrt(2) * np.cos(intermediate)) theta = 2*np.arccos(np.sqrt(vertex[0]**2 + vertex[1]**2)/2.) 
phi = np.arctan2(vertex[1], vertex[0]) - # theta = n.arccos(vertex[2]) - # phi = n.arctan2(vertex[1], vertex[0]) + # theta = np.arccos(vertex[2]) + # phi = np.arctan2(vertex[1], vertex[0]) - if n.isnan(theta): + if np.isnan(theta): theta = 0.0 print('theta', vertex) - if n.isnan(phi): + if np.isnan(phi): phi = 0.0 print('phi', vertex) thetas.append(theta) phis.append(phi) - values[i] = func(theta + n.pi/2, phi + n.pi) + values[i] = func(theta + np.pi/2, phi + np.pi) vprint() - print('thetas', n.min(thetas), n.max(thetas)) - print('phis', n.min(phis), n.max(phis)) + print('thetas', np.min(thetas), np.max(thetas)) + print('phis', np.min(phis), np.max(phis)) - colours = n.zeros((len(values), 4)) - max_val = n.max(values) - min_val = n.min(values) - unique_values = n.unique(colours) + colours = np.zeros((len(values), 4)) + max_val = np.max(values) + min_val = np.min(values) + unique_values = np.unique(colours) max_val += (1. + 1./len(unique_values))*(max_val - min_val) diff = (max_val - min_val) @@ -1047,7 +1043,7 @@ def plot_sphere_lambert_vispy(func, circle_points=50, depth=2, faces = md.get_faces() for si in range(smooth): - new_colours = [[n.array(row) for _ in range(3)] + new_colours = [[np.array(row) for _ in range(3)] for row in colours] for i, face in enumerate(faces): new_colours[face[0]].append(colours[face[1]]) @@ -1057,7 +1053,7 @@ def plot_sphere_lambert_vispy(func, circle_points=50, depth=2, new_colours[face[2]].append(colours[face[0]]) new_colours[face[2]].append(colours[face[1]]) - new_colours = n.array([n.average(cs, axis=0) for cs in new_colours]) + new_colours = np.array([np.average(cs, axis=0) for cs in new_colours]) colours = new_colours @@ -1092,7 +1088,7 @@ def plot_sphere_lambert_sharp_vispy(func, circle_points=50, depth=2, md = mesh._meshdata vertices = md.get_vertices() - values = n.zeros(len(vertices)) + values = np.zeros(len(vertices)) print('pre') thetas = [] @@ -1104,16 +1100,16 @@ def plot_sphere_lambert_sharp_vispy(func, 
circle_points=50, depth=2, theta = 2*np.arccos(np.sqrt(vertex[0]**2 + vertex[1]**2)/2.) phi = np.arctan2(vertex[1], vertex[0]) - if n.isnan(theta): + if np.isnan(theta): theta = 0.0 print('theta', vertex) - if n.isnan(phi): + if np.isnan(phi): phi = 0.0 print('phi', vertex) thetas.append(theta) phis.append(phi) - values[i] = func(theta + n.pi/2, phi + n.pi) + values[i] = func(theta + np.pi/2, phi + np.pi) vprint() return thetas, phis, values @@ -1124,8 +1120,8 @@ def plot_sphere_lambert_sharp_vispy(func, circle_points=50, depth=2, import matplotlib.pyplot as plt cmap = plt.get_cmap(cmap) - max_value = n.max(values) - min_value = n.min(values) + max_value = np.max(values) + min_value = np.min(values) def normalise(v): return (v - min_value) / (max_value - min_value) for tri_i, triangle in enumerate(indices): @@ -1148,8 +1144,8 @@ def normalise(v): v3_x = (v3[0] + offset_x) / 4. * output_size v3_y = (v3[1] + offset_y) / 4. * output_size - c_x = n.average([v1_x, v2_x, v3_x]) - c_y = n.average([v1_y, v2_y, v3_y]) + c_x = np.average([v1_x, v2_x, v3_x]) + c_y = np.average([v1_y, v2_y, v3_y]) v12_x = (v1_x + v2_x) / 2. v12_y = (v1_y + v2_y) / 2. 
@@ -1211,12 +1207,12 @@ def get_coloured_subtriangles(points, point_values, vertices, values, vertices_d return [indices] - arr_points = n.array(points) + arr_points = np.array(points) - centre = n.average(arr_points, axis=0) - half_edges = arr_points + 0.5*(n.roll(arr_points, -1, axis=0) - arr_points) + centre = np.average(arr_points, axis=0) + half_edges = arr_points + 0.5*(np.roll(arr_points, -1, axis=0) - arr_points) - new_points = n.array([arr_points[0], + new_points = np.array([arr_points[0], half_edges[0], arr_points[1], half_edges[1], @@ -1225,7 +1221,7 @@ def get_coloured_subtriangles(points, point_values, vertices, values, vertices_d subtriangles = [] for i in range(6): - new_triangle = n.array([centre, new_points[i], new_points[(i+1) % 6]]) + new_triangle = np.array([centre, new_points[i], new_points[(i+1) % 6]]) subtriangles.extend(get_subtriangles(new_triangle, vertices, vertices_dict, depth+1, max_depth)) diff --git a/pyknotid/writhes.py b/pyknotid/writhes.py index e72df72..0020288 100644 --- a/pyknotid/writhes.py +++ b/pyknotid/writhes.py @@ -1,4 +1,3 @@ -from __future__ import division, print_function import numpy as np from math import factorial from functools import wraps diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..8dd08d4 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,121 @@ +[build-system] +requires = ["setuptools>=45", "wheel", "Cython>=0.29", "numpy>=1.19"] +build-backend = "setuptools.build_meta" + +[project] +name = "pyknotid" +version = "0.5.4" +description = "Tools for identifying and analysing knots, in space-curves or standard topological representations" +readme = "README.md" +requires-python = ">=3.8" +license = {text = "MIT"} +authors = [ + {name = "Alexander Taylor", email = "alexander.taylor@bristol.ac.uk"} +] +keywords = ["knot", "knot-theory", "topology", "mathematics", "linking"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "License :: OSI 
Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Physics", +] + +dependencies = [ + "numpy>=1.19", + "networkx>=2.5", + "planarity>=0.4", + "peewee>=3.14", + "vispy>=0.6", + "sympy>=1.8", + "appdirs>=1.4", + "requests>=2.25", + "tqdm>=4.60", +] + +[project.optional-dependencies] +performance = [ + "numba>=0.55", +] +dev = [ + "pytest>=6.0", + "pytest-cov>=2.12", + "black>=21.0", + "flake8>=3.9", + "mypy>=0.900", + "numba>=0.55", +] +docs = [ + "sphinx>=4.0", + "sphinx-rtd-theme>=0.5", +] + +[project.scripts] +analyse-knot-file = "pyknotid.cli.analyse_knot_file:main" +plot-knot = "pyknotid.cli.plot_knot:main" + +[project.urls] +Homepage = "https://github.com/SPOCKnots/pyknotid" +Documentation = "http://pyknotid.readthedocs.io" +Repository = "https://github.com/SPOCKnots/pyknotid" +"Bug Tracker" = "https://github.com/SPOCKnots/pyknotid/issues" + +[tool.black] +line-length = 100 +target-version = ['py38', 'py39', 'py310', 'py311', 'py312'] +include = '\.pyi?$' +extend-exclude = ''' +/( + # directories + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | build + | dist + | doc +)/ +''' + +[tool.pytest.ini_options] +minversion = "6.0" +addopts = "-ra -q --strict-markers" +testpaths = [ + "tests", +] +python_files = "test_*.py" +python_classes = "Test*" +python_functions = "test_*" + +[tool.mypy] +python_version = "3.8" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = false +ignore_missing_imports = true + +[tool.coverage.run] +source = ["pyknotid"] +omit = [ + "*/tests/*", + "*/doc/*", + "setup.py", +] + +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", + "def __repr__", + "raise 
AssertionError", + "raise NotImplementedError", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] diff --git a/setup.py b/setup.py index f90c985..db2ed01 100644 --- a/setup.py +++ b/setup.py @@ -20,36 +20,12 @@ def recursively_include(results, directory, patterns): results[directory].append(join(*filename.split(sep)[1:])) recursively_include(package_data, 'pyknotid', - ['*.tmpl', '*.pov', '*.pyx', '*.pxd', - '*.py', + ['*.tmpl', '*.pov', '*.py', ]) -# Build cython components if possible -try: - from Cython.Build import cythonize - import numpy -except ImportError: - print('Cython or numpy could not be imported, so cythonised calculation ' - 'functions will not be built. pyknotid will use Python-only ' - 'routines instead. These are slower, but will return the ' - 'same result.') - print('To build the cython components, install cython and numpy and rebuild ' - 'pyknotid.') - ext_modules = [] - include_dirs = [] -else: - ext_modules = [ - Extension("pyknotid.spacecurves.chelpers", ["pyknotid/spacecurves/chelpers.pyx"], - libraries=["m"]), - Extension("pyknotid.spacecurves.ccomplexity", ["pyknotid/spacecurves/ccomplexity.pyx"], - libraries=["m"]), - Extension("pyknotid.simplify.coctree", ["pyknotid/simplify/coctree.pyx"], - libraries=["m"]), - Extension("pyknotid.cinvariants", ["pyknotid/cinvariants.pyx"], - libraries=["m"]), - ] - ext_modules = cythonize(ext_modules) - include_dirs = [numpy.get_include()] +# All performance-critical code uses Numba +ext_modules = [] +include_dirs = [] pyknotid_init_filen = join(dirname(__file__), 'pyknotid', '__init__.py') version = None @@ -67,7 +43,7 @@ def recursively_include(results, directory, patterns): version = matches[0].strip("'").strip('"') break if version is None: - raise Exception('Error: version could not be loaded from {}'.format(pyknotid_init_filen)) + raise RuntimeError(f'Error: version could not be loaded from {pyknotid_init_filen}') if 'READTHEDOCS' in environ and environ['READTHEDOCS'] == 'True': 
print('Installing for doc only') @@ -75,37 +51,20 @@ def recursively_include(results, directory, patterns): else: install_requires=['numpy', 'networkx', 'planarity', 'peewee', 'vispy', 'sympy', 'appdirs', - 'requests', 'tqdm'], - -long_description = ''' -Pyknotid -======== - -Python (and optional Cython) modules for detecting and measuring -knotting and linking. pyknotid can analyse space-curves, i.e. sets of -points in three-dimensions, or can parse standard topological -representations of knot diagrams. - -A graphical interface to some of these tools is available online at -`Knot ID `__. + 'requests', 'tqdm'] -pyknotid was developed as part of the Leverhulme Trust Research -Programme Grant RP2013-K-009: Scientific Properties of Complex Knots -(SPOCK), a collaboration between the University of Bristol and Durham -University in the UK. For more information, see the `SPOCK homepage -`__. - -If you use pyknotid in your research, please `cite us -`__. - -Questions or comments are welcome, please email alexander.taylor@bristol.ac.uk. - -Documentation -------------- - -pyknotid is documented online at `readthedocs -`__. -''' +# Read the README for long description +readme_path = join(dirname(__file__), 'README.md') +try: + with open(readme_path, 'r', encoding='utf-8') as f: + long_description = f.read() + long_description_content_type = 'text/markdown' +except FileNotFoundError: + long_description = ( + 'Python modules for detecting and measuring knotting and linking. ' + 'See https://github.com/SPOCKnots/pyknotid for more information.' 
+ ) + long_description_content_type = 'text/plain' setup( name='pyknotid', @@ -113,6 +72,7 @@ def recursively_include(results, directory, patterns): description=('Tools for identifying and analysing knots, in space-curves ' 'or standard topological representations'), long_description=long_description, + long_description_content_type=long_description_content_type, author='Alexander Taylor', author_email='alexander.taylor@bristol.ac.uk', python_requires='>=3.8',