diff --git a/.gitignore b/.gitignore index f5331f50..58309e9f 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ hexrd.egg-info *.so *.*~ *# +.#* diff --git a/MANIFEST.in b/MANIFEST.in index 4d67c4cb..575714dd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,3 @@ include versioneer.py include README.md +include hexrd/_version.py diff --git a/README.md b/README.md index 695b6935..f6495b86 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,61 @@ -HEXRD provides a collection of resources for analysis of x-ray diffraction -data, especially high-energy x-ray diffraction. HEXRD is comprised of a -library and API for writing scripts, a command line interface, and an -interactive graphical user interface. +HEXRD +===== + +HEXRD provides a collection of resources for analysis of X-ray diffraction +data, including powder diffraction, Laue diffraction, and monochromatic rotation series (_i.e._, 3DXRD/HEDM). +HEXRD comprises a library and API for writing scripts, a command line interface, and an +interactive graphical user interface (though the latter is not up to date in python2.7). + +Note that this is a _legacy_ repo with minimal maintenance; the canonical HEXRD repos can now be found at https://github.com/HEXRD/hexrd. + +It is recommended that you use the conda package manager for your python environment (available from either [here](https://docs.conda.io/en/latest/miniconda.html) or [here](https://www.anaconda.com/products/individual), with the former being a smaller, more barebones install). + +Building +-------- +You can skip this if you find a build of the desired version on [my anaconda cloud](https://anaconda.org/joelvbernier/hexrd) page, which I update periodically. Otherwise, the recommended method is via `conda-build`. If you installed Miniconda, you will first have to install `conda-build` in your base env: `conda install conda-build`. Otherwise, using conda 4.8.3 (from Miniconda3 or Anaconda3), the best procedure is as follows: +- go to wherever you keep your git repos, _e.g._, `cd ~/Documents/GitHub` +- if you already have the repo, update it with a fetch and pull: `git fetch -av; git pull` +- otherwise, clone the hexrd repo: `git clone https://github.com/joelvbernier/hexrd.git` +- `cd hexrd` +- check out the v0.6.x branch: `git checkout v0.6.x` +- make an empty env with python2.7 and numpy: `conda create --name hexrd_0.6 -c anaconda -c conda-forge python=2 numpy` +- activate your new env: `conda activate hexrd_0.6` +- install fabio from [here](https://github.com/joelvbernier/fabio.git) + - cd into wherever you keep your git repos, _e.g._, `cd ~/Documents/GitHub` + - clone the repo: `git clone https://github.com/joelvbernier/fabio.git` + - `cd fabio` + - grab the python 2.7 compatible branch: `git checkout py27_compat` + - `pip install ./` +- build hexrd from the conda recipe: `conda build conda.recipe/ --python=2 -c anaconda -c conda-forge` + +Installing +---------- +You can check [my anaconda cloud](https://anaconda.org/joelvbernier/hexrd) for prebuilt versions; if you find one for your platform, then simply execute +- `conda install hexrd=0.6 -c joelvbernier` + +Otherwise, you can install from a local build as follows: +- `conda install hexrd=0.6 --use-local -c anaconda -c conda-forge` + +Running +------- +The function libraries lend themselves to scripting for your varied purposes, but there is a CLI for the ff-HEDM workflow, namely indexing, `hexrd find-orientations`, and grain parameter refinement, `hexrd fit-grains`. More documentation to come.
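+
+For example, a typical ff-HEDM session might look like the following (a sketch; the YAML configuration file name here is hypothetical):
+
+```shell
+conda activate hexrd_0.6
+# index the rotation series described in the config file
+hexrd find-orientations my_config.yml
+# then refine grain parameters, seeded by the accepted orientations
+hexrd fit-grains my_config.yml
+```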
+ +Additional Packages +------------------- +It is highly recommended to install the `fast-histogram` package for indexing: + +- `pip install fast-histogram` + +And if you want spyder, the default channel is broken for python2.7; use the following: + +- `conda install spyder=3 jupyter_client=5.3.4 -c anaconda -c conda-forge` diff --git a/conda.recipe/bld.bat b/conda.recipe/bld.bat index 6ff7ccf0..72dfb81d 100644 --- a/conda.recipe/bld.bat +++ b/conda.recipe/bld.bat @@ -1,3 +1,7 @@ +REM !!! need to replace for proper versioning under setuptools??? +REM git describe --tags --dirty > %SRC_DIR%/__conda_version__.txt +REM %PYTHON% %RECIPE_DIR%/format_version.py %SRC_DIR%/__conda_version__.txt + rmdir build /s /q %PYTHON% setup.py install --old-and-unmanageable diff --git a/conda.recipe/build.sh b/conda.recipe/build.sh index f47eff93..eacf2510 100755 --- a/conda.recipe/build.sh +++ b/conda.recipe/build.sh @@ -1,3 +1,7 @@ +# !!! need to replace for proper versioning under setuptools??? +#git describe --tags --dirty > $SRC_DIR/__conda_version__.txt +#$PYTHON $RECIPE_DIR/format_version.py $SRC_DIR/__conda_version__.txt + rm -rf build $PYTHON setup.py install --old-and-unmanageable diff --git a/conda.recipe/format_version.py b/conda.recipe/format_version.py new file mode 100644 index 00000000..b96d641c --- /dev/null +++ b/conda.recipe/format_version.py @@ -0,0 +1,8 @@ +import os +import sys + +fn = sys.argv[1] +with open(fn) as f: + s = f.read().lstrip('v').replace('-', '+', 1).replace('-', '.') +with open(fn, 'w') as f: + f.write(s) diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index bb642671..a17ba78d 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -1,12 +1,11 @@ package: name: hexrd - # requires ".dev" tags following official releases, for now: version: {{ environ.get('GIT_DESCRIBE_TAG', '')[1:] }} source: - git_url: https://github.com/joelvbernier/hexrd.git - #git_tag: master # edit to point to specific branch or tag - git_tag: v0.3.x + # git_url: https://github.com/joelvbernier/hexrd.git + # git_tag: v0.6.x + git_url: ../ build: number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }} @@ -21,21 +20,21 @@ app: requirements: build: - # - nomkl # in case MKL is broken on Linux + - numba - numpy - python - setuptools run: - - dill + - h5py - matplotlib - # - nomkl # in case MKL is broken on Linux - numba - numpy - - progressbar >=2.3 + - psutil + - progressbar - python - python.app # [osx] - pyyaml - - qtconsole + - scikit-image - scikit-learn - scipy - wxpython @@ -43,17 +42,17 @@ requirements: test: imports: - hexrd - commands: - - hexrd -V - - hexrd -h - - hexrd help - - hexrd find-orientations -h - - hexrd help find-orientations - - hexrd fit-grains -h - - hexrd help fit-grains - - hexrd gui -h - - hexrd help gui - - hexrd test + # commands: + # - hexrd -V + # - hexrd -h + # - hexrd help + # - hexrd find-orientations -h + # - hexrd help find-orientations + # - hexrd fit-grains -h + # - hexrd help fit-grains + # - hexrd gui -h + # - hexrd help gui + # - hexrd test about: license: LGPL diff --git a/docs/build.rst b/docs/build.rst new file mode 100644 index 00000000..df87cc83 --- /dev/null +++ b/docs/build.rst @@ -0,0 +1,112 @@ +HEXRD Build Instructions +------------------------ + +The preferred method for building the HEXRD package is via the conda +recipe located in ``conda.recipe/``. + +Requirements +------------ +The following tools are needed to build the package:: + + conda + conda-build + +With Anaconda-based Python +environments, you should be able to 
run::

    conda build conda.recipe/

Building
--------

First, the dependencies for building an environment to run hexrd::

    - cython
    - fabio
    - h5py
    - matplotlib
    - numba
    - numpy
    - progressbar >=2.3
    - python
    - pyyaml
    - setuptools
    - scikit-image
    - scikit-learn
    - scipy
    - wxpython

If you will be running scripts of your own, I also strongly suggest adding spyder::

    - spyder

For example, to build an environment to run hexrd v0.6.x, do the following::

    conda create --name hexrd_0.6 cython h5py matplotlib numba numpy python=2.7 pyyaml setuptools scikit-image scikit-learn scipy spyder
    conda install -c anaconda --name hexrd_0.6 wxpython
    conda install -c anaconda --name hexrd_0.6 progressbar
    conda activate hexrd_0.6


Then install using setuptools::

    python setup.py install

Note that you will have to install fabio in the same environment using ``setup.py`` as well.
The procedure for building/installing with conda-build is as follows (*this is currently broken*).

First, update conda and conda-build::

    conda update conda
    conda update conda-build

Second, using ``conda-build``, purge previous builds (recommended,
not strictly required)::

    conda build purge

In the event that you have previously run either
``python setup.py develop`` OR ``python setup.py install``, then first run
either::

    python setup.py develop --uninstall

or::

    python setup.py install --record files.txt
    cat files.txt | xargs rm -rf

depending on how it was installed using ``distutils``. This will
remove any old builds/links.

Note that the "nuclear option" for removing hexrd is as follows::

    rm -rf /lib/python2.7/site-packages/hexrd*
    rm /bin/hexrd*

If you have installed ``hexrd`` in a specific conda environment, then
be sure to use the proper path to ``lib/`` under the root anaconda directory.

Next, run ``conda-build``::

    conda build conda.recipe/ --no-test

Note that the ``--no-test`` flag suppresses running the internal tests
until they are fixed (stay tuned...).

Installation
------------

Finally, run ``conda install`` using the local package::

    conda install hexrd=0.6 --use-local

Conda should echo the proper package version in the
install list, which includes all dependencies.

At this point, open a fresh terminal (outside the root hexrd
directory) and run::

    hexrd --version

It should currently read ``hexrd 0.6.5``. diff --git a/docs/source/_static/transforms.pdf b/docs/source/_static/transforms.pdf index 00372eeb..c7349055 100644 Binary files a/docs/source/_static/transforms.pdf and b/docs/source/_static/transforms.pdf differ diff --git a/docs/source/conf.py b/docs/source/conf.py index 29528cc8..e1860632 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -57,7 +57,7 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -if 'dev' in __version__: +if '+' in __version__: todo_include_todos = True rst_prolog = """ .. note:: diff --git a/docs/source/dev/imageseries.rst b/docs/source/dev/imageseries.rst new file mode 100644 index 00000000..3c29db35 --- /dev/null +++ b/docs/source/dev/imageseries.rst @@ -0,0 +1,26 @@ +imageseries package +=================== +The *imageseries* package provides a standard API for accessing image-based data sets. The primary tool in the package is the ImageSeries class. Its interface is analogous to a list of images with associated image metadata. The number of images is given by the len() function. Properties are defined for image shape (shape), data type (dtype) and metadata (metadata). Individual images are accessed by standard subscripting (e.g. image[i]).
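+
+For example, a minimal usage sketch (the file name here is hypothetical; the call follows the ``imageseries.open(fname, fmt, **args)`` pattern used elsewhere in this changeset, and the ``path`` keyword is an assumed format-specific argument)::
+
+    from hexrd import imageseries
+
+    # open a series stored in an HDF5 file
+    ims = imageseries.open('scan.h5', 'hdf5', path='/imageseries')
+    print(len(ims), ims.shape, ims.dtype)  # frame count, image shape, data type
+    frame0 = ims[0]  # individual images via subscripting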
+ +The package contains interfaces for loading (load) and saving (save) imageseries. Images can be loaded in three formats: 'array', 'hdf5' and 'frame-cache'. The 'array' format takes the images from a 3D numpy array. With 'hdf5', images are stored in an hdf5 file and accessed on demand. The 'frame-cache' is a list of sparse matrices, useful for thresholded images. An imageseries can be saved in 'hdf5' or 'frame-cache' format. + +The imageseries package also contains a module for modifying the images (process). The process module provides the ProcessedImageSeries class, which takes a given imageseries and produces a new one by modifying the images. It has certain built-in image operations, including transposition, flipping, dark subtraction and restriction to a subset. + + +Metadata +-------- + +The metadata property is generically a dictionary. The actual contents depend on the application. For common hexrd applications in which the specimen is rotated while being exposed to x-rays, the metadata has an 'omega' key whose value is an nx2 numpy array, where n is the number of frames and the two values in each row give the omega (rotation) range for that frame. + +Reader Refactor +--------------- +While the imageseries package is in itself independent of hexrd, it was used as the basis of a refactoring of the reader classes originally found in the detector module. The main reader class was ReadGE. In the refactored code, the reader classes are now in their own module, image_io, but imported into detector to preserve the interface. The image_io module contains a generic OmegaImageSeries class for working with imageseries having omega metadata. The refactored ReadGE class simply uses the OmegaImageSeries class to provide the same methods as the old class. New code should use the OmegaImageSeries (or the standard ImageSeries) class directly. diff --git a/docs/source/dev/index.rst b/docs/source/dev/index.rst index e47a0606..cbc17adb 100644 --- a/docs/source/dev/index.rst +++ b/docs/source/dev/index.rst @@ -9,3 +9,4 @@ Contents: getting_started releases + imageseries diff --git a/hexrd/_version.py b/hexrd/_version.py index 36f41117..d54561a9 100644 --- a/hexrd/_version.py +++ b/hexrd/_version.py @@ -6,24 +6,66 @@ # that just contains the computed version number. # This file is released into the public domain. Generated by -# versioneer-0.12 (https://github.com/warner/python-versioneer) +# versioneer-0.15 (https://github.com/warner/python-versioneer) -# these strings will be replaced by git during git-archive -git_refnames = "$Format:%d$" -git_full = "$Format:%H$" +import errno +import os +import re +import subprocess +import sys -# these strings are filled in when 'setup.py versioneer' creates _version.py -tag_prefix = "v" -parentdir_prefix = "hexrd-" -versionfile_source = "hexrd/_version.py" -import os, sys, re, subprocess, errno +def get_keywords(): + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + keywords = {"refnames": git_refnames, "full": git_full} + return keywords + + +class VersioneerConfig: + pass + + +def get_config(): + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "hexrd-" + cfg.versionfile_source = "hexrd/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + pass + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + def decorate(f): + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: + dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr @@ -34,7 +76,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): if e.errno == errno.ENOENT: continue if verbose: - print("unable to run %s" % args[0]) + print("unable to run %s" % dispcmd) print(e) return None else: @@ -42,26 +84,30 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() - if sys.version >= '3': + if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: - print("unable to run %s (error)" % args[0]) + print("unable to run %s (error)" % dispcmd) return None return stdout -def versions_from_parentdir(parentdir_prefix, root, verbose=False): +def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % - (root, dirname, parentdir_prefix)) - return None - return {"version": dirname[len(parentdir_prefix):], "full": ""} + print("guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None} + +@register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, @@ -69,7 +115,7 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs,"r") + f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -84,14 +130,16 @@ def git_get_keywords(versionfile_abs): pass return keywords -def git_versions_from_keywords(keywords, tag_prefix, verbose=False): + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: - return {} # keyword-finding function failed to find keywords + raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") - return {} # unexpanded, so not in an unpacked git-archive tarball + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. @@ -116,16 +164,20 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) - return { "version": r, - "full": keywords["full"].strip() } - # no suitable tags, so we use the full revision id + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None + } + # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: - print("no suitable tags, using full revision id") - return { "version": keywords["full"].strip(), - "full": keywords["full"].strip() } + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags"} -def git_versions_from_vcs(tag_prefix, root, verbose=False): +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. 
This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, @@ -134,50 +186,275 @@ def git_versions_from_vcs(tag_prefix, root, verbose=False): if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) - return {} + raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"], - cwd=root) - if stdout is None: - return {} - if not stdout.startswith(tag_prefix): - if verbose: - print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) - return {} - tag = stdout[len(tag_prefix):] - stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if stdout is None: - return {} - full = stdout.strip() - if tag.endswith("-dirty"): - full += "-dirty" - return {"version": tag, "full": full} - - -def get_versions(default={"version": "unknown", "full": ""}, verbose=False): + # if there is a tag, this yields TAG-NUM-gHEX[-dirty] + # if there are no tags, this yields HEX[-dirty] (no NUM) + describe_out = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long"], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + return pieces + + +def plus_or_dot(pieces): + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + # now build up version string, with post-release "local version + # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + # exceptions: + # 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + # TAG[.post.devDISTANCE] . No -dirty + + # exceptions: + # 1: no tags. 0.post.devDISTANCE + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that + # .dev0 sorts backwards (a dirty tree will appear "older" than the + # corresponding clean one), but you shouldn't be releasing software with + # -dirty anyways. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty + # --always' + + # exceptions: + # 1: no tags. HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty + # --always -long'. The distance/hash is unconditional. + + # exceptions: + # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"]} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None} + + +def get_versions(): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. - keywords = { "refnames": git_refnames, "full": git_full } - ver = git_versions_from_keywords(keywords, tag_prefix, verbose) - if ver: - return ver + cfg = get_config() + verbose = cfg.verbose try: - root = __file__ + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in versionfile_source.split('/'): + for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: - return default + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree"} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass - return (git_versions_from_vcs(tag_prefix, root, verbose) - or versions_from_parentdir(parentdir_prefix, root, verbose) - or default) + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version"} diff --git a/hexrd/cacheframes.py b/hexrd/cacheframes.py index df15344e..69d6b0c4 100644 --- a/hexrd/cacheframes.py +++ b/hexrd/cacheframes.py @@ -92,6 +92,7 @@ def load_frames(reader, cfg, show_progress=False): return reader def cache_frames(reader, cfg, show_progress=False, overwrite=True): + start = time.time() cache_file = os.path.join(cfg.analysis_dir, 'frame_cache.npz') # load the data reader = load_frames(reader, cfg, show_progress) @@ -103,7 +104,6 @@ def cache_frames(reader, cfg, show_progress=False, overwrite=True): arrs['%d_data' % i] = coo.data arrs['%d_row' % i] = coo.row arrs['%d_col' % i] = coo.col - start = time.time() np.savez_compressed(cache_file, **arrs) elapsed = time.time()-start logger.info('wrote %d frames to cache in %g seconds', len(reader[0]), elapsed) @@ -123,7 +123,7 @@ def get_frames(reader, cfg, show_progress=False, force=False, clean=False): # temporary catch if reader is None; i.e. raw data not here but cache is # ...NEED TO FIX THIS WHEN AXING OLD READER CLASS! - # the stop is treated as total number of frames read, which is inconsistent with + # the stop is treated as total number of frames read, which is inconsistent with # how the start value is used, which specifies empty frames to skip at the start # of each image. What a fucking mess! if reader is not None: diff --git a/hexrd/cli/findorientations.py b/hexrd/cli/findorientations.py index 578aca5e..b40e1d40 100644 --- a/hexrd/cli/findorientations.py +++ b/hexrd/cli/findorientations.py @@ -1,7 +1,7 @@ from __future__ import print_function, division, absolute_import -descr = 'Process diffraction data to find grain orientations' +descr = 'Process rotation image series to find grain orientations' example = """ examples: hexrd find-orientations configuration.yml @@ -79,21 +79,16 @@ def execute(args, parser): # load the configuration settings cfg = config.open(args.yml)[0] - # ...make this an attribute in cfg? - analysis_id = '%s_%s' %( - cfg.analysis_name.strip().replace(' ', '-'), - cfg.material.active.strip().replace(' ', '-'), - ) - # prepare the analysis directory quats_f = os.path.join( cfg.working_dir, - 'accepted_orientations_%s.dat' %analysis_id + 'accepted_orientations_%s.dat' % cfg.analysis_id ) if os.path.exists(quats_f) and not (args.force or args.clean): logger.error( - '%s already exists. Change yml file or specify "force"', quats_f - ) + '%s already exists. 
Change yml file or specify "force" or "clean"', + quats_f + ) sys.exit() if not os.path.exists(cfg.working_dir): os.makedirs(cfg.working_dir) @@ -101,7 +96,7 @@ def execute(args, parser): # configure logging to file logfile = os.path.join( cfg.working_dir, - 'find-orientations_%s.log' %analysis_id + 'find-orientations_%s.log' % cfg.analysis_id ) fh = logging.FileHandler(logfile, mode='w') fh.setLevel(log_level) @@ -120,7 +115,12 @@ def execute(args, parser): pr.enable() # process the data - find_orientations(cfg, hkls=args.hkls, clean=args.clean, profile=args.profile) + find_orientations( + cfg, + hkls=args.hkls, + clean=args.clean, + profile=args.profile + ) if args.profile: pr.disable() diff --git a/hexrd/cli/fitgrains.py b/hexrd/cli/fitgrains.py index b011a3f0..fef1a134 100644 --- a/hexrd/cli/fitgrains.py +++ b/hexrd/cli/fitgrains.py @@ -24,11 +24,11 @@ def configure_parser(sub_parsers): ) p.add_argument( '-c', '--clean', action='store_true', - help='overwrites existing analysis, including frame cache' + help='overwrites existing analysis, uses initial orientations' ) p.add_argument( '-f', '--force', action='store_true', - help='overwrites existing analysis, exlcuding frame cache' + help='overwrites existing analysis' ) p.add_argument( '-p', '--profile', action='store_true', @@ -62,17 +62,11 @@ def execute(args, parser): cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S') ch.setFormatter(cf) logger.addHandler(ch) - - # ...make this an attribute in cfg? - analysis_id = '%s_%s' %( - cfgs[0].analysis_name.strip().replace(' ', '-'), - cfgs[0].material.active.strip().replace(' ', '-'), - ) # if find-orientations has not already been run, do so: quats_f = os.path.join( cfgs[0].working_dir, - 'accepted_orientations_%s.dat' %analysis_id + 'accepted_orientations_%s.dat' % cfgs[0].analysis_id ) if not os.path.exists(quats_f): logger.info("Missing %s, running find-orientations", quats_f) diff --git a/hexrd/config/__init__.py b/hexrd/config/__init__.py index 912ebb9a..bff7c58a 100644 --- a/hexrd/config/__init__.py +++ b/hexrd/config/__init__.py @@ -16,7 +16,7 @@ def open(file_name=None): with file(file_name) as f: res = [] - for cfg in yaml.load_all(f): + for cfg in yaml.load_all(f, Loader=yaml.SafeLoader): try: # take the previous config section and update with values # from the current one diff --git a/hexrd/config/config.py b/hexrd/config/config.py index 55901727..f9df78c6 100644 --- a/hexrd/config/config.py +++ b/hexrd/config/config.py @@ -1,4 +1,60 @@ +"""Base Config class""" + +import logging + +from .utils import null + +logger = logging.getLogger('hexrd.config') + class Config(object): + _dirty = False + def __init__(self, cfg): self._cfg = cfg + + @property + def dirty(self): + return self._dirty + + def get(self, key, default=null): + args = key.split(':') + args, item = args[:-1], args[-1] + temp = self._cfg + for arg in args: + temp = temp.get(arg, {}) + # intermediate block may be None: + temp = {} if temp is None else temp + try: + res = temp[item] + except KeyError: + if default is not null: + logger.info( + '%s not specified, defaulting to %s', key, default + ) + res = temp.get(item, default) + else: + raise RuntimeError( + '%s must be specified in configuration file' % key + ) + return res + + def set(self, key, val): + args = key.split(':') + args, item = args[:-1], args[-1] + temp = self._cfg + for arg in args: + temp = temp.get(arg, {}) + # intermediate block may be None: + temp = {} if temp is None else temp + if temp.get(item, null) != val: + temp[item] = 
val + self._dirty = True + + + def dump(self, filename): + import yaml + + with open(filename, 'w') as f: + yaml.dump(self._cfg, f) + self._dirty = False diff --git a/hexrd/config/findorientations.py b/hexrd/config/findorientations.py index a35bc6f6..00078da0 100644 --- a/hexrd/config/findorientations.py +++ b/hexrd/config/findorientations.py @@ -1,53 +1,49 @@ -import logging import os import numpy as np from .config import Config - +# TODO: set these as defaults +seed_search_methods = { + 'label':dict(filter_radius=1, threshold=1), + 'blob_log':dict(min_sigma=0.5, max_sigma=5, + num_sigma=10, threshold=0.01, + overlap=0.1), + 'blob_dog':dict(min_sigma=0.5, max_sigma=5, + sigma_ratio=1.6, + threshold=0.01, overlap=0.1) +} class FindOrientationsConfig(Config): + # Subsections + @property + def orientation_maps(self): + return OrientationMapsConfig(self._cfg) + @property + def seed_search(self): + return SeedSearchConfig(self._cfg) + @property def clustering(self): return ClusteringConfig(self._cfg) - @property def eta(self): return EtaConfig(self._cfg) - - @property - def extract_measured_g_vectors(self): - return self._cfg.get( - 'find_orientations:extract_measured_g_vectors', - False - ) - - @property def omega(self): return OmegaConfig(self._cfg) - @property - def orientation_maps(self): - return OrientationMapsConfig(self._cfg) - - - @property - def seed_search(self): - return SeedSearchConfig(self._cfg) - - + # Simple Values @property def threshold(self): return self._cfg.get('find_orientations:threshold', 1) - @property def use_quaternion_grid(self): key = 'find_orientations:use_quaternion_grid' @@ -62,9 +58,15 @@ def use_quaternion_grid(self): '"%s": "%s" does not exist' % (key, temp) ) + @property + def extract_measured_g_vectors(self): + return self._cfg.get( + 'find_orientations:extract_measured_g_vectors', + False + ) -class ClusteringConfig(Config): +class ClusteringConfig(Config): @property def algorithm(self): @@ -78,7 +80,6 @@ def algorithm(self): % (key, temp, choices) ) - @property def completeness(self): key = 'find_orientations:clustering:completeness' @@ -89,7 +90,6 @@ def completeness(self): '"%s" must be specified' % key ) - @property def radius(self): key = 'find_orientations:clustering:radius' @@ -101,17 +101,15 @@ def radius(self): ) - class OmegaConfig(Config): + tolerance_dflt = 0.5 @property def period(self): + # ??? 
maybe should get from image_series like before in v0.3.x key = 'find_orientations:omega:period' - ome_start = self._cfg.image_series.omega.start - ome_step = self._cfg.image_series.omega.step - range = 360 if self._cfg.image_series.omega.step > 0 else -360 - temp = self._cfg.get(key, [ome_start, ome_start + range]) + temp = self._cfg.get(key, [-180., 180]) range = np.abs(temp[1]-temp[0]) if range != 360: raise RuntimeError( @@ -120,31 +118,29 @@ def period(self): ) return temp - @property def tolerance(self): return self._cfg.get( 'find_orientations:omega:tolerance', - 2 * self._cfg.image_series.omega.step + self.tolerance_dflt ) class EtaConfig(Config): + tolerance_dflt = 0.5 @property def tolerance(self): return self._cfg.get( 'find_orientations:eta:tolerance', - 2 * self._cfg.image_series.omega.step + self.tolerance_dflt ) - @property def mask(self): return self._cfg.get('find_orientations:eta:mask', 5) - @property def range(self): mask = self.mask @@ -154,10 +150,8 @@ def range(self): [ 90 + mask, 270 - mask]] - class SeedSearchConfig(Config): - @property def hkl_seeds(self): key = 'find_orientations:seed_search:hkl_seeds' @@ -172,7 +166,6 @@ def hkl_seeds(self): '"%s" must be defined for seeded search' % key ) - @property def fiber_step(self): return self._cfg.get( @@ -180,23 +173,44 @@ def fiber_step(self): self._cfg.find_orientations.omega.tolerance ) + @property + def method(self): + key = 'find_orientations:seed_search:method' + try: + temp = self._cfg.get(key) + assert len(temp) == 1., \ + "method must have exactly one key" + if isinstance(temp, dict): + method_spec = next(temp.iterkeys()) + if method_spec.lower() not in seed_search_methods: + raise RuntimeError( + 'invalid seed search method "%s"' + % method_spec + ) + else: + return temp + except: + raise RuntimeError( + '"%s" must be defined for seeded search' % key + ) @property def fiber_ndiv(self): return int(360.0 / self.fiber_step) - class OrientationMapsConfig(Config): - @property def active_hkls(self): temp = self._cfg.get( 'find_orientations:orientation_maps:active_hkls', default='all' ) - return [temp] if isinstance(temp, int) else temp - + if isinstance(temp, int): + temp = [temp] + if temp == 'all': + temp = None + return temp @property def bin_frames(self): @@ -204,15 +218,15 @@ def bin_frames(self): 'find_orientations:orientation_maps:bin_frames', default=1 ) - @property def file(self): - temp = self._cfg.get('find_orientations:orientation_maps:file') - if not os.path.isabs(temp): - temp = os.path.join(self._cfg.working_dir, temp) + temp = self._cfg.get('find_orientations:orientation_maps:file', + default=None) + if temp is not None: + if not os.path.isabs(temp): + temp = os.path.join(self._cfg.working_dir, temp) return temp - @property def threshold(self): return self._cfg.get('find_orientations:orientation_maps:threshold') diff --git a/hexrd/config/fitgrains.py b/hexrd/config/fitgrains.py index 0d9b6a8d..3322119c 100644 --- a/hexrd/config/fitgrains.py +++ b/hexrd/config/fitgrains.py @@ -61,14 +61,6 @@ def npdiv(self): return self._cfg.get('fit_grains:npdiv', 2) - @property - def panel_buffer(self): - temp = self._cfg.get('fit_grains:panel_buffer') - if isinstance(temp, (int, float)): - temp = [temp, temp] - return temp - - @property def threshold(self): return self._cfg.get('fit_grains:threshold') @@ -124,7 +116,7 @@ def fit_only(self): def tth_max(self): key = 'fit_grains:tth_max' temp = self._cfg.get(key, True) - if temp in (True, False): + if isinstance(temp, bool): return temp if isinstance(temp, (int, 
float)): if temp > 0: diff --git a/hexrd/config/imageseries.py b/hexrd/config/imageseries.py index 6534d3e9..435ae73a 100644 --- a/hexrd/config/imageseries.py +++ b/hexrd/config/imageseries.py @@ -1,133 +1,46 @@ import glob import os -from .config import Config - - - -class FileConfig(Config): - - - @property - def stem(self): - temp = self._cfg.get('image_series:file:stem') - if not os.path.isabs(temp): - temp = os.path.join(self._cfg.working_dir, temp) - return temp - - - @property - def ids(self): - temp = self._cfg.get('image_series:file:ids') - return temp if isinstance(temp, list) else [temp] - - - -class ImagesConfig(Config): - - - @property - def start(self): - return self._cfg.get('image_series:images:start', default=0) - - - @property - def step(self): - return self._cfg.get('image_series:images:step', default=1) - - - @property - def stop(self): - return self._cfg.get('image_series:images:stop', default=None) - - - -class OmegaConfig(Config): - - - @property - def start(self): - return self._cfg.get('image_series:omega:start') - - - @property - def step(self): - return self._cfg.get('image_series:omega:step') - - - @property - def stop(self): - return self._cfg.get('image_series:omega:stop', default=None) - +import numpy as np +from .config import Config +from hexrd import imageseries -class ImageSeriesConfig(Config): +class ImageSeries(Config): + BASEKEY = 'image_series' - @property - def dark(self): - temp = self._cfg.get( - 'image_series:dark', default=None - ) - if temp is None or os.path.exists(temp): - return temp - raise IOError( - '"image_series:dark": "%s" does not exist' % temp - ) + def __init__(self, cfg): + super(ImageSeries, self).__init__(cfg) + self._image_dict = None + def get(self, key): + """get item with given key""" + return self._cfg.get(':'.join([self.BASEKEY, key])) @property - def file(self): - return FileConfig(self._cfg) - + def imageseries(self): + """return the imageseries dictionary""" + if self._image_dict is None: + self._image_dict = dict() + fmt = self.format + for ispec in self.data: + fname = ispec['file'] + args = ispec['args'] + ims = imageseries.open(fname, fmt, **args) + oms = imageseries.omega.OmegaImageSeries(ims) + panel = oms.metadata['panel'] + self._image_dict[panel] = oms - @property - def files(self): - stem = self._cfg.image_series.file.stem - res = [] - missing = [] - for id in self._cfg.image_series.file.ids: - try: - id = stem % id - except TypeError: - # string interpolation failed, join stem and id: - id = stem + id - temp = glob.glob(id) - if temp: - res.extend(temp) - else: - missing.append(id) - if missing: - raise IOError( - 'Image files not found: %s' % (', '.join(missing)) - ) - return res + return self._image_dict + # ========== yaml inputs @property - def flip(self): - temp = self._cfg.get('image_series:flip', default=None) - if temp is None: - return - temp = temp.lower() - if temp not in ['h', 'v', 'hv', 'vh', 'cw90', 'ccw90']: - raise RuntimeError( - 'image_series:flip setting "%s" is not valid' % temp - ) - return temp - - - @property - def images(self): - return ImagesConfig(self._cfg) - - - @property - def omega(self): - return OmegaConfig(self._cfg) - + def data(self): + return self.get('data') @property - def n_frames(self): - return (self.omega.stop - self.omega.start)/self.omega.step + def format(self): + return self.get('format') diff --git a/hexrd/config/instrument.py b/hexrd/config/instrument.py index f60e9e87..4172e956 100644 --- a/hexrd/config/instrument.py +++ b/hexrd/config/instrument.py @@ -1,74 
+1,35 @@ -import os +import yaml from .config import Config +from hexrd import instrument -class PixelsConfig(Config): +class Instrument(Config): + def __init__(self, instr_file): + self._configuration = instr_file + with open(instr_file, 'r') as f: + icfg = yaml.safe_load(f) + self._hedm = instrument.HEDMInstrument(icfg) + # Note: instrument is instantiated with a yaml dictionary; use self + # to instantiate classes based on this one @property - def columns(self): - return self._cfg.get('instrument:detector:pixels:columns') - - - @property - def size(self): - temp = self._cfg.get('instrument:detector:pixels:size') - if isinstance(temp, (int, float)): - temp = [temp, temp] - return temp - - - @property - def rows(self): - return self._cfg.get('instrument:detector:pixels:rows') - - - -class DetectorConfig(Config): - - - @property - def parameters_old(self): - key = 'instrument:detector:parameters_old' - temp = self._cfg.get(key, default=None) - if temp is None: - return temp - if not os.path.isabs(temp): - temp = os.path.join(self._cfg.working_dir, temp) - if os.path.exists(temp): - return temp - raise IOError( - '"%s": "%s" does not exist' % (key, temp) - ) - - - @property - def pixels(self): - return PixelsConfig(self._cfg) - - - -class InstrumentConfig(Config): - + def configuration(self): + return self._configuration @property - def detector(self): - return DetectorConfig(self._cfg) + def hedm(self): + return self._hedm + @hedm.setter + def hedm(self, yml): + with open(yml, 'r') as f: + icfg = yaml.safe_load(f) + self._hedm = instrument.HEDMInstrument(icfg) @property - def parameters(self): - key = 'instrument:parameters' - temp = self._cfg.get(key) - if not os.path.isabs(temp): - temp = os.path.join(self._cfg.working_dir, temp) - if os.path.exists(temp): - return temp - if self.detector.parameters_old is not None: - # converting old parameter file - return temp - raise IOError( - '"%s": "%s" does not exist' % (key, temp) - ) + def detector_dict(self): + """returns dictionary of detectors""" + return self.hedm.detectors diff --git a/hexrd/config/material.py b/hexrd/config/material.py index 37a490c5..7837c081 100644 --- a/hexrd/config/material.py +++ b/hexrd/config/material.py @@ -1,11 +1,15 @@ import os +try: + import dill as cpl +except(ImportError): + import cPickle as cpl + from .config import Config class MaterialConfig(Config): - @property def definitions(self): temp = self._cfg.get('material:definitions') @@ -17,7 +21,12 @@ def definitions(self): '"material:definitions": "%s" does not exist' ) - @property def active(self): return self._cfg.get('material:active') + + @property + def plane_data(self): + with file(self.definitions, "r") as matf: + mat_list = cpl.load(matf) + return dict(zip([i.name for i in mat_list], mat_list))[self.active].planeData diff --git a/hexrd/config/root.py b/hexrd/config/root.py index f1acaf9f..b99de4b9 100644 --- a/hexrd/config/root.py +++ b/hexrd/config/root.py @@ -1,71 +1,56 @@ import os import logging import multiprocessing as mp -import sys -from hexrd.utils.decorators import memoized +# from hexrd.utils.decorators import memoized +from hexrd import imageseries from .config import Config -from .instrument import InstrumentConfig +from .instrument import Instrument from .findorientations import FindOrientationsConfig from .fitgrains import FitGrainsConfig -from .imageseries import ImageSeriesConfig from .material import MaterialConfig -from .utils import null - logger = logging.getLogger('hexrd.config') - class RootConfig(Config): - - _dirty = False - 
- @property def analysis_name(self): return str(self.get('analysis_name', default='analysis')) + @analysis_name.setter def analysis_name(self, val): self.set('analysis_name', val) - @property def analysis_dir(self): return os.path.join(self.working_dir, self.analysis_name) - - @property - def dirty(self): - return self._dirty - - @property def find_orientations(self): return FindOrientationsConfig(self) - @property def fit_grains(self): return FitGrainsConfig(self) - - @property - def image_series(self): - return ImageSeriesConfig(self) - - @property def instrument(self): - return InstrumentConfig(self) - + instr_file = self.get('instrument') + return Instrument(instr_file) @property def material(self): return MaterialConfig(self) + @property + def analysis_id(self): + return '_'.join( + [self.analysis_name.strip().replace(' ', '-'), + self.material.active.strip().replace(' ', '-')] + ) @property def multiprocessing(self): @@ -82,7 +67,7 @@ def multiprocessing(self): if multiproc > ncpus: logger.warning( 'Resuested %s processes, %d available', - multiproc, ncpus, ncpus + multiproc, ncpus ) res = ncpus else: @@ -105,6 +90,7 @@ def multiprocessing(self): ) res = temp return res + @multiprocessing.setter def multiprocessing(self, val): if val in ('half', 'all', -1): @@ -117,7 +103,6 @@ def multiprocessing(self, val): % (mp.cpu_count(), val) ) - @property def working_dir(self): try: @@ -137,6 +122,7 @@ def working_dir(self): '"working_dir" not specified, defaulting to "%s"' % temp ) return temp + @working_dir.setter def working_dir(self, val): val = os.path.abspath(val) @@ -144,46 +130,22 @@ def working_dir(self, val): raise IOError('"working_dir": "%s" does not exist' % val) self.set('working_dir', val) - - def dump(self, filename): - import yaml - - with open(filename, 'w') as f: - yaml.dump(self._cfg, f) - self._dirty = False - - - def get(self, key, default=null): - args = key.split(':') - args, item = args[:-1], args[-1] - temp = self._cfg - for arg in args: - temp = temp.get(arg, {}) - # intermediate block may be None: - temp = {} if temp is None else temp - try: - res = temp[item] - except KeyError: - if default is not null: - logger.info( - '%s not specified, defaulting to %s', key, default - ) - res = temp.get(item, default) - else: - raise RuntimeError( - '%s must be specified in configuration file' % key - ) - return res - - - def set(self, key, val): - args = key.split(':') - args, item = args[:-1], args[-1] - temp = self._cfg - for arg in args: - temp = temp.get(arg, {}) - # intermediate block may be None: - temp = {} if temp is None else temp - if temp.get(item, null) != val: - temp[item] = val - self._dirty = True + @property + def image_series(self): + """return the imageseries dictionary""" + if not hasattr(self, '_image_dict'): + self._image_dict = dict() + fmt = self.get('image_series:format') + imsdata = self.get('image_series:data') + for ispec in imsdata: + fname = ispec['file'] + args = ispec['args'] + ims = imageseries.open(fname, fmt, **args) + oms = imageseries.omega.OmegaImageSeries(ims) + try: + panel = ispec['panel'] + except(KeyError): + panel = oms.metadata['panel'] + self._image_dict[panel] = oms + + return self._image_dict diff --git a/hexrd/config/tests/common.py b/hexrd/config/tests/common.py index 19603d8b..50cc955b 100644 --- a/hexrd/config/tests/common.py +++ b/hexrd/config/tests/common.py @@ -9,7 +9,7 @@ test_data = { 'existing_path': os.path.abspath('..'), 'nonexistent_path': 'an_unlikely_name_for_a_directory', - 'existing_file': __file__, + 
'existing_file': os.path.abspath(__file__), 'nonexistent_file': 'an_unlikely_name_for_a_file.dat', 'file_stem': 'test_%%05d.dat', 'tempdir': tempfile.gettempdir(), diff --git a/hexrd/config/tests/test_find_orientations.py b/hexrd/config/tests/test_find_orientations.py index c801003a..1dccbf17 100644 --- a/hexrd/config/tests/test_find_orientations.py +++ b/hexrd/config/tests/test_find_orientations.py @@ -7,10 +7,6 @@ """ analysis_name: analysis working_dir: %(tempdir)s -image_series: - omega: - start: -180 - step: 0.25 --- find_orientations: orientation_maps: @@ -70,7 +66,7 @@ def get_reference_data(cls): return reference_data - def test_threshold(self): + def test_gvecs(self): self.assertFalse( self.cfgs[0].find_orientations.extract_measured_g_vectors ) @@ -181,10 +177,11 @@ def test_period(self): self.cfgs[1].find_orientations.omega.period, [0, 360] ) - self.assertEqual( - self.cfgs[2].find_orientations.omega.period, - [0, -360] - ) + ## Do we allow ranges going backwards? + #self.assertEqual( + # self.cfgs[2].find_orientations.omega.period, + # [0, -360] + # ) self.assertRaises( RuntimeError, getattr, self.cfgs[3].find_orientations.omega, 'period' @@ -308,7 +305,7 @@ def get_reference_data(cls): def test_active_hkls(self): self.assertEqual( self.cfgs[0].find_orientations.orientation_maps.active_hkls, - 'all' + None ) self.assertEqual( self.cfgs[1].find_orientations.orientation_maps.active_hkls, diff --git a/hexrd/config/tests/test_image_series.py b/hexrd/config/tests/test_image_series.py index 06885175..ece0e333 100644 --- a/hexrd/config/tests/test_image_series.py +++ b/hexrd/config/tests/test_image_series.py @@ -3,60 +3,19 @@ from .common import TestConfig, test_data - reference_data = \ """ -working_dir: %(tempdir)s -image_series: -# dark: # not specified to test default is None ---- -image_series: - dark: %(existing_file)s - file: - flip: V - images: ---- -image_series: - dark: %(nonexistent_file)s - file: - stem: %(file_stem)s - ids: [1] - flip: triple_lindy - images: - start: 1 - step: 2 - stop: -1 - omega: - start: 0 - step: 0.25 - stop: 360 ---- -image_series: - file: - ids: 2 ---- image_series: - file: - ids: [1,2] ---- -image_series: - file: - stem: %(tempdir)s%(pathsep)s%%s.dat - ids: ["*001*"] ---- -image_series: - file: - stem: %(tempdir)s - ids: %(nonexistent_file)s ---- -image_series: - file: - stem: %(tempdir)s%(pathsep)s - ids: ['foo.dat', 'bar.dat'] + format: array + data: + - filename: f1 + args: a1 + - filename: f2 + args: a2 """ % test_data -class TestImageSeriesConfig(TestConfig): +class TestImageSeries(TestConfig): @classmethod @@ -64,169 +23,24 @@ def get_reference_data(cls): return reference_data - def test_dark(self): - self.assertEqual(self.cfgs[0].image_series.dark, None) - self.assertEqual( - self.cfgs[1].image_series.dark, - test_data['existing_file'] - ) - self.assertRaises( - IOError, - getattr, self.cfgs[2].image_series, 'dark' - ) - - def test_files(self): - files = [] - for i in ['00011.dat', '00012.dat', '00021.dat']: - with tempfile.NamedTemporaryFile(delete=False, suffix=i) as f: - files.append(f.name) - f.file.write('foo') - try: - self.assertEqual( - sorted(self.cfgs[5].image_series.files), - sorted(files[:2]) - ) - finally: - for f in files: - os.remove(f) - self.assertRaises( - IOError, - getattr, self.cfgs[6].image_series, 'files' - ) - files = [] - for i in ['foo.dat', 'bar.dat']: - with open(os.path.join(tempfile.gettempdir(), i), 'w') as f: - files.append(f.name) - f.write('foo') - try: - self.assertEqual( - files, - 
self.cfgs[7].image_series.files - ) - finally: - for f in files: - os.remove(f) - - def test_flip(self): - self.assertEqual(self.cfgs[0].image_series.flip, None) - self.assertEqual(self.cfgs[1].image_series.flip, 'v') - self.assertRaises( - RuntimeError, - getattr, self.cfgs[2].image_series, 'flip' - ) - - def test_n_frames(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].image_series, 'n_frames' - ) - self.assertEqual(self.cfgs[2].image_series.n_frames, 1440) + def test_format(self): - - -class TestFileConfig(TestConfig): - - - @classmethod - def get_reference_data(cls): - return reference_data - - - def test_stem(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].image_series.file, 'stem' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[1].image_series.file, 'stem' - ) self.assertEqual( - self.cfgs[2].image_series.file.stem, - os.path.join(test_data['tempdir'], test_data['file_stem']) + 'array', + self.cfgs[0].get('image_series:format') ) + def test_data(self): - def test_ids(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].image_series.file, 'ids' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[1].image_series.file, 'ids' - ) - self.assertEqual( - self.cfgs[2].image_series.file.ids, - [1] - ) - self.assertEqual( - self.cfgs[3].image_series.file.ids, - [2] - ) - self.assertEqual( - self.cfgs[4].image_series.file.ids, - [1, 2] - ) - - - -class TestImagesConfig(TestConfig): - - - @classmethod - def get_reference_data(cls): - return reference_data - - - def test_start(self): - self.assertEqual(self.cfgs[0].image_series.images.start, 0) - self.assertEqual(self.cfgs[1].image_series.images.start, 0) - self.assertEqual(self.cfgs[2].image_series.images.start, 1) - - - def test_step(self): - self.assertEqual(self.cfgs[0].image_series.images.step, 1) - self.assertEqual(self.cfgs[1].image_series.images.step, 1) - self.assertEqual(self.cfgs[2].image_series.images.step, 2) - + d = self.cfgs[0].get('image_series:data') + self.assertEqual(len(d), 2) - def test_stop(self): - self.assertEqual(self.cfgs[0].image_series.images.stop, None) - self.assertEqual(self.cfgs[1].image_series.images.stop, None) - self.assertEqual(self.cfgs[2].image_series.images.stop, -1) + def test_data_filename(self): + d = self.cfgs[0].get('image_series:data') + self.assertEqual(d[0]['filename'], 'f1') + def test_data_args(self): -class TestOmegaConfig(TestConfig): - - - @classmethod - def get_reference_data(cls): - return reference_data - - - def test_start(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].image_series.omega, 'start' - ) - self.assertEqual(self.cfgs[2].image_series.omega.start, 0) - - - - def test_step(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].image_series.omega, 'step' - ) - self.assertEqual(self.cfgs[2].image_series.omega.step, 0.25) - - - - def test_stop(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].image_series.omega, 'stop' - ) - self.assertEqual(self.cfgs[2].image_series.omega.stop, 360) + d = self.cfgs[0].get('image_series:data') + self.assertEqual(d[1]['args'], 'a2') diff --git a/hexrd/config/tests/test_instrument.py b/hexrd/config/tests/test_instrument.py index dfec1d37..0c11e8b0 100644 --- a/hexrd/config/tests/test_instrument.py +++ b/hexrd/config/tests/test_instrument.py @@ -1,146 +1,148 @@ import os +import hexrd.instrument from .common import TestConfig, test_data - +from ..instrument import Instrument, Beam, OscillationStage reference_data = \ """ 
-analysis_name: foo -working_dir: %(tempdir)s +beam: {} --- -instrument: +beam: + energy: 2.0 + vector: {azimuth: 0.0, polar_angle: 0.0} --- -instrument: - parameters: %(nonexistent_file)s - detector: - parameters_old: %(nonexistent_file)s - pixels: +oscillation_stage: + chi: 0.05 + t_vec_s: [1., 2., 3.] --- -instrument: - parameters: %(existing_file)s - detector: +detectors: + GE1: + distortion: + function_name: GE_41RT + parameters: [7.617424115028922e-05, -1.01006559390677e-06, -0.00016461139058911365, + 2.0, 2.0, 2.0] pixels: - size: 1 - rows: 1024 columns: 2048 ---- -instrument: - parameters: %(nonexistent_file)s - detector: - parameters_old: %(existing_file)s + rows: 2048 + size: [0.2, 0.2] + saturation_level: 14000.0 + transform: + t_vec_d: [94.51351402409436, -337.4575337059045, -1921.058935922086] + tilt_angles: [0.002314455268055846, 6.288758382211901e-05, 1.0938371193555785] + GE2: + distortion: + function_name: GE_41RT + parameters: [5.245111176545523e-05, -3.165350904260842e-05, -0.00020774139197230943, + 2.0, 2.0, 2.0] pixels: - size: [1, 2] + columns: 2048 + rows: 2048 + size: [0.2, 0.2] + saturation_level: 14000.0 + transform: + t_vec_d: [-320.190205619744, -95.95873622987875, -1920.07233414923] + tilt_angles: [0.00044459111576242654, 0.003958638944891969, -0.47488346109306645] +--- +instrument: instrument.yaml """ % test_data -class TestInstrumentConfig(TestConfig): - +class TestInstrument(TestConfig): @classmethod def get_reference_data(cls): return reference_data + def test_beam(self): + icfg = Instrument(self.cfgs[1]) + b = icfg.beam + self.assertTrue(isinstance(b, hexrd.instrument.beam.Beam), "Failed to produce a Beam instance") - def test_parameters(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].instrument, 'parameters' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[1].instrument, 'parameters' - ) - self.assertRaises( - IOError, - getattr, self.cfgs[2].instrument, 'parameters' - ) - self.assertEqual( - self.cfgs[3].instrument.parameters, - test_data['existing_file'] - ) - # next test should succeed, converting from old parameters - self.assertEqual( - self.cfgs[4].instrument.parameters, - os.path.join(test_data['tempdir'], test_data['nonexistent_file']) - ) + def test_oscillation_stage(self): + icfg = Instrument(self.cfgs[2]) + ostage = icfg.oscillation_stage + self.assertTrue(isinstance(ostage, hexrd.instrument.oscillation_stage.OscillationStage), + "Failed to produce an OscillationStage instance") + def test_detector(self): + icfg = Instrument(self.cfgs[3]) + det = icfg.get_detector('GE1') + self.assertTrue(isinstance(det, hexrd.instrument.PlanarDetector), + "Failed to produce an Detector instance") + def test_detector_dict(self): + icfg = Instrument(self.cfgs[3]) + dd = icfg.detector_dict + self.assertTrue(isinstance(dd, dict), + "Failed to produce an Detector Dictionary instance") + for k in dd: + d = dd[k] + self.assertTrue(isinstance(d, hexrd.instrument.PlanarDetector), + "Detector dictionary values are not detector instances") -class TestDetectorConfig(TestConfig): +class TestBeam(TestConfig): @classmethod def get_reference_data(cls): return reference_data + def test_beam_energy_dflt(self): + bcfg = Beam(self.cfgs[0]) + energy = bcfg.energy + self.assertEqual(energy, Beam.beam_energy_DFLT, "Incorrect default beam energy") - def test_parameters_old(self): - self.assertEqual(self.cfgs[0].instrument.detector.parameters_old, None) - self.assertEqual(self.cfgs[1].instrument.detector.parameters_old, None) - self.assertRaises( - 
IOError, - getattr, self.cfgs[2].instrument.detector, 'parameters_old' - ) - self.assertEqual( - self.cfgs[4].instrument.detector.parameters_old, - os.path.join(test_data['tempdir'], test_data['existing_file']) - ) + def test_beam_energy(self): + bcfg = Beam(self.cfgs[1]) + energy = bcfg.energy + self.assertEqual(energy, 2.0, "Incorrect beam energy") + def test_beam_vector_dflt(self): + bcfg = Beam(self.cfgs[0]) + bvecdflt = Beam.beam_vec_DFLT + bvec = bcfg.vector + self.assertEqual(bvec[0], bvecdflt[0], "Incorrect default beam vector") + self.assertEqual(bvec[1], bvecdflt[1], "Incorrect default beam vector") + self.assertEqual(bvec[2], bvecdflt[2], "Incorrect default beam vector") -class TestDetectorPixelsConfig(TestConfig): + def test_beam_vector(self): + bcfg = Beam(self.cfgs[1]) + bvec = bcfg.vector + self.assertEqual(bvec[0], 0.0, "Incorrect default beam vector") + self.assertEqual(bvec[1], -1.0, "Incorrect default beam vector") + self.assertEqual(bvec[2], 0.0, "Incorrect default beam vector") + + +class TestOscillationStage(TestConfig): @classmethod def get_reference_data(cls): return reference_data + def test_chi_dflt(self): + oscfg = OscillationStage(self.cfgs[0]) + self.assertEqual(oscfg.chi, OscillationStage.chi_DFLT, "Incorrect default chi for oscillation stage") + + def test_chi(self): + oscfg = OscillationStage(self.cfgs[2]) + self.assertEqual(oscfg.chi, 0.05, "Incorrect default chi for oscillation stage") + + def test_tvec_dflt(self): + oscfg = OscillationStage(self.cfgs[0]) + tvec_dflt = OscillationStage.tvec_DFLT + tvec = oscfg.tvec + + self.assertEqual(tvec[0], tvec_dflt[0], "Incorrect default translation vector") + self.assertEqual(tvec[1], tvec_dflt[1], "Incorrect default translation vector") + self.assertEqual(tvec[2], tvec_dflt[2], "Incorrect default translation vector") + + def test_tvec(self): + oscfg = OscillationStage(self.cfgs[2]) + tvec = oscfg.tvec - def test_columns(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].instrument.detector.pixels, 'columns' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[1].instrument.detector.pixels, 'columns' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[2].instrument.detector.pixels, 'columns' - ) - self.assertEqual(self.cfgs[3].instrument.detector.pixels.columns, 2048) - - - - def test_size(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].instrument.detector.pixels, 'size' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[1].instrument.detector.pixels, 'size' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[2].instrument.detector.pixels, 'size' - ) - self.assertEqual(self.cfgs[3].instrument.detector.pixels.size, [1, 1]) - self.assertEqual(self.cfgs[4].instrument.detector.pixels.size, [1, 2]) - - - def test_rows(self): - self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].instrument.detector.pixels, 'rows' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[1].instrument.detector.pixels, 'rows' - ) - self.assertRaises( - RuntimeError, - getattr, self.cfgs[2].instrument.detector.pixels, 'rows' - ) - self.assertEqual(self.cfgs[3].instrument.detector.pixels.rows, 1024) + self.assertEqual(tvec[0], 1., "Incorrect translation vector") + self.assertEqual(tvec[1], 2., "Incorrect translation vector") + self.assertEqual(tvec[2], 3., "Incorrect translation vector") diff --git a/hexrd/constants.py b/hexrd/constants.py index 582eba29..c9f36aba 100644 --- a/hexrd/constants.py +++ b/hexrd/constants.py @@ -69,5 +69,7 @@ beam_vec = -lab_z 
eta_vec = lab_x + # for energy/wavelength conversions -keVToAngstrom = lambda x: (1e7*sc.c*sc.h/sc.e) / float(x) +def keVToAngstrom(x): + return (1e7*sc.c*sc.h/sc.e) / np.array(x, dtype=float) diff --git a/hexrd/coreutil.py b/hexrd/coreutil.py index b34e331f..fa7652c8 100644 --- a/hexrd/coreutil.py +++ b/hexrd/coreutil.py @@ -1,5 +1,4 @@ import collections -from ConfigParser import SafeConfigParser import copy import logging import os @@ -169,21 +168,9 @@ def initialize_experiment(cfg): pd = ws.activeMaterial.planeData - image_start = cfg.image_series.images.start - dark = cfg.image_series.dark - flip = cfg.image_series.flip - # detector data try: - reader = ReadGE( - [(f, image_start) for f in cfg.image_series.files], - np.radians(cfg.image_series.omega.start), - np.radians(cfg.image_series.omega.step), - subtractDark=dark is not None, # TODO: get rid of this - dark=dark, - doFlip=flip is not None, - flipArg=flip, # TODO: flip=flip - ) + reader = ReadGE(cfg.image_series.omegaseries) except IOError: logger.info("raw data not found, skipping reader init") reader = None @@ -192,7 +179,7 @@ def initialize_experiment(cfg): ws.loadDetector(os.path.join(cwd, detector_fname)) detector = ws.detector except IOError: - logger.info("old detector par file not found, skipping; \nalthough you may need this for find-orientations") + logger.info("old detector par file not found, skipping; \nalthough you may need this for find-orientations") detector = None return pd, reader, detector diff --git a/hexrd/findorientations.py b/hexrd/findorientations.py old mode 100644 new mode 100755 index e3492671..93d72393 --- a/hexrd/findorientations.py +++ b/hexrd/findorientations.py @@ -1,35 +1,26 @@ from __future__ import print_function -import cPickle import logging import multiprocessing as mp import os -import time +import timeit import numpy as np -#np.seterr(over='ignore', invalid='ignore') +# np.seterr(over='ignore', invalid='ignore') + +# import tqdm import scipy.cluster as cluster from scipy import ndimage +from skimage.feature import blob_dog, blob_log +from hexrd import constants as const from hexrd import matrixutil as mutil -from hexrd.xrd import indexer as idx +from hexrd.xrd import indexer +from hexrd import instrument from hexrd.xrd import rotations as rot -from hexrd.xrd import symmetry as sym -from hexrd.xrd import transforms as xf from hexrd.xrd import transforms_CAPI as xfcapi -from hexrd.coreutil import initialize_experiment - -from hexrd.xrd import xrdutil -from hexrd.xrd.xrdutil import simulateGVecs - -from hexrd.xrd import distortion as dFuncs - -from hexrd.fitgrains import get_instrument_parameters - -logger = logging.getLogger(__name__) - -save_as_ascii = False # FIX LATER... +from hexrd.xrd.xrdutil import EtaOmeMaps # just require scikit-learn? have_sklearn = False @@ -44,11 +35,38 @@ pass -def generate_orientation_fibers(eta_ome, chi, threshold, seed_hkl_ids, fiber_ndiv, filt_stdev=0.8, ncpus=1): +save_as_ascii = False # FIX LATER... 
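A note on the `constants.py` hunk above: replacing the `keVToAngstrom` lambda with a function that casts through `np.array` means the conversion now accepts scalars, lists, or arrays. A quick standalone sanity check (`scipy.constants` supplies the same `c`, `h`, and `e` the module uses):

```python
import numpy as np
import scipy.constants as sc

def keVToAngstrom(x):
    # lambda [A] = 1e7 * c * h / e / E [keV]  (~12.398 / E[keV])
    return (1e7*sc.c*sc.h/sc.e) / np.array(x, dtype=float)

print(keVToAngstrom(80.0))          # scalar energy -> ~0.155 A
print(keVToAngstrom([50.0, 80.0]))  # arrays work too -> [~0.248, ~0.155]
```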
+fwhm_to_stdev = 1./np.sqrt(8*np.log(2)) + +logger = logging.getLogger(__name__) + + +# ============================================================================= +# FUNCTIONS +# ============================================================================= + + +def generate_orientation_fibers(cfg, eta_ome): """ From ome-eta maps and hklid spec, generate list of quaternions from fibers """ + # grab the relevant parameters from the root config + ncpus = cfg.multiprocessing + chi = cfg.instrument.hedm.chi + seed_hkl_ids = cfg.find_orientations.seed_search.hkl_seeds + fiber_ndiv = cfg.find_orientations.seed_search.fiber_ndiv + method_dict = cfg.find_orientations.seed_search.method + + # strip out method name and kwargs + # !!! note that the config enforces that method is a dict with length 1 + # TODO: put a consistency check on required kwargs, or otherwise specify + # default values for each case? They must be specified as of now. + method = next(method_dict.iterkeys()) + method_kwargs = method_dict[method] + logger.info('\tusing "%s" method for fiber generation' + % method) + # seed_hkl_ids must be consistent with this... pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids] @@ -62,41 +80,66 @@ def generate_orientation_fibers(eta_ome, chi, threshold, seed_hkl_ids, fiber_ndi # crystallography data from the pd object pd = eta_ome.planeData hkls = pd.hkls - tTh = pd.getTTh() + tTh = pd.getTTh() bMat = pd.latVecOps['B'] csym = pd.getLaueGroup() - params = { - 'bMat':bMat, - 'chi':chi, - 'csym':csym, - 'fiber_ndiv':fiber_ndiv, - } + params = dict( + bMat=bMat, + chi=chi, + csym=csym, + fiber_ndiv=fiber_ndiv) - ############################################ - ## Labeling of spots from seed hkls ## - ############################################ + # ========================================================================= + # Labeling of spots from seed hkls + # ========================================================================= - qfib = [] - input_p = [] + qfib = [] + input_p = [] numSpots = [] - coms = [] + coms = [] for i in seed_hkl_ids: - # First apply filter - this_map_f = -ndimage.filters.gaussian_laplace(eta_ome.dataStore[i], filt_stdev) - - labels_t, numSpots_t = ndimage.label( - this_map_f > threshold, - structureNDI_label - ) - coms_t = np.atleast_2d( - ndimage.center_of_mass( - this_map_f, - labels=labels_t, - index=np.arange(1, np.amax(labels_t)+1) + if method == 'label': + # First apply filter + filt_stdev = fwhm_to_stdev * method_kwargs['filter_radius'] + this_map_f = -ndimage.filters.gaussian_laplace( + eta_ome.dataStore[i], filt_stdev) + + labels_t, numSpots_t = ndimage.label( + this_map_f > method_kwargs['threshold'], + structureNDI_label ) - ) - #labels.append(labels_t) + coms_t = np.atleast_2d( + ndimage.center_of_mass( + this_map_f, + labels=labels_t, + index=np.arange(1, np.amax(labels_t) + 1) + ) + ) + elif method in ['blob_log', 'blob_dog']: + # must scale map + # TODO: we should so a parameter study here + this_map = eta_ome.dataStore[i] + this_map[np.isnan(this_map)] = 0. + this_map -= np.min(this_map) + scl_map = 2*this_map/np.max(this_map) - 1. 
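On the rescaling just above: the `skimage` blob detectors are sensitive to the intensity scale, so each eta-omega map is shifted and scaled onto [-1, 1] before detection. A minimal sketch with a toy map (the `blob_log` kwargs here are the illustrative values quoted in the comments that follow, not enforced defaults):

```python
import numpy as np
from skimage.feature import blob_log

# toy stand-in for one eta_ome.dataStore[i] map with a single hot region
this_map = np.zeros((60, 120))
this_map[28:33, 38:43] = 10.

# the same conditioning used above
this_map[np.isnan(this_map)] = 0.
this_map -= np.min(this_map)
scl_map = 2*this_map/np.max(this_map) - 1.

blobs = np.atleast_2d(
    blob_log(scl_map, min_sigma=0.5, max_sigma=5,
             num_sigma=10, threshold=0.01, overlap=0.1)
)
print(blobs[:, :2])   # (omega, eta) pixel centroids, here near [30., 40.]
```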
+ + # TODO: Currently the method kwargs must be explicitly specified + # in the config, and there are no checks + # for 'blob_log': min_sigma=0.5, max_sigma=5, + # num_sigma=10, threshold=0.01, overlap=0.1 + # for 'blob_dog': min_sigma=0.5, max_sigma=5, + # sigma_ratio=1.6, threshold=0.01, overlap=0.1 + if method == 'blob_log': + blobs = np.atleast_2d( + blob_log(scl_map, **method_kwargs) + ) + else: # blob_dog + blobs = np.atleast_2d( + blob_dog(scl_map, **method_kwargs) + ) + numSpots_t = len(blobs) + coms_t = blobs[:, :2] numSpots.append(numSpots_t) coms.append(coms_t) pass @@ -108,7 +151,7 @@ def generate_orientation_fibers(eta_ome, chi, threshold, seed_hkl_ids, fiber_ndi eta_c = eta_ome.etaEdges[0] + (0.5 + coms[i][ispot][1])*del_eta input_p.append( np.hstack( - [hkls[:, pd_hkl_ids[i]], + [hkls[:, pd_hkl_ids[i]], tTh[pd_hkl_ids[i]], eta_c, ome_c] ) ) @@ -117,22 +160,37 @@ def generate_orientation_fibers(eta_ome, chi, threshold, seed_hkl_ids, fiber_ndi pass # do the mapping - start = time.time() + start = timeit.default_timer() qfib = None if ncpus > 1: # multiple process version + # ???: Need a chunksize in map? + chunksize = max(1, len(input_p)//(10*ncpus)) pool = mp.Pool(ncpus, discretefiber_init, (params, )) - qfib = pool.map(discretefiber_reduced, input_p) # chunksize=chunksize) + qfib = pool.map( + discretefiber_reduced, input_p, + chunksize=chunksize + ) + ''' + # This is an experiment... + ntotal= 10*ncpus + np.remainder(len(input_p), 10*ncpus) > 0 + for _ in tqdm.tqdm( + pool.imap_unordered( + discretefiber_reduced, input_p, chunksize=chunksize + ), total=ntotal + ): + pass + print(_.shape) + ''' pool.close() + pool.join() else: # single process version. - global paramMP - discretefiber_init(params) # sets paramMP + discretefiber_init(params) # sets paramMP qfib = map(discretefiber_reduced, input_p) - paramMP = None # clear paramMP - elapsed = (time.time() - start) - logger.info("fiber generation took %.3f seconds", elapsed) - + discretefiber_cleanup() + elapsed = (timeit.default_timer() - start) + logger.info("\tfiber generation took %.3f seconds", elapsed) return np.hstack(qfib) @@ -140,18 +198,23 @@ def discretefiber_init(params): global paramMP paramMP = params + +def discretefiber_cleanup(): + global paramMP + del paramMP + def discretefiber_reduced(params_in): """ input parameters are [hkl_id, com_ome, com_eta] """ - bMat = paramMP['bMat'] - chi = paramMP['chi'] - csym = paramMP['csym'] + bMat = paramMP['bMat'] + chi = paramMP['chi'] + csym = paramMP['csym'] fiber_ndiv = paramMP['fiber_ndiv'] hkl = params_in[:3].reshape(3, 1) - + gVec_s = xfcapi.anglesToGVec( np.atleast_2d(params_in[3:]), chi=chi, @@ -170,7 +233,8 @@ def discretefiber_reduced(params_in): return tmp -def run_cluster(compl, qfib, qsym, cfg, min_samples=None, compl_thresh=None, radius=None): +def run_cluster(compl, qfib, qsym, cfg, + min_samples=None, compl_thresh=None, radius=None): """ """ algorithm = cfg.find_orientations.clustering.algorithm @@ -186,7 +250,7 @@ def run_cluster(compl, qfib, qsym, cfg, min_samples=None, compl_thresh=None, rad if radius is not None: cl_radius = radius - start = time.clock() # time this + start = timeit.default_timer() # timeit this num_above = sum(np.array(compl) > min_compl) if num_above == 0: @@ -199,9 +263,13 @@ def run_cluster(compl, qfib, qsym, cfg, min_samples=None, compl_thresh=None, rad else: # use compiled module for distance # just to be safe, must order qsym as C-contiguous - qsym = np.array(qsym.T, order='C').T + qsym = np.array(qsym.T, order='C').T + def 
quat_distance(x, y):
-    return xfcapi.quat_distance(np.array(x, order='C'), np.array(y, order='C'), qsym)
+        return xfcapi.quat_distance(
+            np.array(x, order='C'), np.array(y, order='C'),
+            qsym
+        )
 
     qfib_r = qfib[:, np.array(compl) > min_compl]
 
@@ -211,8 +279,10 @@ def quat_distance(x, y):
         if algorithm == 'sph-dbscan' or algorithm == 'fclusterdata':
             logger.info("falling back to euclidean DBSCAN")
             algorithm = 'ort-dbscan'
-        #raise RuntimeError, \
-        #    "Requested clustering of %d orientations, which would be too slow!" %qfib_r.shape[1]
+        # raise RuntimeError(
+        #     "Requested clustering of %d orientations, "
+        #     + "which would be too slow!" % qfib_r.shape[1]
+        # )
 
     logger.info(
         "Feeding %d orientations above %.1f%% to clustering",
@@ -225,9 +295,10 @@ def quat_distance(x, y):
             "sklearn >= 0.14 required for dbscan; using fclusterdata"
         )
 
-    if algorithm == 'dbscan' or algorithm == 'ort-dbscan' or algorithm == 'sph-dbscan':
+    if algorithm in ['dbscan', 'ort-dbscan', 'sph-dbscan']:
         # munge min_samples according to options
-        if min_samples is None or cfg.find_orientations.use_quaternion_grid is not None:
+        if min_samples is None \
+                or cfg.find_orientations.use_quaternion_grid is not None:
             min_samples = 1
 
         if algorithm == 'sph-dbscan':
@@ -263,10 +334,10 @@ def quat_distance(x, y):
         )
 
         # extract cluster labels
-        cl = np.array(labels, dtype=int) # convert to array
-        noise_points = cl == -1 # index for marking noise
-        cl += 1 # move index to 1-based instead of 0
-        cl[noise_points] = -1 # re-mark noise as -1
+        cl = np.array(labels, dtype=int)  # convert to array
+        noise_points = cl == -1  # index for marking noise
+        cl += 1  # move index to 1-based instead of 0
+        cl[noise_points] = -1  # re-mark noise as -1
         logger.info("dbscan found %d noise points", sum(noise_points))
     elif algorithm == 'fclusterdata':
         logger.info("using spherical fclusterdata")
@@ -296,19 +367,20 @@ def quat_distance(x, y):
             ).flatten()
         pass
     pass
-
-    if (algorithm == 'dbscan' or algorithm == 'ort-dbscan') \
-        and qbar.size/4 > 1:
+
+    if algorithm in ('dbscan', 'ort-dbscan') and qbar.size/4 > 1:
         logger.info("\tchecking for duplicate orientations...")
         cl = cluster.hierarchy.fclusterdata(
             qbar.T,
            np.radians(cl_radius),
            criterion='distance',
            metric=quat_distance)
-        nblobs_new = len(np.unique(cl))
+        nblobs_new = len(np.unique(cl))
         if nblobs_new < nblobs:
-            logger.info("\tfound %d duplicates within %f degrees" \
-                        %(nblobs-nblobs_new, cl_radius))
+            logger.info(
+                "\tfound %d duplicates within %f degrees",
+                nblobs - nblobs_new, cl_radius
+            )
             tmp = np.zeros((4, nblobs_new))
             for i in range(nblobs_new):
                 npts = sum(cl == i + 1)
@@ -319,8 +391,8 @@ def quat_distance(x, y):
             qbar = tmp
         pass
     pass
-
-    logger.info("clustering took %f seconds", time.clock() - start)
+
+    logger.info("clustering took %f seconds", timeit.default_timer() - start)
     logger.info(
         "Found %d orientation clusters with >=%.1f%% completeness"
        " and %2f misorientation",
@@ -332,244 +404,389 @@ def quat_distance(x, y):
     return np.atleast_2d(qbar), cl
 
 
-def load_eta_ome_maps(cfg, pd, reader, detector, hkls=None, clean=False):
-    fn = os.path.join(
-        cfg.working_dir,
-        cfg.find_orientations.orientation_maps.file
-    )
+def load_eta_ome_maps(cfg, pd, image_series, hkls=None, clean=False):
+    """
+    Load the eta-ome maps specified by the config and CLI flags.
+
+    Parameters
+    ----------
+    cfg : object
+        The root hexrd config object.
+    pd : PlaneData
+        Crystallographic plane data for the active material.
+    image_series : dict
+        The imageseries dictionary, keyed by detector name.
+    hkls : list, optional
+        Overrides the active hkl ids from the config. The default is None.
+    clean : bool, optional
+        If True, force recomputation of the maps. The default is False.
+
+    Returns
+    -------
+    EtaOmeMaps
+        The eta-ome maps object, loaded from file when present, otherwise
+        recomputed via generate_eta_ome_maps().
+
+    """
+    # check maps filename
+    if cfg.find_orientations.orientation_maps.file is None:
+        maps_fname = '_'.join([cfg.analysis_id, "eta-ome_maps.npz"])
+    else:
+        maps_fname = cfg.find_orientations.orientation_maps.file
+
+    fn = os.path.join(cfg.working_dir, maps_fname)
+
+    # ???: necessary?
+    if fn.split('.')[-1] != 'npz':
+        fn = fn + '.npz'
 
     if not clean:
         try:
-            res = cPickle.load(open(fn, 'r'))
+            res = EtaOmeMaps(fn)
             pd = res.planeData
             available_hkls = pd.hkls.T
             logger.info('loaded eta/ome orientation maps from %s', fn)
             hkls = [str(i) for i in available_hkls[res.iHKLList]]
             logger.info(
-                'hkls used to generate orientation maps: %s', hkls)
+                'hkls used to generate orientation maps: %s',
+                hkls
+            )
             return res
         except (AttributeError, IOError):
-            return generate_eta_ome_maps(cfg, pd, reader, detector, hkls)
+            logger.info("specified maps file '%s' not found; "
+                        + "recomputing eta/ome orientation maps",
+                        fn)
+            return generate_eta_ome_maps(cfg, hkls=hkls)
     else:
-        logger.info('clean option specified; recomputing eta/ome orientation maps')
+        logger.info('clean option specified; '
+                    + 'recomputing eta/ome orientation maps')
+        return generate_eta_ome_maps(cfg, hkls=hkls)
 
 
-def generate_eta_ome_maps(cfg, pd, reader, detector, hkls=None):
-    available_hkls = pd.hkls.T
-    # default to all hkls defined for material
-    active_hkls = range(available_hkls.shape[0])
-    # override with hkls from config, if specified
+def generate_eta_ome_maps(cfg, hkls=None):
+    """
+    Generates the eta-omega maps specified in the input config.
+    """
+    # extract PlaneData from config and set active hkls
+    plane_data = cfg.material.plane_data
+
+    # handle logic for active hkl spec
+    # !!!: default to all hkls defined for material,
+    #      override with
+    #      1) hkls from config, if specified; or
+    #      2) hkls from kwarg, if specified
+    available_hkls = plane_data.hkls.T
+    active_hkls = range(len(available_hkls))
     temp = cfg.find_orientations.orientation_maps.active_hkls
     active_hkls = active_hkls if temp == 'all' else temp
-    # override with hkls from command line, if specified
     active_hkls = hkls if hkls is not None else active_hkls
 
+    # logging output
+    hklseedstr = ', '.join(
+        [str(available_hkls[i]) for i in active_hkls]
+    )
+
     logger.info(
-        "using hkls to generate orientation maps: %s",
-        ', '.join([str(i) for i in available_hkls[active_hkls]])
-    )
+        "building eta_ome maps using hkls: %s",
+        hklseedstr
+    )
+
+    # grab imageseries dict from cfg
+    imsd = cfg.image_series
 
-    # not ready # eta_ome = xrdutil.EtaOmeMaps(cfg, reader=reader, eta_step=None)
-    bin_frames = cfg.find_orientations.orientation_maps.bin_frames
-    eta_bins = np.int(2*np.pi / abs(reader.getDeltaOmega())) / bin_frames
-    eta_ome = xrdutil.CollapseOmeEta(
-        reader,
-        pd,
-        pd.hkls[:, active_hkls],
-        detector,
-        nframesLump=bin_frames,
-        nEtaBins=eta_bins,
-        debug=False,
-        threshold=cfg.find_orientations.orientation_maps.threshold
-    ).getEtaOmeMaps()
+    # handle omega period
+    # !!! we assume all detector ims have the same ome ranges, so any will do!
+    oims = next(imsd.itervalues())
+    ome_period = oims.omega[0, 0] + np.r_[0., 360.]
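The omega period established here anchors a full 360-degree window at the starting omega of the first frame; downstream code converts it to radians. In isolation:

```python
import numpy as np

omega_start = -180.0                       # stand-in for oims.omega[0, 0]
ome_period = omega_start + np.r_[0., 360.]
print(ome_period)                          # [-180.  180.]
print(np.radians(ome_period))              # [-3.14159265  3.14159265]
```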
+
+    start = timeit.default_timer()
+
+    # make eta_ome maps
+    eta_ome = instrument.GenerateEtaOmeMaps(
+        imsd, cfg.instrument.hedm, plane_data,
+        active_hkls=active_hkls,
+        threshold=cfg.find_orientations.orientation_maps.threshold,
+        ome_period=ome_period)
+
+    logger.info("\t\t...took %f seconds", timeit.default_timer() - start)
+
+    # save maps
+    # ???: should perhaps set default maps name at module level
+    map_fname = cfg.find_orientations.orientation_maps.file \
+        or '_'.join([cfg.analysis_id, "eta-ome_maps.npz"])
+
+    if not os.path.exists(cfg.working_dir):
+        os.mkdir(cfg.working_dir)
 
     fn = os.path.join(
         cfg.working_dir,
-        cfg.find_orientations.orientation_maps.file
-    )
-    fd = os.path.split(fn)[0]
-    if not os.path.isdir(fd):
-        os.makedirs(fd)
-    with open(fn, 'w') as f:
-        cPickle.dump(eta_ome, f)
-    logger.info("saved eta/ome orientation maps to %s", fn)
+        map_fname
+    )
+
+    eta_ome.save(fn)
+
+    logger.info('saved eta/ome orientation maps to "%s"', fn)
+
     return eta_ome
 
 
-def find_orientations(cfg, hkls=None, clean=False, profile=False):
+def find_orientations(cfg,
+                      hkls=None, clean=False, profile=False,
+                      use_direct_testing=False):
     """
-    Takes a config dict as input, generally a yml document
-
-    NOTE: single cfg instance, not iterator!
+
+    Parameters
+    ----------
+    cfg : object
+        The root hexrd config object (a single instance, not an iterator).
+    hkls : list, optional
+        Overrides the active hkl ids from the config. The default is None.
+    clean : bool, optional
+        Force recomputation of the eta-ome maps. The default is False.
+    profile : bool, optional
+        Run with profiling enabled. The default is False.
+    use_direct_testing : bool, optional
+        Score trial orientations directly against the image stacks rather
+        than against the eta-ome maps. The default is False.
+
+    Returns
+    -------
+    None.
+
     """
-    # ...make this an attribute in cfg?
-    analysis_id = '%s_%s' %(
-        cfg.analysis_name.strip().replace(' ', '-'),
-        cfg.material.active.strip().replace(' ', '-'),
+    # grab objects from config
+    plane_data = cfg.material.plane_data
+    imsd = cfg.image_series
+    instr = cfg.instrument.hedm
+    eta_ranges = cfg.find_orientations.eta.range
+
+    # tolerances
+    tth_tol = plane_data.tThWidth
+    eta_tol = np.radians(cfg.find_orientations.eta.tolerance)
+    ome_tol = np.radians(cfg.find_orientations.omega.tolerance)
+
+    # handle omega period
+    # !!! We assume all detector ims have the same ome ranges;
+    #     therefore any will do for this purpose.
+    oims = next(imsd.itervalues())
+    ome_period = oims.omega[0, 0] + np.r_[0., 360.]
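For reference, the `find_orientations` keys consulted in this function and in `generate_orientation_fibers` map onto a YAML block along these lines (illustrative values only, not shipped defaults):

```yaml
find_orientations:
  orientation_maps:
    file: null            # default: <analysis_id>_eta-ome_maps.npz
    threshold: 25
    active_hkls: [0, 1, 2]
  seed_search:
    hkl_seeds: [0, 1]
    fiber_ndiv: 720
    method:
      label:              # or blob_log / blob_dog, with their kwargs
        filter_radius: 1
        threshold: 1
  threshold: 1            # on-map threshold for paintGrid
  use_quaternion_grid: null
  omega:
    tolerance: 1.0
  eta:
    tolerance: 1.0
    range: [[-85, 85], [95, 265]]
  clustering:
    algorithm: dbscan
    completeness: 0.85
    radius: 1.0
```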
+ ome_ranges = [ + ([i['ostart'], i['ostop']]) + for i in oims.omegawedges.wedges + ] + + # for multiprocessing + ncpus = cfg.multiprocessing + + # thresholds + image_threshold = cfg.find_orientations.orientation_maps.threshold + on_map_threshold = cfg.find_orientations.threshold + compl_thresh = cfg.find_orientations.clustering.completeness + + # clustering + cl_algorithm = cfg.find_orientations.clustering.algorithm + cl_radius = cfg.find_orientations.clustering.radius + + # ========================================================================= + # ORIENTATION SCORING + # ========================================================================= + do_grid_search = cfg.find_orientations.use_quaternion_grid is not None + + if use_direct_testing: + npdiv_DFLT = 2 + params = dict( + plane_data=plane_data, + instrument=instr, + imgser_dict=imsd, + tth_tol=tth_tol, + eta_tol=eta_tol, + ome_tol=ome_tol, + eta_ranges=np.radians(eta_ranges), + ome_period=np.radians(ome_period), + npdiv=npdiv_DFLT, + threshold=image_threshold) + + logger.info("\tusing direct search on %d processes", ncpus) + + # handle search space + if cfg.find_orientations.use_quaternion_grid is None: + # doing seeded search + logger.info("Will perform seeded search") + logger.info( + "\tgenerating search quaternion list using %d processes", + ncpus + ) + start = timeit.default_timer() + + # need maps + eta_ome = load_eta_ome_maps(cfg, plane_data, imsd, + hkls=hkls, clean=clean) + + # generate trial orientations + qfib = generate_orientation_fibers(cfg, eta_ome) + + logger.info("\t\t...took %f seconds", + timeit.default_timer() - start) + else: + # doing grid search + try: + qfib = np.load(cfg.find_orientations.use_quaternion_grid) + except(IOError): + raise RuntimeError( + "specified quaternion grid file '%s' not found!" + % cfg.find_orientations.use_quaternion_grid + ) + + # execute direct search + pool = mp.Pool( + ncpus, + indexer.test_orientation_FF_init, + (params, ) ) - - # a goofy call, could be replaced with two more targeted calls - pd, reader, detector = initialize_experiment(cfg) - - # need instrument cfg later on down... - instr_cfg = get_instrument_parameters(cfg) - detector_params = np.hstack([ - instr_cfg['detector']['transform']['tilt_angles'], - instr_cfg['detector']['transform']['t_vec_d'], - instr_cfg['oscillation_stage']['chi'], - instr_cfg['oscillation_stage']['t_vec_s'], - ]) - rdim = cfg.instrument.detector.pixels.size[0]*cfg.instrument.detector.pixels.rows - cdim = cfg.instrument.detector.pixels.size[1]*cfg.instrument.detector.pixels.columns - panel_dims = ((-0.5*cdim, -0.5*rdim), - ( 0.5*cdim, 0.5*rdim), - ) - # UGH! hard-coded distortion... - if instr_cfg['detector']['distortion']['function_name'] == 'GE_41RT': - distortion = (dFuncs.GE_41RT, - instr_cfg['detector']['distortion']['parameters'], - ) + completeness = pool.map(indexer.test_orientation_FF_reduced, qfib.T) + pool.close() else: - distortion = None + logger.info("\tusing map search with paintGrid on %d processes", ncpus) - min_compl = cfg.find_orientations.clustering.completeness + start = timeit.default_timer() - # start logger - logger.info("beginning analysis '%s'", cfg.analysis_name) - - # load the eta_ome orientation maps - eta_ome = load_eta_ome_maps(cfg, pd, reader, detector, hkls=hkls, clean=clean) - - ome_range = (np.min(eta_ome.omeEdges), - np.max(eta_ome.omeEdges) - ) - try: - # are we searching the full grid of orientation space? 
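Both search branches above rely on the same multiprocessing idiom: the per-run parameters are broadcast once into each worker through a pool initializer, so the mapped function receives only the per-item payload. A minimal self-contained sketch of the pattern:

```python
import multiprocessing as mp

def worker_init(params):
    # broadcast shared parameters as a module global in each worker
    global paramMP
    paramMP = params

def worker(arg):
    # only the per-item payload travels through the map call
    return paramMP['scale'] * arg

if __name__ == '__main__':
    pool = mp.Pool(2, worker_init, (dict(scale=10),))
    print(pool.map(worker, [1, 2, 3]))   # [10, 20, 30]
    pool.close()
    pool.join()
```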
- qgrid_f = cfg.find_orientations.use_quaternion_grid - quats = np.load(qgrid_f) - logger.info("Using %s for full quaternion search", qgrid_f) - hkl_ids = None - except (IOError, ValueError, AttributeError): - # or doing a seeded search? - logger.info("Defaulting to seeded search") - hkl_seeds = cfg.find_orientations.seed_search.hkl_seeds - hkl_ids = [eta_ome.planeData.hklDataList[i]['hklID'] for i in hkl_seeds] - hklseedstr = ', '.join( - [str(i) for i in eta_ome.planeData.hkls.T[hkl_seeds]] - ) - logger.info( - "Seeding search using hkls from %s: %s", - cfg.find_orientations.orientation_maps.file, - hklseedstr - ) - quats = generate_orientation_fibers( - eta_ome, - detector_params[6], - cfg.find_orientations.threshold, - cfg.find_orientations.seed_search.hkl_seeds, - cfg.find_orientations.seed_search.fiber_ndiv, - ncpus=cfg.multiprocessing, + # handle eta-ome maps + eta_ome = load_eta_ome_maps(cfg, plane_data, imsd, + hkls=hkls, clean=clean) + + # handle search space + if cfg.find_orientations.use_quaternion_grid is None: + # doing seeded search + logger.info( + "\tgenerating search quaternion list using %d processes", + ncpus ) - if save_as_ascii: - np.savetxt( - os.path.join(cfg.working_dir, 'trial_orientations.dat'), - quats.T, - fmt="%.18e", - delimiter="\t" + start = timeit.default_timer() + + qfib = generate_orientation_fibers(cfg, eta_ome) + logger.info("\t\t...took %f seconds", + timeit.default_timer() - start) + else: + # doing grid search + try: + qfib = np.load(cfg.find_orientations.use_quaternion_grid) + except(IOError): + raise RuntimeError( + "specified quaternion grid file '%s' not found!" + % cfg.find_orientations.use_quaternion_grid ) - pass - pass # close conditional on grid search - - # generate the completion maps - logger.info("Running paintgrid on %d trial orientations", quats.shape[1]) - if profile: - logger.info("Profiling mode active, forcing ncpus to 1") - ncpus = 1 - else: - ncpus = cfg.multiprocessing - logger.info( - "%d of %d available processors requested", ncpus, mp.cpu_count() + # do map-based indexing + start = timeit.default_timer() + + logger.info("will test %d quaternions using %d processes", + qfib.shape[1], ncpus) + + completeness = indexer.paintGrid( + qfib, + eta_ome, + etaRange=np.radians(cfg.find_orientations.eta.range), + omeTol=np.radians(cfg.find_orientations.omega.tolerance), + etaTol=np.radians(cfg.find_orientations.eta.tolerance), + omePeriod=np.radians(cfg.find_orientations.omega.period), + threshold=on_map_threshold, + doMultiProc=ncpus > 1, + nCPUs=ncpus ) - compl = idx.paintGrid( - quats, - eta_ome, - etaRange=np.radians(cfg.find_orientations.eta.range), - omeTol=np.radians(cfg.find_orientations.omega.tolerance), - etaTol=np.radians(cfg.find_orientations.eta.tolerance), - omePeriod=np.radians(cfg.find_orientations.omega.period), - threshold=cfg.find_orientations.threshold, - doMultiProc=ncpus > 1, - nCPUs=ncpus - ) + logger.info("\t\t...took %f seconds", + timeit.default_timer() - start) + completeness = np.array(completeness) + + logger.info("\tSaving %d scored orientations with max completeness %f%%", + qfib.shape[1], 100*np.max(completeness)) + + np.savez_compressed( + '_'.join(['scored_orientations', cfg.analysis_id]), + test_quaternions=qfib, score=completeness + ) + + # ========================================================================= + # CLUSTERING AND GRAINS OUTPUT + # ========================================================================= - if save_as_ascii: - np.savetxt(os.path.join(cfg.working_dir, 
'completeness.dat'), compl) + if not os.path.exists(cfg.analysis_dir): + os.makedirs(cfg.analysis_dir) + qbar_filename = 'accepted_orientations_' + cfg.analysis_id + '.dat' + + logger.info("\trunning clustering using '%s'", cl_algorithm) + + start = timeit.default_timer() + + if do_grid_search: + min_samples = 1 + mean_rpg = 1 else: - np.save( - os.path.join( - cfg.working_dir, - 'scored_orientations_%s.npy' %analysis_id - ), - np.vstack([quats, compl]) - ) + active_hkls = cfg.find_orientations.orientation_maps.active_hkls \ + or eta_ome.iHKLList + + fiber_seeds = cfg.find_orientations.seed_search.hkl_seeds - ########################################################## - ## Simulate N random grains to get neighborhood size ## - ########################################################## - if hkl_ids is not None: + # Simulate N random grains to get neighborhood size + seed_hkl_ids = [ + plane_data.hklDataList[active_hkls[i]]['hklID'] + for i in fiber_seeds + ] + + # !!! default to use 100 grains ngrains = 100 rand_q = mutil.unitVector(np.random.randn(4, ngrains)) rand_e = np.tile(2.*np.arccos(rand_q[0, :]), (3, 1)) \ - * mutil.unitVector(rand_q[1:, :]) + * mutil.unitVector(rand_q[1:, :]) + grain_param_list = np.vstack( + [rand_e, + np.zeros((3, ngrains)), + np.tile(const.identity_6x1, (ngrains, 1)).T] + ).T + sim_results = instr.simulate_rotation_series( + plane_data, grain_param_list, + eta_ranges=np.radians(eta_ranges), + ome_ranges=np.radians(ome_ranges), + ome_period=np.radians(ome_period) + ) + refl_per_grain = np.zeros(ngrains) - num_seed_refls = np.zeros(ngrains) - for i in range(ngrains): - grain_params = np.hstack([rand_e[:, i], - xf.zeroVec.flatten(), - xf.vInv_ref.flatten() - ]) - sim_results = simulateGVecs(pd, - detector_params, - grain_params, - ome_range=(ome_range,), - ome_period=(ome_range[0], ome_range[0]+2*np.pi), - eta_range=np.radians(cfg.find_orientations.eta.range), - panel_dims=panel_dims, - pixel_pitch=cfg.instrument.detector.pixels.size, - distortion=distortion, - ) - refl_per_grain[i] = len(sim_results[0]) - num_seed_refls[i] = np.sum([sum(sim_results[0] == hkl_id) for hkl_id in hkl_ids]) - pass - #min_samples = 2 + seed_refl_per_grain = np.zeros(ngrains) + for sim_result in sim_results.itervalues(): + for i, refl_ids in enumerate(sim_result[0]): + refl_per_grain[i] += len(refl_ids) + seed_refl_per_grain[i] += np.sum( + [sum(refl_ids == hkl_id) for hkl_id in seed_hkl_ids] + ) + min_samples = max( - int(np.floor(0.5*min_compl*min(num_seed_refls))), + int(np.floor(0.5*compl_thresh*min(seed_refl_per_grain))), 2 - ) + ) mean_rpg = int(np.round(np.average(refl_per_grain))) - else: - min_samples = 1 - mean_rpg = 1 - logger.info("mean number of reflections per grain is %d", mean_rpg) - logger.info("neighborhood size estimate is %d points", min_samples) - - # cluster analysis to identify orientation blobs, the final output: - qbar, cl = run_cluster(compl, quats, pd.getQSym(), cfg, min_samples=min_samples) - - analysis_id = '%s_%s' %( - cfg.analysis_name.strip().replace(' ', '-'), - cfg.material.active.strip().replace(' ', '-'), - ) - - np.savetxt( - os.path.join( - cfg.working_dir, - 'accepted_orientations_%s.dat' %analysis_id - ), - qbar.T, - fmt="%.18e", - delimiter="\t") + logger.info("\tmean reflections per grain: %d", mean_rpg) + logger.info("\tneighborhood size: %d", min_samples) + + qbar, cl = run_cluster( + completeness, qfib, plane_data.getQSym(), cfg, + min_samples=min_samples, + compl_thresh=compl_thresh, + radius=cl_radius) + + logger.info("\t\t...took %f 
seconds", (timeit.default_timer() - start)) + logger.info("\tfound %d grains; saved to file: '%s'", + qbar.shape[1], qbar_filename) + + np.savetxt(qbar_filename, qbar.T, + fmt='%.18e', delimiter='\t') + + gw = instrument.GrainDataWriter( + os.path.join(cfg.analysis_dir, 'grains.out') + ) + grain_params_list = [] + for gid, q in enumerate(qbar.T): + phi = 2*np.arccos(q[0]) + n = xfcapi.unitRowVector(q[1:]) + grain_params = np.hstack([phi*n, const.zeros_3, const.identity_6x1]) + gw.dump_grain(gid, 1., 0., grain_params) + grain_params_list.append(grain_params) + gw.close() return diff --git a/hexrd/fitgrains.py b/hexrd/fitgrains.py index ec1a8331..3e38dd9f 100644 --- a/hexrd/fitgrains.py +++ b/hexrd/fitgrains.py @@ -1,498 +1,450 @@ -from __future__ import absolute_import +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 22 19:04:10 2017 + +@author: bernier2 +""" +from __future__ import print_function, absolute_import -import copy -import logging -import multiprocessing as mp -from multiprocessing.queues import Empty import os +import logging +import multiprocessing +import numpy as np +import timeit import sys -import time - import yaml -import numpy as np -from scipy.sparse import coo_matrix -from scipy.linalg.matfuncs import logm +from hexrd import config +from hexrd import constants as cnst +from hexrd import instrument +from hexrd.xrd import transforms_CAPI as xfcapi +from hexrd.xrd.fitting import fitGrain, objFuncFitGrain, gFlag_ref -from hexrd.coreutil import ( - initialize_experiment, migrate_detector_to_instrument_config, - get_instrument_parameters, get_detector_parameters, get_detector_parameters, - get_distortion_correction, get_saturation_level, set_planedata_exclusions - ) -from hexrd.matrixutil import vecMVToSymm -from hexrd.utils.progressbar import ( - Bar, ETA, Percentage, ProgressBar, ReverseBar - ) +logger = logging.getLogger(__name__) -from hexrd.xrd import distortion as dFuncs -from hexrd.xrd.fitting import fitGrain, objFuncFitGrain -from hexrd.xrd.rotations import angleAxisOfRotMat, rotMatOfQuat -from hexrd.xrd.transforms import bVec_ref, eta_ref, mapAngle, vInv_ref, angularDifference -from hexrd.xrd.xrdutil import pullSpots -from .cacheframes import get_frames -from hexrd import USE_NUMBA -if USE_NUMBA: - import numba +# multiprocessing fit funcs -logger = logging.getLogger(__name__) +def fit_grain_FF_init(params): + """ + Broadcast the fitting parameters as globals for multiprocessing -# grain parameter refinement flags -gFlag = np.array([1, 1, 1, - 1, 1, 1, - 1, 1, 1, 1, 1, 1], dtype=bool) -# grain parameter scalings -gScl = np.array([1., 1., 1., - 1., 1., 1., - 1., 1., 1., 0.01, 0.01, 0.01]) - - -def get_job_queue(cfg, ids_to_refine=None): - job_queue = mp.JoinableQueue() - # load the queue - try: - # use an estimate of the grain parameters, if available - estimate_f = cfg.fit_grains.estimate - grain_params_list = np.atleast_2d(np.loadtxt(estimate_f)) - n_quats = len(grain_params_list) - n_jobs = 0 - for grain_params in grain_params_list: - grain_id = grain_params[0] - if ids_to_refine is None or grain_id in ids_to_refine: - job_queue.put((grain_id, grain_params[3:15])) - n_jobs += 1 - logger.info( - 'fitting grains using "%s" for the initial estimate', - estimate_f - ) - except (ValueError, IOError): - # no estimate available, use orientations and defaults - logger.info('fitting grains using default initial estimate') - - # ...make this an attribute in cfg? 
- analysis_id = '%s_%s' %( - cfg.analysis_name.strip().replace(' ', '-'), - cfg.material.active.strip().replace(' ', '-'), - ) - - # load quaternion file - quats = np.atleast_2d( - np.loadtxt( - os.path.join( - cfg.working_dir, - 'accepted_orientations_%s.dat' %analysis_id - ) - ) - ) - n_quats = len(quats) - n_jobs = 0 - phi, n = angleAxisOfRotMat(rotMatOfQuat(quats.T)) - for i, (phi, n) in enumerate(zip(phi, n.T)): - if ids_to_refine is None or i in ids_to_refine: - exp_map = phi*n - grain_params = np.hstack( - [exp_map, 0., 0., 0., 1., 1., 1., 0., 0., 0.] - ) - job_queue.put((i, grain_params)) - n_jobs += 1 - logger.info("fitting grains for %d of %d orientations", n_jobs, n_quats) - return job_queue, n_jobs - - -def get_data(cfg, show_progress=False, force=False, clean=False): - # TODO: this should be refactored somehow to avoid initialize_experiment - # and avoid using the old reader. Also, the detector is not used here. - pd, reader, detector = initialize_experiment(cfg) - if cfg.fit_grains.fit_only: - reader = None - else: - reader = get_frames(reader, cfg, show_progress, force, clean) - - instrument_cfg = get_instrument_parameters(cfg) - detector_params = get_detector_parameters(instrument_cfg) - saturation_level = get_saturation_level(instrument_cfg) - distortion = get_distortion_correction(instrument_cfg) - set_planedata_exclusions(cfg, detector, pd) - # HANDLE OMEGA STOP - if cfg.image_series.omega.stop is None: - assert cfg.image_series.images.stop is not None, \ - "Must specify stop point, either in omega or image" - omega_stop = cfg.image_series.omega.start + \ - cfg.image_series.omega.step*cfg.image_series.images.stop - else: - omega_stop = cfg.image_series.omega.stop - pkwargs = { - 'detector_params': detector_params, - 'distortion': distortion, - 'eta_range': np.radians(cfg.find_orientations.eta.range), - 'eta_tol': cfg.fit_grains.tolerance.eta, - 'fit_only': cfg.fit_grains.fit_only, - 'ncols': instrument_cfg['detector']['pixels']['columns'], - 'npdiv': cfg.fit_grains.npdiv, - 'nrows': instrument_cfg['detector']['pixels']['rows'], - 'omega_period': np.radians(cfg.find_orientations.omega.period), - 'omega_start': cfg.image_series.omega.start, - 'omega_step': cfg.image_series.omega.step, - 'omega_stop': omega_stop, - 'omega_tol': cfg.fit_grains.tolerance.omega, - 'overlap_table': os.path.join(cfg.analysis_dir, 'overlap_table.npz'), - 'panel_buffer': cfg.fit_grains.panel_buffer, - 'pixel_pitch': instrument_cfg['detector']['pixels']['size'], - 'plane_data': pd, - 'refit_tol': cfg.fit_grains.refit, - 'saturation_level': saturation_level, - 'spots_stem': os.path.join(cfg.analysis_dir, 'spots_%05d.out'), - 'threshold': cfg.fit_grains.threshold, - 'tth_tol': cfg.fit_grains.tolerance.tth, - } - return reader, pkwargs - - -def fit_grains(cfg, force=False, clean=False, show_progress=False, ids_to_refine=None): - # load the data - reader, pkwargs = get_data(cfg, show_progress, force, clean) - job_queue, njobs = get_job_queue(cfg, ids_to_refine) - - # log this before starting progress bar - ncpus = cfg.multiprocessing - ncpus = ncpus if ncpus < njobs else njobs - logger.info( - 'will use %d of %d processors', ncpus, mp.cpu_count() - ) - if ncpus == 1: - logger.info('multiprocessing disabled') - - # echo some of the fitting options - if cfg.fit_grains.fit_only: - logger.info('\t**fitting only; will not pull spots') - if cfg.fit_grains.refit is not None: - msg = 'will perform refit excluding spots > ' + \ - '%.2f pixels and ' %cfg.fit_grains.refit[0] + \ - '%.2f frames from expected 
values' %cfg.fit_grains.refit[1] - logger.info(msg) + Parameters + ---------- + params : dict + The dictionary of fitting parameters. + + Returns + ------- + None. - start = time.time() - pbar = None - if show_progress: - pbar = ProgressBar( - widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')], - maxval=njobs - ).start() - - # finally start processing data - if ncpus == 1: - # no multiprocessing - results = [] - w = FitGrainsWorker( - job_queue, results, reader, copy.deepcopy(pkwargs), - progressbar=pbar - ) - w.run() - else: - # multiprocessing - manager = mp.Manager() - results = manager.list() - for i in range(ncpus): - # lets make a deep copy of the pkwargs, just in case: - w = FitGrainsWorkerMP(job_queue, results, reader, copy.deepcopy(pkwargs)) - w.daemon = True - w.start() - while True: - n_res = len(results) - if show_progress: - pbar.update(n_res) - if n_res == njobs: - break - time.sleep(0.1) - job_queue.join() - - write_grains_file(cfg, results) - - if show_progress: - pbar.finish() - elapsed = time.time() - start - logger.info('processed %d grains in %g minutes', n_res, elapsed/60) - - -def write_grains_file(cfg, results, output_name=None): - # record the results to file - if output_name is None: - f = open(os.path.join(cfg.analysis_dir, 'grains.out'), 'w') - else: - f = open(os.path.join(cfg.analysis_dir, output_name), 'w') - # going to some length to make the header line up with the data - # while also keeping the width of the lines to a minimum, settled - # on %19.12g representation. - header_items = ( - 'grain ID', 'completeness', 'chi2', - 'xi[0]', 'xi[1]', 'xi[2]', 'tVec_c[0]', 'tVec_c[1]', 'tVec_c[2]', - 'vInv_s[0]', 'vInv_s[1]', 'vInv_s[2]', 'vInv_s[4]*sqrt(2)', - 'vInv_s[5]*sqrt(2)', 'vInv_s[6]*sqrt(2)', 'ln(V[0,0])', - 'ln(V[1,1])', 'ln(V[2,2])', 'ln(V[1,2])', 'ln(V[0,2])', 'ln(V[0,1])', - ) - len_items = [] - for i in header_items[1:]: - temp = len(i) - len_items.append(temp if temp > 19 else 19) # for %19.12g - fmtstr = '#%13s ' + ' '.join(['%%%ds' % i for i in len_items]) + '\n' - f.write(fmtstr % header_items) - for (id, g_refined, compl, eMat, resd) in sorted(results): - res_items = ( - id, compl, resd, g_refined[0], g_refined[1], g_refined[2], - g_refined[3], g_refined[4], g_refined[5], g_refined[6], - g_refined[7], g_refined[8], g_refined[9], g_refined[10], - g_refined[11], eMat[0, 0], eMat[1, 1], eMat[2, 2], eMat[1, 2], - eMat[0, 2], eMat[0, 1], - ) - fmtstr = ( - '%14d ' + ' '.join(['%%%d.12g' % i for i in len_items]) + '\n' - ) - f.write(fmtstr % res_items) - - - -class FitGrainsWorker(object): - - - def __init__(self, jobs, results, reader, pkwargs, **kwargs): - self._jobs = jobs - self._results = results - self._reader = reader - # a dict containing the rest of the parameters - self._p = pkwargs - - # lets make a couple shortcuts: - self._p['bMat'] = np.ascontiguousarray( - self._p['plane_data'].latVecOps['B'] - ) # is it still necessary to re-cast? 
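The `grain_params` vector referenced throughout this file (the bracketed field names in the docstring above did not survive extraction) is the standard 12-element layout: exponential-map orientation (3), crystal centroid (3), and inverse stretch tensor components (6). The "identity" grain used to seed fits looks like this; variable names are illustrative, values follow the `zeros_3`/`identity_6x1` constants used elsewhere in this patch:

```python
import numpy as np

exp_map = np.zeros(3)                      # orientation
t_vec_c = np.zeros(3)                      # centroid, cf. cnst.zeros_3
v_inv_s = np.r_[1., 1., 1., 0., 0., 0.]    # unit stretch, cf. cnst.identity_6x1
grain_params = np.hstack([exp_map, t_vec_c, v_inv_s])
print(grain_params.shape)                  # (12,)
```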
- self._p['wlen'] = self._p['plane_data'].wavelength - self._pbar = kwargs.get('progressbar', None) - - - def pull_spots(self, grain_id, grain_params, iteration): - # need to calc panel dims on the fly - xdim = self._p['pixel_pitch'][1] * self._p['ncols'] - ydim = self._p['pixel_pitch'][0] * self._p['nrows'] - panel_dims = [(-0.5*xdim, -0.5*ydim), - ( 0.5*xdim, 0.5*ydim)] - return pullSpots( - self._p['plane_data'], - self._p['detector_params'], - grain_params, - self._reader, - distortion=self._p['distortion'], - eta_range=self._p['eta_range'], - ome_period=self._p['omega_period'], - eta_tol=self._p['eta_tol'][iteration], - ome_tol=self._p['omega_tol'][iteration], - tth_tol=self._p['tth_tol'][iteration], - pixel_pitch=self._p['pixel_pitch'], - panel_dims=panel_dims, - panel_buff=self._p['panel_buffer'], - npdiv=self._p['npdiv'], - threshold=self._p['threshold'], - doClipping=False, - filename=self._p['spots_stem'] % grain_id, - ) + Notes + ----- + See fit_grain_FF_reduced for specification. + """ + global paramMP + paramMP = params + + +def fit_grain_FF_cleanup(): + """ + Tears down the global fitting parameters. + """ + global paramMP + del paramMP + + +def fit_grain_FF_reduced(grain_id): + """ + Perform non-linear least-square fit for the specified grain. + + Parameters + ---------- + grain_id : int + The grain id. + + Returns + ------- + grain_id : int + The grain id. + completeness : float + The ratio of predicted to measured (observed) Bragg reflections. + chisq: float + Figure of merit describing the sum of squared residuals for each Bragg + reflection in the form (x, y, omega) normalized by the total number of + degrees of freedom. + grain_params : array_like + The optimized grain parameters + [, ]. + + Notes + ----- + input parameters are + [plane_data, instrument, imgser_dict, + tth_tol, eta_tol, ome_tol, npdiv, threshold] + """ + grains_table = paramMP['grains_table'] + plane_data = paramMP['plane_data'] + instrument = paramMP['instrument'] + imgser_dict = paramMP['imgser_dict'] + tth_tol = paramMP['tth_tol'] + eta_tol = paramMP['eta_tol'] + ome_tol = paramMP['ome_tol'] + npdiv = paramMP['npdiv'] + refit = paramMP['refit'] + threshold = paramMP['threshold'] + eta_ranges = paramMP['eta_ranges'] + ome_period = paramMP['ome_period'] + analysis_dirname = paramMP['analysis_dirname'] + spots_filename = paramMP['spots_filename'] + + grain = grains_table[grain_id] + grain_params = grain[3:15] + + for tols in zip(tth_tol, eta_tol, ome_tol): + complvec, results = instrument.pull_spots( + plane_data, grain_params, + imgser_dict, + tth_tol=tols[0], + eta_tol=tols[1], + ome_tol=tols[2], + npdiv=npdiv, threshold=threshold, + eta_ranges=eta_ranges, + ome_period=ome_period, + dirname=analysis_dirname, filename=spots_filename % grain_id, + save_spot_list=False, + quiet=True, check_only=False, interp='nearest') + + # ======= DETERMINE VALID REFLECTIONS ======= + + culled_results = dict.fromkeys(results) + num_refl_tot = 0 + num_refl_valid = 0 + for det_key in culled_results: + panel = instrument.detectors[det_key] + + presults = results[det_key] + + valid_refl_ids = np.array([x[0] for x in presults]) >= 0 + + spot_ids = np.array([x[0] for x in presults]) + + # find unsaturated spots on this panel + if panel.saturation_level is None: + unsat_spots = np.ones(len(valid_refl_ids)) + else: + unsat_spots = \ + np.array([x[4] for x in presults]) < panel.saturation_level + idx = np.logical_and(valid_refl_ids, unsat_spots) - def fit_grains(self, grain_id, grain_params, refit_tol=None): - """ - Executes 
lsq fits of grains based on spot files - - REFLECTION TABLE - - Cols as follows: - 0-6: ID PID H K L sum(int) max(int) - 6-9: pred tth pred eta pred ome - 9-12: meas tth meas eta meas ome - 12-15: meas X meas Y meas ome - """ - ome_start = self._p['omega_start'] - ome_step = self._p['omega_step'] - ome_stop = self._p['omega_stop'] - refl_table = np.loadtxt(self._p['spots_stem'] % grain_id) - valid_refl_ids = refl_table[:, 0] >= 0 - unsat_spots = refl_table[:, 6] < self._p['saturation_level'] - pred_ome = refl_table[:, 9] - if angularDifference(ome_start, ome_stop, units='degrees') > 0: - # if here, incomplete have omega range and - # clip the refelctions very close to the edges to avoid - # problems with the least squares... - if np.sign(ome_step) < 0: - idx_ome = np.logical_and( - pred_ome < np.radians(ome_start + 2*ome_step), - pred_ome > np.radians(ome_stop - 2*ome_step) - ) - else: - idx_ome = np.logical_and( - pred_ome > np.radians(ome_start + 2*ome_step), - pred_ome < np.radians(ome_stop - 2*ome_step) + # if an overlap table has been written, load it and use it + overlaps = np.zeros_like(idx, dtype=bool) + try: + ot = np.load( + os.path.join( + analysis_dirname, os.path.join( + det_key, 'overlap_table.npz' + ) ) - idx = np.logical_and( - valid_refl_ids, - np.logical_and(unsat_spots, idx_ome) ) + for key in ot.keys(): + for this_table in ot[key]: + these_overlaps = np.where( + this_table[:, 0] == grain_id)[0] + if len(these_overlaps) > 0: + mark_these = np.array( + this_table[these_overlaps, 1], dtype=int + ) + otidx = [ + np.where(spot_ids == mt)[0] + for mt in mark_these + ] + overlaps[otidx] = True + idx = np.logical_and(idx, ~overlaps) + # print("found overlap table for '%s'" % det_key) + except(IOError, IndexError): + # print("no overlap table found for '%s'" % det_key) + pass + + # attach to proper dict entry + culled_results[det_key] = [presults[i] for i in np.where(idx)[0]] + num_refl_tot += len(valid_refl_ids) + num_refl_valid += sum(valid_refl_ids) + + pass # now we have culled data + + # CAVEAT: completeness from pullspots only; incl saturated and overlaps + # + completeness = num_refl_valid / float(num_refl_tot) + + # ======= DO LEASTSQ FIT ======= + + if num_refl_valid <= 12: # not enough reflections to fit... exit + return grain_id, completeness, np.inf, grain_params else: - idx = np.logical_and(valid_refl_ids, unsat_spots) - pass # end if edge case + grain_params = fitGrain( + grain_params, instrument, culled_results, + plane_data.latVecOps['B'], plane_data.wavelength + ) + # get chisq + # TODO: do this while evaluating fit??? 
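The overlap-table culling above can be hard to follow in diff form; here is a compact standalone sketch of the same masking logic, with toy ids and two-column `[grain_id, spot_id]` rows as implied by the loads above:

```python
import numpy as np

grain_id = 3
spot_ids = np.array([10, 11, 12, 13])
idx = np.ones_like(spot_ids, dtype=bool)       # spots valid so far

this_table = np.array([[3, 11], [7, 12]])      # [grain_id, spot_id] rows
these_overlaps = np.where(this_table[:, 0] == grain_id)[0]
if len(these_overlaps) > 0:
    mark_these = np.array(this_table[these_overlaps, 1], dtype=int)
    otidx = [np.where(spot_ids == mt)[0] for mt in mark_these]
    overlaps = np.zeros_like(idx, dtype=bool)
    overlaps[np.hstack(otidx)] = True
    idx = np.logical_and(idx, ~overlaps)
print(idx)    # spot 11 culled -> [ True False  True  True]
```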
+ chisq = objFuncFitGrain( + grain_params[gFlag_ref], grain_params, gFlag_ref, + instrument, + culled_results, + plane_data.latVecOps['B'], plane_data.wavelength, + ome_period, + simOnly=False, return_value_flag=2) + pass # end conditional on fit + pass # end tolerance looping + + if refit is not None: + # first get calculated x, y, ome from previous solution + # NOTE: this result is a dict + xyo_det_fit_dict = objFuncFitGrain( + grain_params[gFlag_ref], grain_params, gFlag_ref, + instrument, + culled_results, + plane_data.latVecOps['B'], plane_data.wavelength, + ome_period, + simOnly=True, return_value_flag=2) + + # make dict to contain new culled results + culled_results_r = dict.fromkeys(culled_results) + num_refl_valid = 0 + for det_key in culled_results_r: + presults = culled_results[det_key] + + ims = imgser_dict[det_key] + ome_step = sum(np.r_[-1, 1]*ims.metadata['omega'][0, :]) + + xyo_det = np.atleast_2d( + np.vstack([np.r_[x[7], x[6][-1]] for x in presults]) + ) - # if an overlap table has been written, load it and use it - overlaps = np.zeros(len(refl_table), dtype=bool) - try: - ot = np.load(self._p['overlap_table']) - for key in ot.keys(): - for this_table in ot[key]: - these_overlaps = np.where( - this_table[:, 0] == grain_id)[0] - if len(these_overlaps) > 0: - mark_these = np.array(this_table[these_overlaps, 1], dtype=int) - overlaps[mark_these] = True - idx = np.logical_and(idx, ~overlaps) - except IOError, IndexError: - #print "no overlap table found" - pass - - # completeness from pullspots only; incl saturated and overlaps - completeness = sum(valid_refl_ids)/float(len(valid_refl_ids)) + xyo_det_fit = xyo_det_fit_dict[det_key] - # extract data from grain table - hkls = refl_table[idx, 2:5].T # must be column vectors - xyo_det = refl_table[idx, -3:] # these are the cartesian centroids + ome + xpix_tol = refit[0]*panel.pixel_size_col + ypix_tol = refit[0]*panel.pixel_size_row + fome_tol = refit[1]*ome_step - # set in parameter attribute - self._p['hkls'] = hkls - self._p['xyo_det'] = xyo_det - - if sum(idx) <= 12: # not enough reflections to fit... exit - completeness = 0. 
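For the refit pass above, the two entries of `cfg.fit_grains.refit` act as multipliers on the pixel pitch and the omega step when culling outlier spots. With illustrative numbers (0.2 mm GE-style pixels, 0.25-degree frames):

```python
refit = [1.0, 1.5]                          # cfg.fit_grains.refit
pixel_size_col, pixel_size_row = 0.2, 0.2   # mm, e.g. a GE panel
ome_step = 0.25                             # degrees per frame

xpix_tol = refit[0]*pixel_size_col          # 0.2 mm in x
ypix_tol = refit[0]*pixel_size_row          # 0.2 mm in y
fome_tol = refit[1]*ome_step                # 0.375 deg in omega
```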
- else: - grain_params = fitGrain( - xyo_det, hkls, self._p['bMat'], self._p['wlen'], - self._p['detector_params'], - grain_params[:3], grain_params[3:6], grain_params[6:], - beamVec=bVec_ref, etaVec=eta_ref, - distortion=self._p['distortion'], - gFlag=gFlag, gScl=gScl, - omePeriod=self._p['omega_period'] + # define difference vectors for spot fits + x_diff = abs(xyo_det[:, 0] - xyo_det_fit['calc_xy'][:, 0]) + y_diff = abs(xyo_det[:, 1] - xyo_det_fit['calc_xy'][:, 1]) + ome_diff = np.degrees( + xfcapi.angularDifference(xyo_det[:, 2], + xyo_det_fit['calc_omes']) ) - if refit_tol is not None: - xpix_tol = refit_tol[0]*self._p['pixel_pitch'][1] - ypix_tol = refit_tol[0]*self._p['pixel_pitch'][0] - fome_tol = refit_tol[1]*self._p['omega_step'] - - xyo_det_fit = objFuncFitGrain( - grain_params[gFlag], grain_params, gFlag, - self._p['detector_params'], - xyo_det, hkls, self._p['bMat'], self._p['wlen'], - bVec_ref, eta_ref, - self._p['distortion'][0], self._p['distortion'][1], - self._p['omega_period'], simOnly=True - ) - # define difference vectors for spot fits - x_diff = abs(xyo_det[:, 0] - xyo_det_fit[:, 0]) - y_diff = abs(xyo_det[:, 1] - xyo_det_fit[:, 1]) - ome_diff = np.degrees( - angularDifference(xyo_det[:, 2], xyo_det_fit[:, 2]) - ) + # filter out reflections with centroids more than + # a pixel and delta omega away from predicted value + idx_new = np.logical_and( + x_diff <= xpix_tol, + np.logical_and(y_diff <= ypix_tol, + ome_diff <= fome_tol) + ) - # filter out reflections with centroids more than - # a pixel and delta omega away from predicted value - idx_1 = np.logical_and( - x_diff <= xpix_tol, - np.logical_and(y_diff <= ypix_tol, - ome_diff <= fome_tol) - ) - idx_new = np.zeros_like(idx, dtype=bool) - idx_new[np.where(idx == 1)[0][idx_1]] = True - - if sum(idx_new) > 12 and (sum(idx_new) > 0.5*sum(idx)): - # have enough reflections left - # ** the check that we have more than half of what - # we started with is a hueristic - hkls = refl_table[idx_new, 2:5].T - xyo_det = refl_table[idx_new, -3:] - - # set in parameter attribute - self._p['hkls'] = hkls - self._p['xyo_det'] = xyo_det - - # do fit - grain_params = fitGrain( - xyo_det, hkls, - self._p['bMat'], self._p['wlen'], - self._p['detector_params'], - grain_params[:3], grain_params[3:6], grain_params[6:], - beamVec=bVec_ref, etaVec=eta_ref, - distortion=self._p['distortion'], - gFlag=gFlag, gScl=gScl, - omePeriod=self._p['omega_period'] - ) - pass # end check on num of refit refls - pass # end refit loop - pass # end on num of refls - return grain_params, completeness - + # attach to proper dict entry + culled_results_r[det_key] = [ + presults[i] for i in np.where(idx_new)[0] + ] - def get_e_mat(self, grain_params): - """ - strain tensor calculation - """ - return logm(np.linalg.inv(vecMVToSymm(grain_params[6:]))) - - - def get_residuals(self, grain_params): - dFunc, dParams = self._p['distortion'] - return objFuncFitGrain( - grain_params[gFlag], grain_params, gFlag, - self._p['detector_params'], - self._p['xyo_det'], self._p['hkls'], - self._p['bMat'], self._p['wlen'], - bVec_ref, eta_ref, - dFunc, dParams, - self._p['omega_period'], - simOnly=False, return_value_flag=2) - - def loop(self): - id, grain_params = self._jobs.get(False) - iterations = (0, len(self._p['eta_tol'])) - for iteration in range(*iterations): - # pull spots if asked to, otherwise just fit - if not self._p['fit_only']: - self.pull_spots(id, grain_params, iteration) - # FITTING HERE - grain_params, compl = self.fit_grains(id, grain_params, - 
refit_tol=self._p['refit_tol']) - if compl == 0: - break + num_refl_valid += sum(idx_new) pass - - # final pull spots if enabled - if not self._p['fit_only']: - self.pull_spots(id, grain_params, -1) - - eMat = self.get_e_mat(grain_params) - resd = self.get_residuals(grain_params) - - self._results.append((id, grain_params, compl, eMat, resd)) - self._jobs.task_done() + # only execute fit if left with enough reflections + if num_refl_valid > 12: + grain_params = fitGrain( + grain_params, instrument, culled_results_r, + plane_data.latVecOps['B'], plane_data.wavelength + ) + # get chisq + # TODO: do this while evaluating fit??? + chisq = objFuncFitGrain( + grain_params[gFlag_ref], + grain_params, gFlag_ref, + instrument, + culled_results_r, + plane_data.latVecOps['B'], plane_data.wavelength, + ome_period, + simOnly=False, return_value_flag=2) + pass + pass # close refit conditional + return grain_id, completeness, chisq, grain_params - def run(self): - n_res = 0 - while True: - try: - self.loop() - n_res += 1 - if self._pbar is not None: - self._pbar.update(n_res) - except Empty: - break +def fit_grains(cfg, + force=False, clean=False, + show_progress=False, ids_to_refine=None): + """ + Performs optimization of grain parameters. + operates on a single HEDM config block + """ + grains_filename = os.path.join( + cfg.analysis_dir, 'grains.out' + ) + + # grab imageseries dict + imsd = cfg.image_series + + # grab instrument + instr = cfg.instrument.hedm + + # process plane data + plane_data = cfg.material.plane_data + tth_max = cfg.fit_grains.tth_max + if isinstance(tth_max, bool): + if tth_max: + max_tth = instrument.max_tth(instr) + plane_data.tThMax = max_tth + logger.info("\tsetting the maximum 2theta to instrument" + + " maximum: %.2f degrees", + np.degrees(max_tth)) + else: + logger.info("\tnot adjusting exclusions in planeData") + else: + # a value for tth max has been specified + plane_data.exclusions = None + plane_data.tThMax = np.radians(tth_max) + logger.info("\tsetting the maximum 2theta to %.2f degrees", + tth_max) + + # make output directories + if not os.path.exists(cfg.analysis_dir): + os.mkdir(cfg.analysis_dir) + for det_key in instr.detectors: + os.mkdir(os.path.join(cfg.analysis_dir, det_key)) + else: + # make sure panel dirs exist under analysis dir + for det_key in instr.detectors: + if not os.path.exists(os.path.join(cfg.analysis_dir, det_key)): + os.mkdir(os.path.join(cfg.analysis_dir, det_key)) + + # grab eta ranges and ome_period + eta_ranges = np.radians(cfg.find_orientations.eta.range) -class FitGrainsWorkerMP(FitGrainsWorker, mp.Process): + # handle omega period + # !!! we assume all detector ims have the same ome ranges, so any will do! 
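+    # For example, if the first frame of the series starts at
+    # omega = -180 degrees, the two lines below evaluate to
+    # ome_period = np.radians([-180., 180.]), i.e. one full rotation
+    # anchored at the starting omega of the first frame.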
+    oims = next(imsd.itervalues())
+    ome_period = np.radians(oims.omega[0, 0] + np.r_[0., 360.])
+
+    # number of processes
+    ncpus = cfg.multiprocessing
+
+    # threshold for fitting
+    threshold = cfg.fit_grains.threshold
+
+    # some conditions for arg handling
+    estimate = cfg.fit_grains.estimate
+    existing_analysis = os.path.exists(grains_filename)
+    new_with_estimate = not existing_analysis and estimate is not None
+    new_without_estimate = not existing_analysis and estimate is None
+    force_with_estimate = force and cfg.fit_grains.estimate is not None
+    force_without_estimate = force and cfg.fit_grains.estimate is None
+
+    # handle args
+    if clean or force_without_estimate or new_without_estimate:
+        # need accepted orientations from indexing in this case
+        if clean:
+            logger.info(
+                "'clean' specified; ignoring estimate and using default"
+            )
+        elif force_without_estimate:
+            logger.info(
+                "'force' option specified, but no initial estimate; "
+                + "using default"
+            )
+        try:
+            qbar = np.loadtxt(
+                'accepted_orientations_' + cfg.analysis_id + '.dat',
+                ndmin=2).T
+
+            gw = instrument.GrainDataWriter(grains_filename)
+            for i_g, q in enumerate(qbar.T):
+                phi = 2*np.arccos(q[0])
+                n = xfcapi.unitRowVector(q[1:])
+                grain_params = np.hstack(
+                    [phi*n, cnst.zeros_3, cnst.identity_6x1]
+                )
+                gw.dump_grain(int(i_g), 1., 0., grain_params)
+            gw.close()
+        except IOError:
+            raise RuntimeError(
+                "indexing results '%s' not found!"
+                % ('accepted_orientations_' + cfg.analysis_id + '.dat'))
+    elif force_with_estimate or new_with_estimate:
+        grains_filename = cfg.fit_grains.estimate
+    elif existing_analysis and not (clean or force):
+        raise RuntimeError(
+            "fit results '%s' exist, but --clean or --force options "
+            "not specified" % grains_filename)
+
+    # load grains table
+    grains_table = np.loadtxt(grains_filename, ndmin=2)
+    if ids_to_refine is not None:
+        grains_table = np.atleast_2d(grains_table[ids_to_refine, :])
+    spots_filename = "spots_%05d.out"
+    params = dict(
+        grains_table=grains_table,
+        plane_data=plane_data,
+        instrument=instr,
+        imgser_dict=imsd,
+        tth_tol=cfg.fit_grains.tolerance.tth,
+        eta_tol=cfg.fit_grains.tolerance.eta,
+        ome_tol=cfg.fit_grains.tolerance.omega,
+        npdiv=cfg.fit_grains.npdiv,
+        refit=cfg.fit_grains.refit,
+        threshold=threshold,
+        eta_ranges=eta_ranges,
+        ome_period=ome_period,
+        analysis_dirname=cfg.analysis_dir,
+        spots_filename=spots_filename)
+
+    # =====================================================================
+    # EXECUTE MP FIT
+    # =====================================================================
+
+    # DO FIT!
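+    # Both branches below install `params` as per-process state via
+    # fit_grain_FF_init (presumably as a module-level global) and map
+    # fit_grain_FF_reduced over the integer grain ids in the first column
+    # of grains_table; the pool passes the same initializer and argument
+    # tuple to every worker process.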
+ if len(grains_table) == 1 or ncpus == 1: + logger.info("\tstarting serial fit") + start = timeit.default_timer() + fit_grain_FF_init(params) + fit_results = map( + fit_grain_FF_reduced, + np.array(grains_table[:, 0], dtype=int) + ) + fit_grain_FF_cleanup() + elapsed = timeit.default_timer() - start + else: + nproc = min(ncpus, len(grains_table)) + chunksize = max(1, len(grains_table)//ncpus) + logger.info("\tstarting fit on %d processes", nproc) + start = timeit.default_timer() + pool = multiprocessing.Pool( + nproc, + fit_grain_FF_init, + (params, ) + ) + fit_results = pool.map( + fit_grain_FF_reduced, + np.array(grains_table[:, 0], dtype=int), + chunksize=chunksize + ) + pool.close() + pool.join() + elapsed = timeit.default_timer() - start + logger.info("fitting took %f seconds", elapsed) + + # ===================================================================== + # WRITE OUTPUT + # ===================================================================== + + gw = instrument.GrainDataWriter( + os.path.join(cfg.analysis_dir, 'grains.out') + ) + for fit_result in fit_results: + gw.dump_grain(*fit_result) + pass + gw.close() - def __init__(self, *args, **kwargs): - mp.Process.__init__(self) - FitGrainsWorker.__init__(self, *args, **kwargs) diff --git a/hexrd/fitting/fitpeak.py b/hexrd/fitting/fitpeak.py index d9901d97..e0c05c6e 100644 --- a/hexrd/fitting/fitpeak.py +++ b/hexrd/fitting/fitpeak.py @@ -1,42 +1,84 @@ + # ============================================================ -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# -# This program is free software; you can redistribute it and/or modify it under the -# terms of the GNU Lesser General Public License (as published by the Free Software -# Foundation) version 2.1 dated February 1999. -# +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the +# Free Software Foundation) version 2.1 dated February 1999. +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit . 
# ============================================================ +import numpy as np +from scipy import integrate +from scipy import ndimage as imgproc +from scipy import optimize + +from hexrd import constants +from hexrd.fitting import peakfunctions as pkfuncs + +import matplotlib.pyplot as plt + + +# ============================================================================= +# Helper Functions and Module Vars +# ============================================================================= + +ftol = constants.sqrt_epsf +xtol = constants.sqrt_epsf + + +def snip1d(y, w=4, numiter=2): + """Return SNIP-estimated baseline-background for given spectrum y.""" + z = np.log(np.log(np.sqrt(y + 1) + 1) + 1) + b = z + for i in range(numiter): + for p in range(w, 0, -1): + kernel = np.zeros(p*2 + 1) + kernel[0] = kernel[-1] = 1./2. + b = np.minimum( + b, + imgproc.convolve1d(z, kernel, mode='nearest') + ) + z = b + # bfull = np.zeros_like(y) + # bfull[~zeros_idx] = b + bkg = (np.exp(np.exp(b) - 1) - 1)**2 - 1 + return bkg + + +def lin_fit_obj(x, m, b): + return m*np.asarray(x) + b -import numpy as np -import scipy.optimize as optimize -import hexrd.fitting.peakfunctions as pkfuncs -import scipy.ndimage as imgproc -import copy +def lin_fit_jac(x, m, b): + return np.vstack([x, np.ones_like(x)]).T -#### 1-D Peak Fitting -def estimate_pk_parms_1d(x,f,pktype): +# ============================================================================= +# 1-D Peak Fitting +# ============================================================================= + + +def estimate_pk_parms_1d(x, f, pktype='pvoigt'): """ Gives initial guess of parameters for analytic fit of one dimensional peak data. @@ -44,132 +86,295 @@ def estimate_pk_parms_1d(x,f,pktype): Required Arguments: x -- (n) ndarray of coordinate positions f -- (n) ndarray of intensity measurements at coordinate positions x - pktype -- string, type of analytic function that will be used to fit the data, - current options are "gaussian","lorentzian","pvoigt" (psuedo voigt), and - "split_pvoigt" (split psuedo voigt) - + pktype -- string, type of analytic function that will be used to fit the + data, current options are "gaussian", "lorentzian", + "pvoigt" (psuedo voigt), and "split_pvoigt" (split psuedo voigt) Outputs: - p -- (m) ndarray containing initial guesses for parameters for the input peaktype + p -- (m) ndarray containing initial guesses for parameters for the input + peaktype (see peak function help for what each parameters corresponds to) """ - - - data_max=np.max(f) -# lbg=np.mean(f[:2]) -# rbg=np.mean(f[:2]) - if((f[0]> (0.25*data_max)) and (f[-1]> (0.25*data_max))):#heuristic for wide peaks - bg0=0. - elif (f[0]> (0.25*data_max)): #peak cut off on the left - bg0=f[-1] - elif (f[-1]> (0.25*data_max)): #peak cut off on the right - bg0=f[0] - else: - bg0=(f[0]+f[-1])/2. - #bg1=(rbg-lbg)/(x[-1]-x[0]) - - cen_index=np.argmax(f) - x0=x[cen_index] - A=data_max-bg0#-(bg0+bg1*x0) - - num_pts=len(f) - - #checks for peaks that are cut off - if cen_index == (num_pts-1): - FWHM=x[cen_index]-x[np.argmin(np.abs(f[:cen_index]-A/2.))]#peak cut off on the left + npts = len(x) + assert len(f) == npts, "ordinate and data must be same length!" + + # handle background + # ??? make kernel width a kwarg? 
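+    # snip1d (defined above) operates on the log-log-sqrt (LLS) transform
+    # of the spectrum, repeatedly clipping each point against the mean of
+    # its two neighbors at distances w, w-1, ..., 1; peaks are eroded away
+    # while a slowly varying baseline survives.  A standalone call looks
+    # like:
+    #     bkg = snip1d(spectrum, w=10, numiter=2)
+    #     net = spectrum - bkg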
+ bkg = snip1d(f, w=int(2*npts/3.)) + + # fit linear bg and grab params + bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac) + bg0 = bp[-1] + bg1 = bp[0] + + # set remaining params + pint = f - lin_fit_obj(x, *bp) + cen_index = np.argmax(pint) + A = pint[cen_index] + x0 = x[cen_index] + + # fix center index + if cen_index > 0 and cen_index < npts - 1: + left_hm = np.argmin(abs(pint[:cen_index] - 0.5*A)) + right_hm = np.argmin(abs(pint[cen_index:] - 0.5*A)) elif cen_index == 0: - FWHM=x[cen_index+np.argmin(np.abs(f[cen_index+1:]-A/2.))]-x[0] #peak cut off on the right - else: - FWHM=x[cen_index+np.argmin(np.abs(f[cen_index+1:]-A/2.))]-x[np.argmin(np.abs(f[:cen_index]-A/2.))] - - if FWHM <=0:##uh,oh something went bad - FWHM=(x[-1]-x[0])/4. #completely arbitrary, set peak width to 1/4 window size - - - - if pktype=='gaussian' or pktype=='lorentzian': - p=[A,x0,FWHM,bg0,0.] - elif pktype=='pvoigt': - p=[A,x0,FWHM,0.5,bg0,0.] - elif pktype=='split_pvoigt': - p=[A,x0,FWHM,FWHM,0.5,0.5,bg0,0.] - - p=np.array(p) - return p - - -def fit_pk_parms_1d(p0,x,f,pktype): + right_hm = np.argmin(abs(pint[cen_index:] - 0.5*A)) + left_hm = right_hm + elif cen_index == npts - 1: + left_hm = np.argmin(abs(pint[:cen_index] - 0.5*A)) + right_hm = left_hm + + # FWHM estimation + try: + FWHM = x[cen_index + right_hm] - x[left_hm] + except(IndexError): + FWHM = 0 + if FWHM <= 0 or FWHM > 0.75*npts: + # something is weird, so punt... + FWHM = 0.25*(x[-1] - x[0]) + + # set params + if pktype in ['gaussian', 'lorentzian']: + p = [A, x0, FWHM, bg0, bg1] + elif pktype == 'pvoigt': + p = [A, x0, FWHM, 0.5, bg0, bg1] + elif pktype == 'split_pvoigt': + p = [A, x0, FWHM, FWHM, 0.5, 0.5, bg0, bg1] + else: + raise RuntimeError("pktype '%s' not understood" % pktype) + + return np.r_[p] + + +def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'): """ Performs least squares fit to find parameters for 1d analytic functions fit to diffraction data Required Arguments: - p0 -- (m) ndarray containing initial guesses for parameters for the input peaktype + p0 -- (m) ndarray containing initial guesses for parameters + for the input peaktype x -- (n) ndarray of coordinate positions f -- (n) ndarray of intensity measurements at coordinate positions x - pktype -- string, type of analytic function that will be used to fit the data, + pktype -- string, type of analytic function that will be used to + fit the data, current options are "gaussian","lorentzian","pvoigt" (psuedo voigt), and "split_pvoigt" (split psuedo voigt) - + Outputs: - p -- (m) ndarray containing fit parameters for the input peaktype (see peak function - help for what each parameters corresponds to) - - + p -- (m) ndarray containing fit parameters for the input peaktype + (see peak function help for what each parameters corresponds to) + + Notes: - 1. Currently no checks are in place to make sure that the guess of parameters - has a consistent number of parameters with the requested peak type - """ - - - fitArgs=(x,f,pktype) - - ftol=1e-6 - xtol=1e-6 - - weight=np.max(f)*10.#hard coded should be changed - + 1. Currently no checks are in place to make sure that the guess of + parameters has a consistent number of parameters with the requested + peak type + """ + + weight = np.max(f)*10. 
# hard coded should be changed
+    fitArgs = (x, f, pktype)
     if pktype == 'gaussian':
-        p, outflag = optimize.leastsq(fit_pk_obj_1d, p0, args=fitArgs,Dfun=eval_pk_deriv_1d,ftol=ftol,xtol=xtol)
+        p, outflag = optimize.leastsq(
+            fit_pk_obj_1d, p0,
+            args=fitArgs, Dfun=eval_pk_deriv_1d,
+            ftol=ftol, xtol=xtol
+        )
     elif pktype == 'lorentzian':
-        p, outflag = optimize.leastsq(fit_pk_obj_1d, p0, args=fitArgs,Dfun=eval_pk_deriv_1d,ftol=ftol,xtol=xtol)
+        p, outflag = optimize.leastsq(
+            fit_pk_obj_1d, p0,
+            args=fitArgs, Dfun=eval_pk_deriv_1d,
+            ftol=ftol, xtol=xtol
+        )
     elif pktype == 'pvoigt':
-        lb=[p0[0]*0.5,np.min(x),0., 0., 0.,None]
-        ub=[p0[0]*2.0,np.max(x),4.*p0[2],1., 2.*p0[4],None]
-
-        fitArgs=(x,f,pktype,weight,lb,ub)
-        p, outflag = optimize.leastsq(fit_pk_obj_1d_bnded, p0, args=fitArgs,ftol=ftol,xtol=xtol)
+        lb = [p0[0]*0.5, np.min(x), 0., 0., 0., None]
+        ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 1., 2.*p0[4], None]
+
+        fitArgs = (x, f, pktype, weight, lb, ub)
+        p, outflag = optimize.leastsq(
+            fit_pk_obj_1d_bnded, p0,
+            args=fitArgs,
+            ftol=ftol, xtol=xtol
+        )
     elif pktype == 'split_pvoigt':
-        lb=[p0[0]*0.5,np.min(x),0., 0., 0., 0., 0.,None]
-        ub=[p0[0]*2.0,np.max(x),4.*p0[2],4.*p0[2],1., 1., 2.*p0[4],None]
-
-        p, outflag = optimize.leastsq(fit_pk_obj_1d_bnded, p0, args=fitArgs,ftol=ftol,xtol=xtol)
+        lb = [p0[0]*0.5, np.min(x), 0., 0., 0., 0., 0., None]
+        ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 4.*p0[2], 1., 1., 2.*p0[4], None]
+        fitArgs = (x, f, pktype, weight, lb, ub)
+        p, outflag = optimize.leastsq(
+            fit_pk_obj_1d_bnded, p0,
+            args=fitArgs,
+            ftol=ftol, xtol=xtol
+        )
     elif pktype == 'tanh_stepdown':
-        p, outflag = optimize.leastsq(fit_pk_obj_1d, p0, args=fitArgs,ftol=ftol,xtol=xtol)
+        p, outflag = optimize.leastsq(
+            fit_pk_obj_1d, p0,
+            args=fitArgs,
+            ftol=ftol, xtol=xtol)
     else:
-        p=p0
+        p = p0
         print('non-valid option, returning guess')
-
-
+
     if np.any(np.isnan(p)):
-        p=p0
+        p = p0
         print('failed fitting, returning guess')
-
+
     return p
-
-def eval_pk_deriv_1d(p,x,y0,pktype):
+
+
+def fit_mpk_parms_1d(p0, x, f0, pktype, num_pks, bgtype=None, bnds=None):
+    """
+    Performs least squares fit to find parameters for MULTIPLE 1d analytic
+    functions fit to diffraction data.
+
+    Required Arguments:
+    p0 -- (m x u + v) guess of peak parameters for number of peaks, m is the
+        number of parameters per peak ("gaussian" and "lorentzian" - 3,
+        "pvoigt" - 4, "split_pvoigt" - 5), v is the number of parameters for
+        the chosen bgtype
+    x -- (n) ndarray of coordinate positions
+    f0 -- (n) ndarray of intensity measurements at coordinate positions x
+    pktype -- string, type of analytic function that will be used to fit the
+        data; current options are "gaussian", "lorentzian", "pvoigt" (pseudo
+        voigt), and "split_pvoigt" (split pseudo voigt)
+    num_pks -- integer 'u' indicating the number of peaks; must be
+        consistent with the length of p0
+    bgtype -- string, background function; available options are "constant",
+        "linear", and "quadratic"
+    bnds -- tuple containing (lower, upper) bound ndarrays for the
+        parameters, as accepted by scipy.optimize.least_squares
+
+    Outputs:
+    p -- (m x u) fit peak parameters for number of peaks, m is the number of
+        parameters per peak ("gaussian" and "lorentzian" - 3, "pvoigt" - 4,
+        "split_pvoigt" - 5)
+    """
+
+    fitArgs = (x, f0, pktype, num_pks, bgtype)
+
+    ftol = 1e-6
+    xtol = 1e-6
+
+    if bnds is not None:
+        p = optimize.least_squares(fit_mpk_obj_1d, p0, bounds=bnds,
+                                   args=fitArgs, ftol=ftol, xtol=xtol)
+    else:
+        p = optimize.least_squares(fit_mpk_obj_1d, p0,
+                                   args=fitArgs, ftol=ftol, xtol=xtol)
+
+    return p.x
+
+
+def estimate_mpk_parms_1d(pk_pos_0, x, f, pktype='pvoigt', bgtype='linear',
+                          fwhm_guess=0.07, center_bnd=0.02):
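+    """
+    Gives initial guesses and bounds for fitting multiple 1d analytic
+    functions to diffraction data; builds the (p0, bnds) pair consumed by
+    fit_mpk_parms_1d.
+
+    Required Arguments:
+    pk_pos_0 -- (u) iterable of initial guesses for the u peak positions
+    x -- (n) ndarray of coordinate positions
+    f -- (n) ndarray of intensity measurements at coordinate positions x
+
+    Optional Arguments:
+    pktype -- string, peak function; current options are "gaussian",
+        "lorentzian", "pvoigt", and "split_pvoigt"
+    bgtype -- string, background function; current options are "constant",
+        "linear", and "quadratic"
+    fwhm_guess -- float, initial guess for each peak's FWHM
+    center_bnd -- float, half-width of the window about pk_pos_0[i] within
+        which the fitted center of peak i is constrained to stay
+
+    Outputs:
+    p0 -- (m x u + v) ndarray of stacked initial parameter guesses
+    bnds -- tuple of (lower, upper) bound ndarrays matching p0
+    """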
+ num_pks=len(pk_pos_0) + min_val=np.min(f) + + + + if pktype == 'gaussian' or pktype == 'lorentzian': + p0tmp=np.zeros([num_pks,3]) + p0tmp_lb=np.zeros([num_pks,3]) + p0tmp_ub=np.zeros([num_pks,3]) + + #x is just 2theta values + #make guess for the initital parameters + for ii in np.arange(num_pks): + pt=np.argmin(np.abs(x-pk_pos_0[ii])) + p0tmp[ii,:]=[(f[pt]-min_val),pk_pos_0[ii],fwhm_guess] + p0tmp_lb[ii,:]=[(f[pt]-min_val)*0.1,pk_pos_0[ii]-center_bnd,fwhm_guess*0.5] + p0tmp_ub[ii,:]=[(f[pt]-min_val)*10.0,pk_pos_0[ii]+center_bnd,fwhm_guess*2.0] + elif pktype == 'pvoigt': + p0tmp=np.zeros([num_pks,4]) + p0tmp_lb=np.zeros([num_pks,4]) + p0tmp_ub=np.zeros([num_pks,4]) + + #x is just 2theta values + #make guess for the initital parameters + for ii in np.arange(num_pks): + pt=np.argmin(np.abs(x-pk_pos_0[ii])) + p0tmp[ii,:]=[(f[pt]-min_val),pk_pos_0[ii],fwhm_guess,0.5] + p0tmp_lb[ii,:]=[(f[pt]-min_val)*0.1,pk_pos_0[ii]-center_bnd,fwhm_guess*0.5,0.0] + p0tmp_ub[ii,:]=[(f[pt]-min_val+1.)*10.0,pk_pos_0[ii]+center_bnd,fwhm_guess*2.0,1.0] + elif pktype == 'split_pvoigt': + p0tmp=np.zeros([num_pks,6]) + p0tmp_lb=np.zeros([num_pks,6]) + p0tmp_ub=np.zeros([num_pks,6]) + + #x is just 2theta values + #make guess for the initital parameters + for ii in np.arange(num_pks): + pt=np.argmin(np.abs(x-pk_pos_0[ii])) + p0tmp[ii,:]=[(f[pt]-min_val),pk_pos_0[ii],fwhm_guess,fwhm_guess,0.5,0.5] + p0tmp_lb[ii,:]=[(f[pt]-min_val)*0.1,pk_pos_0[ii]-center_bnd,fwhm_guess*0.5,fwhm_guess*0.5,0.0,0.0] + p0tmp_ub[ii,:]=[(f[pt]-min_val)*10.0,pk_pos_0[ii]+center_bnd,fwhm_guess*2.0,fwhm_guess*2.0,1.0,1.0] + + + if bgtype=='linear': + num_pk_parms=len(p0tmp.ravel()) + p0=np.zeros(num_pk_parms+2) + lb=np.zeros(num_pk_parms+2) + ub=np.zeros(num_pk_parms+2) + p0[:num_pk_parms]=p0tmp.ravel() + lb[:num_pk_parms]=p0tmp_lb.ravel() + ub[:num_pk_parms]=p0tmp_ub.ravel() + + + p0[-2]=min_val + + lb[-2]=-float('inf') + lb[-1]=-float('inf') + + ub[-2]=float('inf') + ub[-1]=float('inf') + + elif bgtype=='constant': + num_pk_parms=len(p0tmp.ravel()) + p0=np.zeros(num_pk_parms+1) + lb=np.zeros(num_pk_parms+1) + ub=np.zeros(num_pk_parms+1) + p0[:num_pk_parms]=p0tmp.ravel() + lb[:num_pk_parms]=p0tmp_lb.ravel() + ub[:num_pk_parms]=p0tmp_ub.ravel() + + + p0[-1]=min_val + lb[-1]=-float('inf') + ub[-1]=float('inf') + + elif bgtype=='quadratic': + num_pk_parms=len(p0tmp.ravel()) + p0=np.zeros(num_pk_parms+3) + lb=np.zeros(num_pk_parms+3) + ub=np.zeros(num_pk_parms+3) + p0[:num_pk_parms]=p0tmp.ravel() + lb[:num_pk_parms]=p0tmp_lb.ravel() + ub[:num_pk_parms]=p0tmp_ub.ravel() + + + p0[-3]=min_val + lb[-3]=-float('inf') + lb[-2]=-float('inf') + lb[-1]=-float('inf') + ub[-3]=float('inf') + ub[-2]=float('inf') + ub[-1]=float('inf') + + bnds=(lb,ub) + + + + + return p0, bnds + +def eval_pk_deriv_1d(p,x,y0,pktype): if pktype == 'gaussian': d_mat=pkfuncs.gaussian1d_deriv(p,x) elif pktype == 'lorentzian': d_mat=pkfuncs.lorentzian1d_deriv(p,x) - + return d_mat.T - -def fit_pk_obj_1d(p,x,f0,pktype): + +def fit_pk_obj_1d(p,x,f0,pktype): if pktype == 'gaussian': f=pkfuncs.gaussian1d(p,x) elif pktype == 'lorentzian': @@ -180,12 +385,12 @@ def fit_pk_obj_1d(p,x,f0,pktype): f=pkfuncs.split_pvoigt1d(p,x) elif pktype == 'tanh_stepdown': f=pkfuncs.tanh_stepdown_nobg(p,x) - + resd = f-f0 return resd -def fit_pk_obj_1d_bnded(p,x,f0,pktype,weight,lb,ub): +def fit_pk_obj_1d_bnded(p,x,f0,pktype,weight,lb,ub): if pktype == 'gaussian': f=pkfuncs.gaussian1d(p,x) elif pktype == 'lorentzian': @@ -194,20 +399,31 @@ def fit_pk_obj_1d_bnded(p,x,f0,pktype,weight,lb,ub): 
         f=pkfuncs.pvoigt1d(p,x)
     elif pktype == 'split_pvoigt':
         f=pkfuncs.split_pvoigt1d(p,x)
-
+
     num_data=len(f)
     num_parm=len(p)
     resd=np.zeros(num_data+num_parm)
-    #tub bnds implementation
-
+    # "tub" penalty implementation of the bounds
+
     resd[:num_data] = f-f0
     for ii in range(num_parm):
-        if lb[ii] is not None:
+        if lb[ii] is not None:
             resd[num_data+ii]=weight*np.max([-(p[ii]-lb[ii]),0.,(p[ii]-ub[ii])])
-
-
+
+
+
     return resd
+
+
+def fit_mpk_obj_1d(p, x, f0, pktype, num_pks, bgtype):
+
+    f = pkfuncs.mpeak_1d(p, x, pktype, num_pks, bgtype=bgtype)
+    resd = f-f0
+    return resd
+
+
+
+
 #### 2-D Peak Fitting
 def estimate_pk_parms_2d(x,y,f,pktype):
@@ -220,9 +436,9 @@
     y -- (n x 0) ndarray of coordinate positions for dimension 2 (numpy.meshgrid formatting)
     f -- (n x 0) ndarray of intensity measurements at coordinate positions x and y
     pktype -- string, type of analytic function that will be used to fit the data,
-    current options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes) and
+    current options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes) and
    "split_pvoigt_rot" (split psuedo voigt with arbitrary axes)
-
+
     Outputs:
     p -- (m) ndarray containing initial guesses for parameters
@@ -230,45 +446,45 @@
     """
-
+
     bg0=np.mean([f[0,0],f[-1,0],f[-1,-1],f[0,-1]])
     bg1x=(np.mean([f[-1,-1],f[0,-1]])-np.mean([f[0,0],f[-1,0]]))/(x[0,-1]-x[0,0])
     bg1y=(np.mean([f[-1,-1],f[-1,0]])-np.mean([f[0,0],f[0,-1]]))/(y[-1,0]-y[0,0])
-
-    fnobg=f-(bg0+bg1x*x+bg1y*y)
-
+
+    fnobg=f-(bg0+bg1x*x+bg1y*y)
+
     labels,numlabels=imgproc.label(fnobg>np.max(fnobg)/2.)
-
+
     #looks for the largest peak
     areas=np.zeros(numlabels)
     for ii in np.arange(1,numlabels+1,1):
         areas[ii-1]= np.sum(labels==ii)
-
-    peakIndex=np.argmax(areas)+1
-
-
+
+    peakIndex=np.argmax(areas)+1
+
+
 #
 #currently looks for peak closest to center
 # dist=np.zeros(numlabels)
 # for ii in np.arange(1,numlabels+1,1):
 # dist[ii-1]= ######
-#
+#
 # peakIndex=np.argmin(dist)+1
-
+
     FWHMx=np.max(x[labels==peakIndex])-np.min(x[labels==peakIndex])
     FWHMy=np.max(y[labels==peakIndex])-np.min(y[labels==peakIndex])
-
+
     coords=imgproc.maximum_position(fnobg, labels=labels, index=peakIndex)
     A=imgproc.maximum(fnobg, labels=labels, index=peakIndex)
     x0=x[coords]
     y0=y[coords]
-
+
     if pktype=='gaussian':
         p=[A,x0,y0,FWHMx,FWHMy,bg0,bg1x,bg1y]
     elif pktype=='gaussian_rot':
         p=[A,x0,y0,FWHMx,FWHMy,0.,bg0,bg1x,bg1y]
     elif pktype=='split_pvoigt_rot':
         p=[A,x0,y0,FWHMx,FWHMx,FWHMy,FWHMy,0.5,0.5,0.5,0.5,0.,bg0,bg1x,bg1y]
-
+
     p=np.array(p)
     return p
@@ -284,49 +500,49 @@
     y -- (n x 0) ndarray of coordinate positions for dimension 2 (numpy.meshgrid formatting)
     f -- (n x 0) ndarray of intensity measurements at coordinate positions x and y
     pktype -- string, type of analytic function that will be used to fit the data,
-    current options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes) and
+    current options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes) and
    "split_pvoigt_rot" (split psuedo voigt with arbitrary axes)
-
+
     Outputs:
     p -- (m) ndarray containing fit parameters for the input peaktype (see peak function
     help for what each parameters corresponds to)
-
-
+
+
     Notes:
     1.
Currently no checks are in place to make sure that the guess of parameters has a consisten number of parameters with the requested peak type - """ + """ fitArgs=(x,y,f,pktype) ftol=1e-9 xtol=1e-9 - + if pktype == 'gaussian': - p, outflag = optimize.leastsq(fit_pk_obj_2d, p0, args=fitArgs,ftol=ftol, xtol=xtol) + p, outflag = optimize.leastsq(fit_pk_obj_2d, p0, args=fitArgs,ftol=ftol, xtol=xtol) elif pktype == 'gaussian_rot': p, outflag = optimize.leastsq(fit_pk_obj_2d, p0, args=fitArgs,ftol=ftol, xtol=xtol) elif pktype == 'split_pvoigt_rot': - p, outflag = optimize.leastsq(fit_pk_obj_2d, p0, args=fitArgs,ftol=ftol, xtol=xtol) - - + p, outflag = optimize.leastsq(fit_pk_obj_2d, p0, args=fitArgs,ftol=ftol, xtol=xtol) + + if np.any(np.isnan(p)): p=p0 - + return p -def fit_pk_obj_2d(p,x,y,f0,pktype): +def fit_pk_obj_2d(p,x,y,f0,pktype): if pktype == 'gaussian': f=pkfuncs.gaussian2d(p,x,y) - elif pktype == 'gaussian_rot': + elif pktype == 'gaussian_rot': f=pkfuncs.gaussian2d_rot(p,x,y) - elif pktype == 'split_pvoigt_rot': + elif pktype == 'split_pvoigt_rot': f=pkfuncs.split_pvoigt2d_rot(p,x,y) - + resd = f-f0 return resd.flatten() - + #### Extra Utilities @@ -342,15 +558,132 @@ def goodness_of_fit(f,f0): Outputs: R -- (1) goodness of fit measure which is sum(error^2)/sum(meas^2) Rw -- (1) goodness of fit measure weighted by intensity sum(meas*error^2)/sum(meas^3) - - - """ + """ R=np.sum((f-f0)**2)/np.sum(f0**2) Rw=np.sum(np.abs(f0*(f-f0)**2))/np.sum(np.abs(f0**3)) - - return R, Rw - - - \ No newline at end of file + + return R, Rw + + + +def direct_pk_analysis(x,f,remove_bg=True,low_int=1.,edge_pts=3,pts_per_meas=100): + """ + Performs analysis of a single peak that is not well matched to any analytic functions + + + Required Arguments: + x -- (n) ndarray of coordinate positions + f -- (n) ndarray of intensity measurements at coordinate positions x + + Optional Arguments: + remove_bg -- boolean, if selected a linear background will be subtracted from the peak + low_int -- float, value for area under a peak that defines a lower bound + on what is recognized as peak + edge_pts -- int, number of points at the edges of the data to use to calculated background + pts_per_meas -- how many interpolated points to place between measurement values + + Outputs: + p -- array of values containing the integrated intensity, center of mass, and + FWHM of the peak + """ + + + + + plt.plot(x,f) + #subtract background, assumed linear + if remove_bg: + bg_data=np.hstack((f[:(edge_pts+1)],f[-edge_pts:])) + bg_pts=np.hstack((x[:(edge_pts+1)],x[-edge_pts:])) + + bg_parm=np.polyfit(bg_pts,bg_data,1) + + f=f-(bg_parm[0]*x+bg_parm[1])#pull out high background + + f=f-np.min(f)#set the minimum to 0 + + + plt.plot(bg_pts,bg_data,'x') + plt.plot(x,f,'r') + + spacing=np.diff(x)[0]/pts_per_meas + xfine=np.arange(np.min(x),np.max(x)+spacing,spacing)# make a fine grid of points + ffine=np.interp(xfine,x,f) + + data_max=np.max(f)#find max intensity values + + total_int=integrate.simps(ffine,xfine)#numerically integrate the peak using the simpson rule + + cen_index=np.argmax(ffine) + A=data_max + + if(total_int and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. 
-# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, @@ -37,18 +37,18 @@ def _unit_gaussian(p,x):#Split the unit gaussian so this can be called for 2d an """ Required Arguments: p -- (m) [x0,FWHM] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ x0=p[0] FWHM=p[1] - sigma=FWHM/gauss_width_fact - + sigma=FWHM/gauss_width_fact + f=np.exp(-(x-x0)**2/(2.*sigma**2.)) return f @@ -56,14 +56,14 @@ def _gaussian1d_no_bg(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ A=p[0] - f=A*_unit_gaussian(p[[1,2]],x) + f=A*_unit_gaussian(p[[1,2]],x) return f @@ -71,64 +71,64 @@ def gaussian1d(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM,c0,c1] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ bg0=p[3] - bg1=p[4] - - f=_gaussian1d_no_bg(p[:3],x)+bg0+bg1*x - + bg1=p[4] + + f=_gaussian1d_no_bg(p[:3],x)+bg0+bg1*x + return f - - + + def _gaussian1d_no_bg_deriv(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: d_mat -- (3 x n) ndarray of derivative values at positions x - """ + """ x0=p[1] FWHM=p[2] - - sigma=FWHM/gauss_width_fact + + sigma=FWHM/gauss_width_fact dydx0=_gaussian1d_no_bg(p,x)*((x-x0)/(sigma**2.)) - dydA=_unit_gaussian(p[[1,2]],x) + dydA=_unit_gaussian(p[[1,2]],x) dydFWHM=_gaussian1d_no_bg(p,x)*((x-x0)**2./(sigma**3.))/gauss_width_fact - + d_mat=np.zeros((len(p),len(x))) - + d_mat[0,:]=dydA d_mat[1,:]=dydx0 d_mat[2,:]=dydFWHM - + return d_mat - -def gaussian1d_deriv(p,x): + +def gaussian1d_deriv(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM,c0,c1] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: d_mat -- (5 x n) ndarray of derivative values at positions x - """ + """ d_mat=np.zeros((len(p),len(x))) d_mat[0:3,:]=_gaussian1d_no_bg_deriv(p[0:3],x) - d_mat[3,:]=1. + d_mat[3,:]=1. 
d_mat[4,:]=x - + return d_mat @@ -138,16 +138,16 @@ def _unit_lorentzian(p,x):#Split the unit function so this can be called for 2d """ Required Arguments: p -- (m) [x0,FWHM] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ x0=p[0] FWHM=p[1] - gamma=FWHM/lorentz_width_fact - + gamma=FWHM/lorentz_width_fact + f= gamma**2 / ((x-x0)**2 + gamma**2) return f @@ -155,32 +155,32 @@ def _lorentzian1d_no_bg(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ A=p[0] - f= A*_unit_lorentzian(p[[1,2]],x) - + f= A*_unit_lorentzian(p[[1,2]],x) + return f - + def lorentzian1d(p,x): """ Required Arguments: p -- (m) [x0,FWHM,c0,c1] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ - + """ + bg0=p[3] bg1=p[4] - f=_lorentzian1d_no_bg(p[:3],x)+bg0+bg1*x - + f=_lorentzian1d_no_bg(p[:3],x)+bg0+bg1*x + return f @@ -188,43 +188,43 @@ def _lorentzian1d_no_bg_deriv(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: d_mat -- (3 x n) ndarray of derivative values at positions x - """ - + """ + x0=p[1] FWHM=p[2] - - gamma=FWHM/lorentz_width_fact + + gamma=FWHM/lorentz_width_fact dydx0=_lorentzian1d_no_bg(p,x)*((2.*(x-x0))/((x-x0)**2 + gamma**2)) - dydA=_unit_lorentzian(p[[1,2]],x) + dydA=_unit_lorentzian(p[[1,2]],x) dydFWHM=_lorentzian1d_no_bg(p,x)*((2.*(x-x0)**2.)/(gamma*((x-x0)**2 + gamma**2)))/lorentz_width_fact - + d_mat=np.zeros((len(p),len(x))) - d_mat[0,:]=dydA + d_mat[0,:]=dydA d_mat[1,:]=dydx0 d_mat[2,:]=dydFWHM - + return d_mat - -def lorentzian1d_deriv(p,x): + +def lorentzian1d_deriv(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM,c0,c1] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: d_mat -- (5 x n) ndarray of derivative values at positions x - """ + """ d_mat=np.zeros((len(p),len(x))) d_mat[0:3,:]=_lorentzian1d_no_bg_deriv(p[0:3],x) - d_mat[3,:]=1. + d_mat[3,:]=1. 
d_mat[4,:]=x - + return d_mat @@ -233,15 +233,15 @@ def _unit_pvoigt1d(p,x):#Split the unit function so this can be called for 2d an """ Required Arguments: p -- (m) [x0,FWHM,n] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ - n=p[2] - + n=p[2] + f=(n*_unit_gaussian(p[:2],x)+(1.-n)*_unit_lorentzian(p[:2],x)) return f @@ -249,31 +249,31 @@ def _pvoigt1d_no_bg(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM,n] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ - A=p[0] + A=p[0] f=A*_unit_pvoigt1d(p[[1,2,3]],x) return f -def pvoigt1d(p,x): +def pvoigt1d(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM,n,c0,c1] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x - """ + """ bg0=p[4] bg1=p[5] - f=_pvoigt1d_no_bg(p[:4],x)+bg0+bg1*x - + f=_pvoigt1d_no_bg(p[:4],x)+bg0+bg1*x + return f #### 1-D Split Psuedo Voigt Functions @@ -281,7 +281,7 @@ def _split_pvoigt1d_no_bg(p,x): """ Required Arguments: p -- (m) [A,x0,FWHM-,FWHM+,n-,n+] - x -- (n) ndarray of coordinate positions + x -- (n) ndarray of coordinate positions Outputs: f -- (n) ndarray of function values at positions x @@ -290,27 +290,28 @@ def _split_pvoigt1d_no_bg(p,x): A=p[0] x0=p[1] - f=np.zeros(x.shape[0]) - + f=np.zeros(x.shape[0]) + #Define halves, using gthanorequal and lthan, choice is arbitrary xr=x>=x0 - xl=x=x0 - xl=x=y0 - yl=y and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License (as published by the Free Software +# Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . 
+# ============================================================
+"""Tools for X-ray diffraction analysis
+"""
+
+
+
+
+
+
+
diff --git a/hexrd/grainmap/nfutil.py b/hexrd/grainmap/nfutil.py
new file mode 100644
index 00000000..4c863c78
--- /dev/null
+++ b/hexrd/grainmap/nfutil.py
@@ -0,0 +1,1156 @@
+
+
+
+
+#%%
+
+import time
+import os
+import logging
+import numpy as np
+import copy
+
+import numba
+import argparse
+import contextlib
+import multiprocessing
+import tempfile
+import shutil
+
+from hexrd.xrd import transforms as xf
+from hexrd.xrd import transforms_CAPI as xfcapi
+from hexrd.xrd import xrdutil
+
+from hexrd.xrd import rotations as rot
+from hexrd import valunits
+
+from hexrd.xrd.transforms_CAPI import anglesToGVec, \
+    makeRotMatOfExpMap, makeDetectorRotMat, makeOscillRotMat, \
+    gvecToDetectorXY, detectorXYToGvec
+
+import yaml
+import cPickle as cpl
+
+import scipy.ndimage as img
+try:
+    import imageio as imgio
+except ImportError:
+    from skimage import io as imgio
+import matplotlib.pyplot as plt
+
+# ==============================================================================
+# %% SOME SCAFFOLDING
+# ==============================================================================
+
+class ProcessController(object):
+    """This is a 'controller' that provides the necessary hooks to
+    track the results of the process as well as to provide clues of
+    the progress of the process"""
+
+    def __init__(self, result_handler=None, progress_observer=None, ncpus=1,
+                 chunk_size=100):
+        self.rh = result_handler
+        self.po = progress_observer
+        self.ncpus = ncpus
+        self.chunk_size = chunk_size
+        self.limits = {}
+        self.timing = []
+
+    # progress handling --------------------------------------------------------
+
+    def start(self, name, count):
+        self.po.start(name, count)
+        t = time.time()
+        self.timing.append((name, count, t))
+
+    def finish(self, name):
+        t = time.time()
+        self.po.finish()
+        entry = self.timing.pop()
+        assert name == entry[0]
+        total = t - entry[2]
+        logging.info("%s took %8.3fs (%8.6fs per item).",
+                     entry[0], total, total/entry[1])
+
+    def update(self, value):
+        self.po.update(value)
+
+    # result handler -----------------------------------------------------------
+
+    def handle_result(self, key, value):
+        logging.debug("handle_result (%(key)s)", locals())
+        self.rh.handle_result(key, value)
+
+    # value limiting ----------------------------------------------------------
+    def set_limit(self, key, limit_function):
+        if key in self.limits:
+            logging.warn("Overwriting limit function for '%(key)s'", locals())
+
+        self.limits[key] = limit_function
+
+    def limit(self, key, value):
+        try:
+            value = self.limits[key](value)
+        except KeyError:
+            pass
+        except Exception:
+            logging.warn("Could not apply limit to '%(key)s'", locals())
+
+        return value
+
+    # configuration -----------------------------------------------------------
+    def get_process_count(self):
+        return self.ncpus
+
+    def get_chunk_size(self):
+        return self.chunk_size
+
+
+def null_progress_observer():
+    class NullProgressObserver(object):
+        def start(self, name, count):
+            pass
+
+        def update(self, value):
+            pass
+
+        def finish(self):
+            pass
+
+    return NullProgressObserver()
+
+
+def progressbar_progress_observer():
+    from progressbar import ProgressBar, Percentage, Bar
+
+    class ProgressBarProgressObserver(object):
+        def start(self, name, count):
+            self.pbar = ProgressBar(widgets=[name, Percentage(), Bar()],
+                                    maxval=count)
+            self.pbar.start()
+
+        def update(self, value):
+            self.pbar.update(value)
def finish(self): + self.pbar.finish() + + return ProgressBarProgressObserver() + + +def forgetful_result_handler(): + class ForgetfulResultHandler(object): + def handle_result(self, key, value): + pass # do nothing + + return ForgetfulResultHandler() + + +def saving_result_handler(filename): + """returns a result handler that saves the resulting arrays into a file + with name filename""" + class SavingResultHandler(object): + def __init__(self, file_name): + self.filename = file_name + self.arrays = {} + + def handle_result(self, key, value): + self.arrays[key] = value + + def __del__(self): + logging.debug("Writing arrays in %(filename)s", self.__dict__) + try: + np.savez_compressed(open(self.filename, "wb"), **self.arrays) + except IOError: + logging.error("Failed to write %(filename)s", self.__dict__) + + return SavingResultHandler(filename) + + +def checking_result_handler(filename): + """returns a return handler that checks the results against a + reference file. + + The Check will consider a FAIL either a result not present in the + reference file (saved as a numpy savez or savez_compressed) or a + result that differs. It will consider a PARTIAL PASS if the + reference file has a shorter result, but the existing results + match. A FULL PASS will happen when all existing results match + + """ + class CheckingResultHandler(object): + def __init__(self, reference_file): + """Checks the result against those save in 'reference_file'""" + logging.info("Loading reference results from '%s'", reference_file) + self.reference_results = np.load(open(reference_file, 'rb')) + + def handle_result(self, key, value): + if key in ['experiment', 'image_stack']: + return #ignore these + + try: + reference = self.reference_results[key] + except KeyError as e: + logging.warning("%(key)s: %(e)s", locals()) + reference = None + + if reference is None: + msg = "'{0}': No reference result." 
+ logging.warn(msg.format(key)) + + try: + if key=="confidence": + reference = reference.T + value = value.T + + check_len = min(len(reference), len(value)) + test_passed = np.allclose(value[:check_len], reference[:check_len]) + + if not test_passed: + msg = "'{0}': FAIL" + logging.warn(msg.format(key)) + lvl = logging.WARN + elif len(value) > check_len: + msg = "'{0}': PARTIAL PASS" + lvl = logging.WARN + else: + msg = "'{0}': FULL PASS" + lvl = logging.INFO + logging.log(lvl, msg.format(key)) + except Exception as e: + msg = "%(key)s: Failure trying to check the results.\n%(e)s" + logging.error(msg, locals()) + + return CheckingResultHandler(filename) + + +# ============================================================================== +# %% OPTIMIZED BITS +# ============================================================================== + +# Some basic 3d algebra ======================================================== +@numba.njit +def _v3_dot(a, b): + return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + + +@numba.njit +def _m33_v3_multiply(m, v, dst): + v0 = v[0]; v1 = v[1]; v2 = v[2] + dst[0] = m[0, 0]*v0 + m[0, 1]*v1 + m[0, 2]*v2 + dst[1] = m[1, 0]*v0 + m[1, 1]*v1 + m[1, 2]*v2 + dst[2] = m[2, 0]*v0 + m[2, 1]*v1 + m[2, 2]*v2 + + return dst + + +@numba.njit +def _v3_normalized(src, dst): + v0 = src[0] + v1 = src[1] + v2 = src[2] + sqr_norm = v0*v0 + v1*v1 + v2*v2 + inv_norm = 1.0 if sqr_norm == 0.0 else 1./np.sqrt(sqr_norm) + + dst[0] = v0 * inv_norm + dst[1] = v1 * inv_norm + dst[2] = v2 * inv_norm + + return dst + + +@numba.njit +def _make_binary_rot_mat(src, dst): + v0 = src[0]; v1 = src[1]; v2 = src[2] + + dst[0,0] = 2.0*v0*v0 - 1.0 + dst[0,1] = 2.0*v0*v1 + dst[0,2] = 2.0*v0*v2 + dst[1,0] = 2.0*v1*v0 + dst[1,1] = 2.0*v1*v1 - 1.0 + dst[1,2] = 2.0*v1*v2 + dst[2,0] = 2.0*v2*v0 + dst[2,1] = 2.0*v2*v1 + dst[2,2] = 2.0*v2*v2 - 1.0 + + return dst + + +# code transcribed in numba from transforms module ============================= + +# This is equivalent to the transform module anglesToGVec, but written in +# numba. This should end in a module to share with other scripts +@numba.njit +def _anglesToGVec(angs, rMat_ss, rMat_c): + """From a set of angles return them in crystal space""" + result = np.empty_like(angs) + for i in range(len(angs)): + cx = np.cos(0.5*angs[i, 0]) + sx = np.sin(0.5*angs[i, 0]) + cy = np.cos(angs[i,1]) + sy = np.sin(angs[i,1]) + g0 = cx*cy + g1 = cx*sy + g2 = sx + + # with g being [cx*xy, cx*sy, sx] + # result = dot(rMat_c, dot(rMat_ss[i], g)) + t0_0 = rMat_ss[ i, 0, 0]*g0 + rMat_ss[ i, 1, 0]*g1 + rMat_ss[ i, 2, 0]*g2 + t0_1 = rMat_ss[ i, 0, 1]*g0 + rMat_ss[ i, 1, 1]*g1 + rMat_ss[ i, 2, 1]*g2 + t0_2 = rMat_ss[ i, 0, 2]*g0 + rMat_ss[ i, 1, 2]*g1 + rMat_ss[ i, 2, 2]*g2 + + result[i, 0] = rMat_c[0, 0]*t0_0 + rMat_c[ 1, 0]*t0_1 + rMat_c[ 2, 0]*t0_2 + result[i, 1] = rMat_c[0, 1]*t0_0 + rMat_c[ 1, 1]*t0_1 + rMat_c[ 2, 1]*t0_2 + result[i, 2] = rMat_c[0, 2]*t0_0 + rMat_c[ 1, 2]*t0_1 + rMat_c[ 2, 2]*t0_2 + + return result + + +# This is equivalent to the transform's module gvecToDetectorXYArray, but written in +# numba. 
+# As of now, it is not a good replacement as efficient allocation of the temporary +# arrays is not competitive with the stack allocation using in the C version of the +# code (WiP) + +# tC varies per coord +# gvec_cs, rSm varies per grain +# +# gvec_cs +beam = xf.bVec_ref[:, 0] +Z_l = xf.Zl[:,0] +@numba.jit() +def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): + """ beamVec is the beam vector: (0, 0, -1) in this case """ + ztol = xrdutil.epsf + p3_l = np.empty((3,)) + tmp_vec = np.empty((3,)) + vG_l = np.empty((3,)) + tD_l = np.empty((3,)) + norm_vG_s = np.empty((3,)) + norm_beam = np.empty((3,)) + tZ_l = np.empty((3,)) + brMat = np.empty((3,3)) + result = np.empty((len(rSn), 2)) + + _v3_normalized(beam, norm_beam) + _m33_v3_multiply(rD, Z_l, tZ_l) + + for i in xrange(len(rSn)): + _m33_v3_multiply(rSn[i], tC, p3_l) + p3_l += tS + p3_minus_p1_l = tD - p3_l + + num = _v3_dot(tZ_l, p3_minus_p1_l) + _v3_normalized(vG_sn[i], norm_vG_s) + + _m33_v3_multiply(rC, norm_vG_s, tmp_vec) + _m33_v3_multiply(rSn[i], tmp_vec, vG_l) + + bDot = -_v3_dot(norm_beam, vG_l) + + if bDot < ztol or bDot > 1.0 - ztol: + result[i, 0] = np.nan + result[i, 1] = np.nan + continue + + _make_binary_rot_mat(vG_l, brMat) + _m33_v3_multiply(brMat, norm_beam, tD_l) + denom = _v3_dot(tZ_l, tD_l) + + if denom < ztol: + result[i, 0] = np.nan + result[i, 1] = np.nan + continue + + u = num/denom + tmp_res = u*tD_l - p3_minus_p1_l + result[i,0] = _v3_dot(tmp_res, rD[:,0]) + result[i,1] = _v3_dot(tmp_res, rD[:,1]) + + return result + + +@numba.njit +def _quant_and_clip_confidence(coords, angles, image, base, inv_deltas, clip_vals,bshw): + """quantize and clip the parametric coordinates in coords + angles + + coords - (..., 2) array: input 2d parametric coordinates + angles - (...) array: additional dimension for coordinates + base - (3,) array: base value for quantization (for each dimension) + inv_deltas - (3,) array: inverse of the quantum size (for each dimension) + clip_vals - (2,) array: clip size (only applied to coords dimensions) + bshw - (1,) half width of the beam stop in mm + + clipping is performed on ranges [0, clip_vals[0]] for x and + [0, clip_vals[1]] for y + + returns an array with the quantized coordinates, with coordinates + falling outside the clip zone filtered out. 
+ + """ + count = len(coords) + + in_sensor = 0 + matches = 0 + for i in range(count): + xf = coords[i, 0] + yf = coords[i, 1] + + xf = np.floor((xf - base[0]) * inv_deltas[0]) + if not xf >= 0.0: + continue + if not xf < clip_vals[0]: + continue + + if not np.abs(yf)>bshw: + continue + + yf = np.floor((yf - base[1]) * inv_deltas[1]) + + + + if not yf >= 0.0: + continue + if not yf < clip_vals[1]: + continue + + zf = np.floor((angles[i] - base[2]) * inv_deltas[2]) + + in_sensor += 1 + + x, y, z = int(xf), int(yf), int(zf) + + #x_byte = x // 8 + #x_off = 7 - (x % 8) + #if image[z, y, x_byte] (1< 1: + global _multiprocessing_start_method + _multiprocessing_start_method=multiprocessing_start_method + logging.info('Running multiprocess %d processes (%s)', + ncpus, _multiprocessing_start_method) + with grand_loop_pool(ncpus=ncpus, state=(chunk_size, + image_stack, + all_angles, precomp, test_crds, + experiment)) as pool: + for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop, + chunks): + count = rvalues.shape[1] + confidence[:, rslice] = rvalues + finished += count + controller.update(finished) + del _multiprocessing_start_method + + pool.close() + else: + logging.info('Running in a single process') + for chunk_start in chunks: + chunk_stop = min(n_coords, chunk_start+chunk_size) + rslice, rvalues = _grand_loop_inner(image_stack, all_angles, + precomp, test_crds, experiment, + start=chunk_start, + stop=chunk_stop) + count = rvalues.shape[1] + confidence[:, rslice] = rvalues + finished += count + controller.update(finished) + + controller.finish(subprocess) + controller.handle_result("confidence", confidence) + #del _multiprocessing_start_method + + #pool.close() + + return confidence + + +def evaluate_diffraction_angles(experiment, controller=None): + """Uses simulateGVecs to generate the angles used per each grain. + returns a list containg one array per grain. + + experiment -- a bag of experiment values, including the grains specs and other + required parameters. + """ + # extract required data from experiment + exp_maps = experiment.exp_maps + plane_data = experiment.plane_data + detector_params = experiment.detector_params + pixel_size = experiment.pixel_size + ome_range = experiment.ome_range + ome_period = experiment.ome_period + + panel_dims_expanded = [(-10, -10), (10, 10)] + subprocess='evaluate diffraction angles' + pbar = controller.start(subprocess, + len(exp_maps)) + all_angles = [] + ref_gparams = np.array([0., 0., 0., 1., 1., 1., 0., 0., 0.]) + for i, exp_map in enumerate(exp_maps): + gparams = np.hstack([exp_map, ref_gparams]) + sim_results = xrdutil.simulateGVecs(plane_data, + detector_params, + gparams, + panel_dims=panel_dims_expanded, + pixel_pitch=pixel_size, + ome_range=ome_range, + ome_period=ome_period, + distortion=None) + all_angles.append(sim_results[2]) + controller.update(i+1) + pass + controller.finish(subprocess) + + return all_angles + + +def _grand_loop_inner(image_stack, angles, precomp, + coords, experiment, start=0, stop=None): + """Actual simulation code for a chunk of data. It will be used both, + in single processor and multiprocessor cases. Chunking is performed + on the coords. 
+
+    image_stack -- the image stack from the sensors
+    angles -- the angles (grains) to test
+    coords -- all the coords to test
+    precomp -- (gvec_cs, rmat_ss) precomputed for each grain
+    experiment -- bag with experiment parameters
+    start -- chunk start offset
+    stop -- chunk end offset
+    """
+
+    t = time.time()
+    n_coords = len(coords)
+    n_angles = len(angles)
+
+    # experiment geometric layout parameters
+    rD = experiment.rMat_d
+    rCn = experiment.rMat_c
+    tD = experiment.tVec_d[:,0]
+    tS = experiment.tVec_s[:,0]
+
+    # experiment panel related configuration
+    base = experiment.base
+    inv_deltas = experiment.inv_deltas
+    clip_vals = experiment.clip_vals
+    distortion = experiment.distortion
+    bshw = experiment.bsw/2.
+
+    _to_detector = xfcapi.gvecToDetectorXYArray
+    #_to_detector = _gvec_to_detector_array
+    stop = min(stop, n_coords) if stop is not None else n_coords
+
+    distortion_fn = None
+    if distortion is not None and len(distortion) > 0:
+        distortion_fn, distortion_args = distortion
+
+    acc_detector = 0.0
+    acc_distortion = 0.0
+    acc_quant_clip = 0.0
+    confidence = np.zeros((n_angles, stop-start))
+    grains = 0
+    crds = 0
+
+    if distortion_fn is None:
+        for igrn in xrange(n_angles):
+            angs = angles[igrn]; rC = rCn[igrn]
+            gvec_cs, rMat_ss = precomp[igrn]
+            grains += 1
+            for icrd in xrange(start, stop):
+                t0 = time.time()
+                det_xy = _to_detector(gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd])
+                t1 = time.time()
+                c = _quant_and_clip_confidence(det_xy, angs[:,2], image_stack,
+                                               base, inv_deltas, clip_vals, bshw)
+                t2 = time.time()
+                acc_detector += t1 - t0
+                acc_quant_clip += t2 - t1
+                crds += 1
+                confidence[igrn, icrd - start] = c
+    else:
+        for igrn in xrange(n_angles):
+            angs = angles[igrn]; rC = rCn[igrn]
+            gvec_cs, rMat_ss = precomp[igrn]
+            grains += 1
+            for icrd in xrange(start, stop):
+                t0 = time.time()
+                tmp_xys = _to_detector(gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd])
+                t1 = time.time()
+                det_xy = distortion_fn(tmp_xys, distortion_args, invert=True)
+                t2 = time.time()
+                c = _quant_and_clip_confidence(det_xy, angs[:,2], image_stack,
+                                               base, inv_deltas, clip_vals, bshw)
+                t3 = time.time()
+                acc_detector += t1 - t0
+                acc_distortion += t2 - t1
+                acc_quant_clip += t3 - t2
+                crds += 1
+                confidence[igrn, icrd - start] = c
+
+    t = time.time() - t
+    return slice(start, stop), confidence
+
+
+def multiproc_inner_loop(chunk):
+    """function to use in multiprocessing that computes the simulation over the
+    task's allotted chunk of data"""
+
+    chunk_size = _mp_state[0]
+    n_coords = len(_mp_state[4])
+    chunk_stop = min(n_coords, chunk+chunk_size)
+    return _grand_loop_inner(*_mp_state[1:], start=chunk, stop=chunk_stop)
+
+
+def worker_init(id_state, id_exp):
+    """process initialization function. This function is only used when the
+    child processes are spawned (instead of forked). When using the fork model
+    of multiprocessing the data is just inherited in process memory."""
+    import joblib
+
+    global _mp_state
+    state = joblib.load(id_state)
+    experiment = joblib.load(id_exp)
+    _mp_state = state + (experiment,)
+
+@contextlib.contextmanager
+def grand_loop_pool(ncpus, state):
+    """function that handles the initialization of multiprocessing. It handles
+    properly the use of spawned vs forked multiprocessing. The multiprocessing
+    can be either 'fork' or 'spawn', with 'spawn' being required on non-fork
+    platforms (like Windows) and 'fork' being preferred on fork platforms due
+    to its efficiency.
+ """ + # state = ( chunk_size, + # image_stack, + # angles, + # precomp, + # coords, + # experiment ) + global _multiprocessing_start_method + if _multiprocessing_start_method == 'fork': + # Use FORK multiprocessing. + + # All read-only data can be inherited in the process. So we "pass" it as + # a global that the child process will be able to see. At the end of the + # processing the global is removed. + global _mp_state + _mp_state = state + pool = multiprocessing.Pool(ncpus) + yield pool + del (_mp_state) + else: + # Use SPAWN multiprocessing. + + # As we can not inherit process data, all the required data is + # serialized into a temporary directory using joblib. The + # multiprocessing pool will have the "worker_init" as initialization + # function that takes the key for the serialized data, which will be + # used to load the parameter memory into the spawn process (also using + # joblib). In theory, joblib uses memmap for arrays if they are not + # compressed, so no compression is used for the bigger arrays. + import joblib + tmp_dir = tempfile.mkdtemp(suffix='-nf-grand-loop') + try: + # dumb dumping doesn't seem to work very well.. do something ad-hoc + logging.info('Using "%s" as temporary directory.', tmp_dir) + + id_exp = joblib.dump(state[-1], + os.path.join(tmp_dir, + 'grand-loop-experiment.gz'), + compress=True) + id_state = joblib.dump(state[:-1], + os.path.join(tmp_dir, 'grand-loop-data')) + pool = multiprocessing.Pool(ncpus, worker_init, + (id_state[0], id_exp[0])) + yield pool + finally: + logging.info('Deleting "%s".', tmp_dir) + shutil.rmtree(tmp_dir) + + + + + +#%% Loading Utilities + + +def gen_trial_exp_data(grain_out_file,det_file,mat_file, x_ray_energy, mat_name, max_tth, comp_thresh, chi2_thresh, misorientation_bnd, \ + misorientation_spacing,ome_range_deg, nframes, beam_stop_width): + + print('Loading Grain Data.....') + #gen_grain_data + ff_data=np.loadtxt(grain_out_file) + + #ff_data=np.atleast_2d(ff_data[2,:]) + + exp_maps=ff_data[:,3:6] + t_vec_ds=ff_data[:,6:9] + + + # + completeness=ff_data[:,1] + + chi2=ff_data[:,2] + + n_grains=exp_maps.shape[0] + + rMat_c = rot.rotMatOfExpMap(exp_maps.T) + + + + + cut=np.where(np.logical_and(completeness>comp_thresh,chi20.: + mat_used.planeData.tThMax = np.amax(np.radians(max_tth)) + else: + mat_used.planeData.tThMax = np.amax(pixel_tth) + + pd=mat_used.planeData + + + print('Final Assembly.....') + experiment = argparse.Namespace() + # grains related information + experiment.n_grains = n_grains # this can be derived from other values... + experiment.rMat_c = rMat_c # n_grains rotation matrices (one per grain) + experiment.exp_maps = exp_maps # n_grains exp_maps -angle * rotation axis- (one per grain) + + experiment.plane_data = pd + experiment.detector_params = detector_params + experiment.pixel_size = pixel_size + experiment.ome_range = ome_range + experiment.ome_period = ome_period + experiment.x_col_edges = x_col_edges + experiment.y_row_edges = y_row_edges + experiment.ome_edges = ome_edges + experiment.ncols = ncols + experiment.nrows = nrows + experiment.nframes = nframes# used only in simulate... + experiment.rMat_d = rMat_d + experiment.tVec_d = np.atleast_2d(detector_params[3:6]).T + experiment.chi = detector_params[6] # note this is used to compute S... why is it needed? + experiment.tVec_s = np.atleast_2d(detector_params[7:]).T + experiment.rMat_c = rMat_c + experiment.distortion = None + experiment.panel_dims = panel_dims # used only in simulate... 
+ experiment.base = base + experiment.inv_deltas = inv_deltas + experiment.clip_vals = clip_vals + experiment.bsw = beam_stop_width + + if mis_steps ==0: + nf_to_ff_id_map = cut + else: + nf_to_ff_id_map=np.tile(cut,27*mis_steps) + + return experiment, nf_to_ff_id_map + +#%% + + +def gen_nf_test_grid_tomo(x_dim_pnts, z_dim_pnts, v_bnds, voxel_spacing): + + if v_bnds[0]==v_bnds[1]: + Xs,Ys,Zs=np.meshgrid(np.arange(x_dim_pnts),v_bnds[0],np.arange(z_dim_pnts)) + else: + Xs,Ys,Zs=np.meshgrid(np.arange(x_dim_pnts),np.arange(v_bnds[0]+voxel_spacing/2.,v_bnds[1],voxel_spacing),np.arange(z_dim_pnts)) + #note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) + + + Zs=(Zs-(z_dim_pnts/2))*voxel_spacing + Xs=(Xs-(x_dim_pnts/2))*voxel_spacing + + + test_crds = np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T + n_crds = len(test_crds) + + return test_crds, n_crds, Xs, Ys, Zs + + +#%% + +def gen_nf_dark(data_folder,img_nums,num_for_dark,nrows,ncols,dark_type='median',stem='nf_',num_digits=5,ext='.tif'): + + dark_stack=np.zeros([num_for_dark,nrows,ncols]) + + print('Loading data for dark generation...') + for ii in np.arange(num_for_dark): + print('Image #: ' + str(ii)) + dark_stack[ii,:,:]=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext) + #image_stack[ii,:,:]=np.flipud(tmp_img>threshold) + + if dark_type=='median': + print('making median...') + dark=np.median(dark_stack,axis=0) + elif dark_type=='min': + print('making min...') + dark=np.min(dark_stack,axis=0) + + return dark + + +#%% +def gen_nf_image_stack(data_folder,img_nums,dark,num_erosions,num_dilations,ome_dilation_iter,threshold,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'): + + + image_stack=np.zeros([img_nums.shape[0],nrows,ncols],dtype=bool) + + print('Loading and Cleaning Images...') + for ii in np.arange(img_nums.shape[0]): + print('Image #: ' + str(ii)) + tmp_img=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext)-dark + #image procesing + image_stack[ii,:,:]=img.morphology.binary_erosion(tmp_img>threshold,iterations=num_erosions) + image_stack[ii,:,:]=img.morphology.binary_dilation(image_stack[ii,:,:],iterations=num_dilations) + + #%A final dilation that includes omega + print('Final Dilation Including Omega....') + image_stack=img.morphology.binary_dilation(image_stack,iterations=ome_dilation_iter) + + return image_stack + + +#%% +def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt,parm_vector,slice_shape): + #0-distance + #1-x center + #2-xtilt + #3-ytilt + #4-ztilt + + multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn' + + #current detector parameters, note the value for the actively optimized parameters will be ignored + distance=experiment.detector_params[5]#mm + x_cen=experiment.detector_params[3]#mm + xtilt=experiment.detector_params[0] + ytilt=experiment.detector_params[1] + ztilt=experiment.detector_params[2] + + num_parm_pts=len(parm_vector) + + trial_data=np.zeros([num_parm_pts,slice_shape[0],slice_shape[1]]) + + tmp_td=copy.copy(experiment.tVec_d) + for jj in np.arange(num_parm_pts): + print('cycle %d of %d'%(jj+1,num_parm_pts)) + + + if parm_to_opt==0: + tmp_td[2]=parm_vector[jj] + else: + tmp_td[2]=distance + + if parm_to_opt==1: + tmp_td[0]=parm_vector[jj] + else: + tmp_td[0]=x_cen + + if parm_to_opt==2: + rMat_d_tmp=makeDetectorRotMat([parm_vector[jj],ytilt,ztilt]) + elif parm_to_opt==3: + rMat_d_tmp=makeDetectorRotMat([xtilt,parm_vector[jj],ztilt]) + elif parm_to_opt==4: + 
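+            # Editorial note: parm_to_opt indices 2-4 select which tilt angle
+            # (x, y, or z) is replaced by the scanned value. A hypothetical
+            # sweep of the z tilt, say +/-1 mrad about zero, would look like
+            #
+            #     trial = scan_detector_parm(image_stack, experiment,
+            #                                test_crds, controller, 4,
+            #                                np.linspace(-0.001, 0.001, 11),
+            #                                slice_shape)
+            #
+            # with trial[j] the max-confidence map for the j-th tilt value.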
rMat_d_tmp=makeDetectorRotMat([xtilt,ytilt,parm_vector[jj]])
+        else:
+            rMat_d_tmp=makeDetectorRotMat([xtilt,ytilt,ztilt])
+
+        experiment.rMat_d = rMat_d_tmp
+        experiment.tVec_d = tmp_td
+
+        conf=test_orientations(image_stack, experiment, test_crds,
+                               controller,multiprocessing_start_method)
+
+        trial_data[jj]=np.max(conf,axis=0).reshape(slice_shape)
+
+    return trial_data
+
+#%%
+
+def extract_max_grain_map(confidence,grid_shape,binary_recon_bin=None):
+    if binary_recon_bin is None:
+        binary_recon_bin=np.ones([grid_shape[1],grid_shape[2]])
+
+    conf_squeeze=np.max(confidence,axis=0).reshape(grid_shape)
+    grains=np.argmax(confidence,axis=0).reshape(grid_shape)
+    out_bounds=np.where(binary_recon_bin==0)
+    conf_squeeze[:,out_bounds[0],out_bounds[1]] = -0.001
+
+    return conf_squeeze,grains
+
+#%%
+
+def process_raw_confidence(raw_confidence,vol_shape,tomo_mask=None,id_remap=None):
+
+    print('Compiling Confidence Map...')
+    confidence_map=np.max(raw_confidence,axis=0).reshape(vol_shape)
+    grain_map=np.argmax(raw_confidence,axis=0).reshape(vol_shape)
+
+    if tomo_mask is not None:
+        print('Applying tomography mask...')
+        out_bounds=np.where(tomo_mask==0)
+        confidence_map[:,out_bounds[0],out_bounds[1]] = -0.001
+        grain_map[:,out_bounds[0],out_bounds[1]] = -1
+
+    if id_remap is not None:
+        max_grain_no=np.max(grain_map)
+        grain_map_copy=copy.copy(grain_map)
+        print('Remapping grain ids to ff...')
+        for ii in np.arange(max_grain_no+1):
+            this_grain=np.where(grain_map==ii)
+            grain_map_copy[this_grain]=id_remap[ii]
+        grain_map=grain_map_copy
+
+    return grain_map, confidence_map
+
+#%%
+
+def save_raw_confidence(save_dir,save_stem,raw_confidence,id_remap=None):
+    print('Saving raw confidence, might take a while...')
+    if id_remap is not None:
+        np.savez(save_dir+save_stem+'_raw_confidence.npz',raw_confidence=raw_confidence,id_remap=id_remap)
+    else:
+        np.savez(save_dir+save_stem+'_raw_confidence.npz',raw_confidence=raw_confidence)
+
+#%%
+
+def save_nf_data(save_dir,save_stem,grain_map,confidence_map,Xs,Ys,Zs,ori_list,id_remap=None):
+    print('Saving grain map data...')
+    if id_remap is not None:
+        np.savez(save_dir+save_stem+'_grain_map_data.npz',grain_map=grain_map,confidence_map=confidence_map,Xs=Xs,Ys=Ys,Zs=Zs,ori_list=ori_list,id_remap=id_remap)
+    else:
+        np.savez(save_dir+save_stem+'_grain_map_data.npz',grain_map=grain_map,confidence_map=confidence_map,Xs=Xs,Ys=Ys,Zs=Zs,ori_list=ori_list)
+
+#%%
+
+def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,id_remap=None):
+
+    grains_plot=np.squeeze(grain_map[layer_no,:,:])
+    conf_plot=np.squeeze(confidence_map[layer_no,:,:])
+    n_grains=len(exp_maps)
+
+    rgb_image=np.zeros([grains_plot.shape[0],grains_plot.shape[1],4], dtype='float32')
+    rgb_image[:,:,3]=1.
+
+    for ii in np.arange(n_grains):
+        if id_remap is not None:
+            this_grain=np.where(np.squeeze(grains_plot)==id_remap[ii])
+        else:
+            this_grain=np.where(np.squeeze(grains_plot)==ii)
+        if this_grain[0].size>0:
+
+            ori=exp_maps[ii,:]
+
+            #cubic mapping
+            rgb_image[this_grain[0],this_grain[1],0]=(ori[0]+(np.pi/4.))/(np.pi/2.)
+            rgb_image[this_grain[0],this_grain[1],1]=(ori[1]+(np.pi/4.))/(np.pi/2.)
+            rgb_image[this_grain[0],this_grain[1],2]=(ori[2]+(np.pi/4.))/(np.pi/2.)
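+            # Editorial note: each exponential-map component is assumed to
+            # lie in [-pi/4, pi/4] here, so the affine map above sends it to
+            # [0, 1]; e.g. a component of 0 maps to (0 + pi/4)/(pi/2) = 0.5,
+            # i.e. mid-gray in that color channel.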
+
+    plt.imshow(rgb_image,interpolation='none')
+    plt.hold(True)
+    plt.imshow(conf_plot,vmin=0.0,vmax=1.,interpolation='none',cmap=plt.cm.gray,alpha=0.5)
diff --git a/hexrd/grainmap/tomoutil.py b/hexrd/grainmap/tomoutil.py
new file mode 100644
index 00000000..e068e212
--- /dev/null
+++ b/hexrd/grainmap/tomoutil.py
@@ -0,0 +1,176 @@
+#%%
+
+import numpy as np
+import scipy as sp
+
+import scipy.ndimage as img
+try:
+    import imageio as imgio
+except ImportError:
+    from skimage import io as imgio
+import skimage.transform as xformimg
+
+from skimage.transform import iradon, radon, rescale
+
+#%%
+
+def gen_bright_field(tbf_data_folder,tbf_img_start,tbf_num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'):
+
+    tbf_img_nums=np.arange(tbf_img_start,tbf_img_start+tbf_num_imgs,1)
+
+    tbf_stack=np.zeros([tbf_num_imgs,nrows,ncols])
+
+    print('Loading data for median bright field...')
+    for ii in np.arange(tbf_num_imgs):
+        print('Image #: ' + str(ii))
+        tbf_stack[ii,:,:]=imgio.imread(tbf_data_folder+'%s'%(stem)+str(tbf_img_nums[ii]).zfill(num_digits)+ext)
+        #image_stack[ii,:,:]=np.flipud(tmp_img>threshold)
+    print('making median...')
+
+    tbf=np.median(tbf_stack,axis=0)
+
+    return tbf
+
+
+def gen_median_image(data_folder,img_start,num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'):
+
+    img_nums=np.arange(img_start,img_start+num_imgs,1)
+
+    stack=np.zeros([num_imgs,nrows,ncols])
+
+    print('Loading data for median image...')
+    for ii in np.arange(num_imgs):
+        print('Image #: ' + str(ii))
+        stack[ii,:,:]=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext)
+        #image_stack[ii,:,:]=np.flipud(tmp_img>threshold)
+    print('making median...')
+
+    med=np.median(stack,axis=0)
+
+    return med
+
+
+def gen_attenuation_rads(tomo_data_folder,tbf,tomo_img_start,tomo_num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif',tdf=None):
+
+    #Reconstructs a single tomography layer to find the extent of the sample
+    tomo_img_nums=np.arange(tomo_img_start,tomo_img_start+tomo_num_imgs,1)
+
+    if tdf is None:
+        tdf=np.zeros([nrows,ncols])
+
+    rad_stack=np.zeros([tomo_num_imgs,nrows,ncols])
+
+    print('Loading and Calculating Absorption Radiographs ...')
+    for ii in np.arange(tomo_num_imgs):
+        print('Image #: ' + str(ii))
+        tmp_img=imgio.imread(tomo_data_folder+'%s'%(stem)+str(tomo_img_nums[ii]).zfill(num_digits)+ext)
+        rad_stack[ii,:,:]=-np.log((tmp_img.astype(float)-tdf)/(tbf.astype(float)-tdf))
+
+    return rad_stack
+
+
+def tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=1024,start_tomo_ang=0., end_tomo_ang=360.,tomo_num_imgs=360, center=0.,pixel_size=0.00148):
+    sinogram=np.squeeze(rad_stack[:,layer_row,:])
+
+    rotation_axis_pos=-int(np.round(center/pixel_size))
+    #rotation_axis_pos=13
+
+    theta = np.linspace(start_tomo_ang, end_tomo_ang, tomo_num_imgs, endpoint=False)
+
+    max_rad=int(cross_sectional_dim/pixel_size/2.*1.1)  #10% slack to avoid edge effects
+
+    if rotation_axis_pos>=0:
+        sinogram_cut=sinogram[:,2*rotation_axis_pos:]
+    else:
+        sinogram_cut=sinogram[:,:(2*rotation_axis_pos)]
+
+    dist_from_edge=np.round(sinogram_cut.shape[1]/2.).astype(int)-max_rad
+
+    sinogram_cut=sinogram_cut[:,dist_from_edge:-dist_from_edge]
+
+    print('Inverting Sinogram....')
+    reconstruction_fbp = iradon(sinogram_cut.T, theta=theta, circle=True)
+
+    reconstruction_fbp=np.rot90(reconstruction_fbp,3)  #Rotation to get the result consistent with hexrd, needs to be checked
+
+    return reconstruction_fbp
+
+
+def threshold_and_clean_tomo_layer(reconstruction_fbp,recon_thresh, noise_obj_size,min_hole_size,edge_cleaning_iter=None):
+    binary_recon=reconstruction_fbp>recon_thresh
+
+    #hard coded cleaning, grinding sausage...
+    binary_recon=img.morphology.binary_erosion(binary_recon,iterations=1)
+    binary_recon=img.morphology.binary_dilation(binary_recon,iterations=4)
+
+    labeled_img,num_labels=img.label(binary_recon)
+
+    print('Cleaning...')
+    print('Removing Noise...')
+    for ii in np.arange(1,num_labels):
+        obj1=np.where(labeled_img==ii)
+        if obj1[0].shape[0]>=1 and obj1[0].shape[0]<noise_obj_size:
+ radius*radius
+
+    binary_recon_bin[mask]=0
+
+    return binary_recon_bin
+
diff --git a/hexrd/grainmap/vtkutil.py b/hexrd/grainmap/vtkutil.py
new file mode 100644
index 00000000..3af28e40
--- /dev/null
+++ b/hexrd/grainmap/vtkutil.py
@@ -0,0 +1,126 @@
+import numpy as np
+
+import os
+
+#%%
+
+def output_grain_map_vtk(data_location,data_stems,output_stem,vol_spacing,top_down=True):
+
+    num_scans=len(data_stems)
+
+    confidence_maps=[None]*num_scans
+    grain_maps=[None]*num_scans
+    Xss=[None]*num_scans
+    Yss=[None]*num_scans
+    Zss=[None]*num_scans
+
+    for ii in np.arange(num_scans):
+        print('Loading Volume %d ....'%(ii))
+        conf_data=np.load(os.path.join(data_location,data_stems[ii]+'_grain_map_data.npz'))
+
+        confidence_maps[ii]=conf_data['confidence_map']
+        grain_maps[ii]=conf_data['grain_map']
+        Xss[ii]=conf_data['Xs']
+        Yss[ii]=conf_data['Ys']
+        Zss[ii]=conf_data['Zs']
+
+    #assumes all volumes to be the same size
+    num_layers=grain_maps[0].shape[0]
+
+    total_layers=num_layers*num_scans
+
+    num_rows=grain_maps[0].shape[1]
+    num_cols=grain_maps[0].shape[2]
+
+    grain_map_stitched=np.zeros((total_layers,num_rows,num_cols))
+    confidence_stitched=np.zeros((total_layers,num_rows,num_cols))
+    Xs_stitched=np.zeros((total_layers,num_rows,num_cols))
+    Ys_stitched=np.zeros((total_layers,num_rows,num_cols))
+    Zs_stitched=np.zeros((total_layers,num_rows,num_cols))
+
+    for i in np.arange(num_scans):
+        if top_down:
+            grain_map_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=grain_maps[num_scans-1-i]
+            confidence_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=confidence_maps[num_scans-1-i]
+            Xs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Xss[num_scans-1-i]
+            Zs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Zss[num_scans-1-i]
+            Ys_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Yss[num_scans-1-i]+vol_spacing*i
+        else:
+            grain_map_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=grain_maps[i]
+            confidence_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=confidence_maps[i]
+            Xs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Xss[i]
+            Zs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Zss[i]
+            Ys_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Yss[i]+vol_spacing*i
+
+    print('Writing VTK data...')
+    # VTK Dump
+    Xslist=Xs_stitched[:,:,:].ravel()
+    Yslist=Ys_stitched[:,:,:].ravel()
+    Zslist=Zs_stitched[:,:,:].ravel()
+
+    grainlist=grain_map_stitched[:,:,:].ravel()
+    conflist=confidence_stitched[:,:,:].ravel()
+
+    num_pts=Xslist.shape[0]
+    num_cells=(total_layers-1)*(num_rows-1)*(num_cols-1)
+
+    f = open(os.path.join(data_location, output_stem +'_stitch.vtk'), 'w')
+
+    f.write('# vtk DataFile Version 3.0\n')
+    f.write('grainmap Data\n')
+    f.write('ASCII\n')
+    f.write('DATASET UNSTRUCTURED_GRID\n')
+    f.write('POINTS %d double\n' % (num_pts))
+
+    for i in np.arange(num_pts):
+        f.write('%e %e %e \n' 
%(Xslist[i],Yslist[i],Zslist[i])) + + scale2=num_cols*num_rows + scale1=num_cols + + f.write('CELLS %d %d\n' % (num_cells, 9*num_cells)) + for k in np.arange(Xs_stitched.shape[0]-1): + for j in np.arange(Xs_stitched.shape[1]-1): + for i in np.arange(Xs_stitched.shape[2]-1): + base=scale2*k+scale1*j+i + p1=base + p2=base+1 + p3=base+1+scale1 + p4=base+scale1 + p5=base+scale2 + p6=base+scale2+1 + p7=base+scale2+scale1+1 + p8=base+scale2+scale1 + + f.write('8 %d %d %d %d %d %d %d %d \n' %(p1,p2,p3,p4,p5,p6,p7,p8)) + + + f.write('CELL_TYPES %d \n' % (num_cells)) + for i in np.arange(num_cells): + f.write('12 \n') + + f.write('POINT_DATA %d \n' % (num_pts)) + f.write('SCALARS grain_id int \n') + f.write('LOOKUP_TABLE default \n') + for i in np.arange(num_pts): + f.write('%d \n' %(grainlist[i])) + + f.write('FIELD FieldData 1 \n' ) + f.write('confidence 1 %d float \n' % (num_pts)) + for i in np.arange(num_pts): + f.write('%e \n' %(conflist[i])) + + + f.close() \ No newline at end of file diff --git a/hexrd/gridutil.py b/hexrd/gridutil.py index 5628e26e..7ce487e9 100644 --- a/hexrd/gridutil.py +++ b/hexrd/gridutil.py @@ -5,6 +5,7 @@ from numpy import sum as asum from numpy.linalg import det import numpy as np +from hexrd.constants import sqrt_epsf from hexrd import USE_NUMBA if USE_NUMBA: import numba @@ -38,15 +39,12 @@ def cellIndices(edges, points_1d): must be mapped to the same branch cut, and abs(edges[0] - edges[-1]) = 2*pi """ - ztol = 1e-12 + ztol = sqrt_epsf assert len(edges) >= 2, "must have at least 2 edges" - points_1d = r_[points_1d].flatten() - delta = float(edges[1] - edges[0]) - - on_last_rhs = points_1d >= edges[-1] - ztol - points_1d[on_last_rhs] = points_1d[on_last_rhs] - ztol + points_1d = np.r_[points_1d].flatten() + delta = float(edges[1] - edges[0]) if delta > 0: on_last_rhs = points_1d >= edges[-1] - ztol @@ -58,7 +56,7 @@ def cellIndices(edges, points_1d): idx = ceil( (points_1d - edges[0]) / delta ) - 1 else: raise RuntimeError, "edges array gives delta of 0" - # ...will catch exceptions elsewhere... + # ...will catch exceptions elsewhere... 
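+    # Editorial sketch (hypothetical values): with increasing edges
+    # [0., 1., 2., 3.] (delta = 1.), a point at 1.5 lands in cell 1,
+    # while a point sitting exactly on the last edge (3.) is first
+    # nudged inward by ztol so it maps to the last valid cell (2)
+    # instead of falling off the grid.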
# if np.any(np.logical_or(idx < 0, idx > len(edges) - 1)): # raise RuntimeWarning, "some input points are outside the grid" return array(idx, dtype=int) @@ -134,7 +132,7 @@ def compute_areas(xy_eval_vtx, conn): v1x = vtx_x - vtx0x v1y = vtx_y - vtx0y acc += v0x*v1y - v1x*v0y - + areas[i] = 0.5 * acc return areas else: @@ -159,7 +157,7 @@ def compute_areas(xy_eval_vtx, conn): for i in range(len(conn)): polygon = [[xy_eval_vtx[conn[i, j], 0], xy_eval_vtx[conn[i, j], 1]] for j in range(4)] - areas[i] = gutil.computeArea(polygon) + areas[i] = computeArea(polygon) return areas def computeArea(polygon): @@ -168,16 +166,31 @@ def computeArea(polygon): """ n_vertices = len(polygon) polygon = array(polygon) - + triv = array([ [ [0, i-1], [0, i] ] for i in range(2, n_vertices) ]) - + area = 0 for i in range(len(triv)): - tvp = diff( hstack([ polygon[triv[i][0], :], + tvp = diff( hstack([ polygon[triv[i][0], :], polygon[triv[i][1], :] ]), axis=0).flatten() area += 0.5 * cross(tvp[:2], tvp[2:]) return area +def make_tolerance_grid(bin_width, window_width, num_subdivisions, + adjust_window=False, one_sided=False): + if bin_width > window_width: + bin_width = window_width + if adjust_window: + window_width = np.ceil(window_width/bin_width)*bin_width + if one_sided: + ndiv = abs(int(window_width/bin_width)) + grid = (np.arange(0, 2*ndiv+1) - ndiv)*bin_width + ndiv = 2*ndiv + else: + ndiv = int(num_subdivisions*np.ceil(window_width/float(bin_width))) + grid = np.arange(0, ndiv+1)*window_width/float(ndiv) - 0.5*window_width + return ndiv, grid + def computeIntersection(line1, line2): """ compute intersection of two-dimensional line intersection @@ -187,14 +200,14 @@ def computeIntersection(line1, line2): line1 = [ [x0, y0], [x1, y1] ] line1 = [ [x3, y3], [x4, y4] ] - + """ intersection = zeros(2) l1 = array(line1) l2 = array(line2) - + det_l1 = det(l1) det_l2 = det(l2) @@ -219,7 +232,7 @@ def isinside(point, boundary, ccw=True): """ pointPositionVector = hstack([ point - boundary[0, :], 0.]) boundaryVector = hstack([boundary[1, :] - boundary[0, :], 0.]) - + crossVector = cross(pointPositionVector, boundaryVector) inside = False @@ -231,7 +244,7 @@ def isinside(point, boundary, ccw=True): inside = True else: inside = True - + return inside def sutherlandHodgman(subjectPolygon, clipPolygon): @@ -239,30 +252,30 @@ def sutherlandHodgman(subjectPolygon, clipPolygon): """ subjectPolygon = array(subjectPolygon) clipPolygon = array(clipPolygon) - + numClipEdges = len(clipPolygon) prev_clipVertex = clipPolygon[-1, :] - + # loop over clipping edges outputList = array(subjectPolygon) for iClip in range(numClipEdges): - + curr_clipVertex = clipPolygon[iClip, :] - clipBoundary = vstack([ curr_clipVertex, + clipBoundary = vstack([ curr_clipVertex, prev_clipVertex ]) - + inputList = array(outputList) if len(inputList) > 0: - prev_subjectVertex = inputList[-1, :] + prev_subjectVertex = inputList[-1, :] outputList = [] - + for iInput in range(len(inputList)): curr_subjectVertex = inputList[iInput, :] - + if isinside(curr_subjectVertex, clipBoundary): if not isinside(prev_subjectVertex, clipBoundary): subjectLineSegment = vstack([ curr_subjectVertex, @@ -277,7 +290,7 @@ def sutherlandHodgman(subjectPolygon, clipPolygon): pass prev_subjectVertex = curr_subjectVertex prev_clipVertex = curr_clipVertex - pass + pass pass return outputList diff --git a/hexrd/imageseries/__init__.py b/hexrd/imageseries/__init__.py new file mode 100644 index 00000000..d8cbbdf4 --- /dev/null +++ b/hexrd/imageseries/__init__.py @@ -0,0 +1,21 @@ 
+"""Handles series of images + +This file contains the generic ImageSeries class +and a function for loading. Adapters for particular +data formats are managed in the "load" subpackage. +""" +from .baseclass import ImageSeries +from . import imageseriesabc +from . import load +from . import save +from . import stats +from . import process +from . import omega + +def open(filename, format=None, **kwargs): + # find the appropriate adapter based on format specified + reg = load.Registry.adapter_registry + adapter = reg[format](filename, **kwargs) + return ImageSeries(adapter) + +write = save.write diff --git a/hexrd/imageseries/baseclass.py b/hexrd/imageseries/baseclass.py new file mode 100644 index 00000000..a99e0e49 --- /dev/null +++ b/hexrd/imageseries/baseclass.py @@ -0,0 +1,43 @@ +"""Base class for imageseries +""" +from .imageseriesabc import ImageSeriesABC + +class ImageSeries(ImageSeriesABC): + """collection of images + + Basic sequence class with additional properties for image shape and + metadata (possibly None). + """ + + def __init__(self, adapter): + """Build FrameSeries from adapter instance + + *adapter* - object instance based on abstract Sequence class with + properties for image shape, data type and metadata. + """ + self._adapter = adapter + + return + + def __getitem__(self, key): + return self._adapter[key] + + def __len__(self): + return len(self._adapter) + + def __iter__(self): + return self._adapter.__iter__() + + @property + def dtype(self): + return self._adapter.dtype + + @property + def shape(self): + return self._adapter.shape + + @property + def metadata(self): + return self._adapter.metadata + + pass # end class diff --git a/hexrd/imageseries/imageseriesabc.py b/hexrd/imageseries/imageseriesabc.py new file mode 100644 index 00000000..504ad45d --- /dev/null +++ b/hexrd/imageseries/imageseriesabc.py @@ -0,0 +1,5 @@ +"""Abstract Base Class""" +import collections + +class ImageSeriesABC(collections.Sequence): + pass diff --git a/hexrd/imageseries/imageseriesiter.py b/hexrd/imageseries/imageseriesiter.py new file mode 100644 index 00000000..a4e7dea3 --- /dev/null +++ b/hexrd/imageseries/imageseriesiter.py @@ -0,0 +1,23 @@ +"""imageseries iterator + +For use by adapter classes. 
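+
+A typical adapter simply returns one of these from its own __iter__
+(as the array, frame-cache, and HDF5 adapters in this package do):
+
+    def __iter__(self):
+        return ImageSeriesIterator(self)
+
+so that iteration yields frames 0 through len(series)-1 in order.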
+""" +import collections + +class ImageSeriesIterator(collections.Iterator): + + def __init__(self, iterable): + self._iterable = iterable + self._remaining = range(len(iterable)) + + def __iter__(self): + return self + + def __next__(self): + try: + return self._iterable[self._remaining.pop(0)] + except IndexError: + raise StopIteration + + def next(self): + return self.__next__() diff --git a/hexrd/imageseries/load/__init__.py b/hexrd/imageseries/load/__init__.py new file mode 100644 index 00000000..bcbae650 --- /dev/null +++ b/hexrd/imageseries/load/__init__.py @@ -0,0 +1,28 @@ +import abc +import pkgutil + +from ..imageseriesabc import ImageSeriesABC +from .registry import Registry + +# Metaclass for adapter registry + +class _RegisterAdapterClass(abc.ABCMeta): + + def __init__(cls, name, bases, attrs): + abc.ABCMeta.__init__(cls, name, bases, attrs) + Registry.register(cls) + +class ImageSeriesAdapter(ImageSeriesABC): + + __metaclass__ = _RegisterAdapterClass + + format = None + +# import all adapter modules + +for loader, name, ispkg in pkgutil.iter_modules(__path__): + if name is not 'registry': + __import__(name, globals=globals()) + # + # couldn't get the following line to work due to relative import issue: + # loader.find_module(name).load_module(name) diff --git a/hexrd/imageseries/load/array.py b/hexrd/imageseries/load/array.py new file mode 100644 index 00000000..76074aed --- /dev/null +++ b/hexrd/imageseries/load/array.py @@ -0,0 +1,64 @@ +"""Adapter class for numpy array (3D) +""" +from . import ImageSeriesAdapter +from ..imageseriesiter import ImageSeriesIterator + +import numpy as np + + +class ArrayImageSeriesAdapter(ImageSeriesAdapter): + """collection of images in numpy array""" + + format = 'array' + + def __init__(self, fname, **kwargs): + """Constructor for frame cache image series + + *fname* - should be None + *kwargs* - keyword arguments + . 'data' = a 3D array (double/float) + . 'metadata' = a dictionary + """ + data_arr = np.array(kwargs['data']) + if data_arr.ndim < 3: + self._data = np.tile(data_arr, (1, 1, 1)) + elif data_arr.ndim == 3: + self._data = data_arr + else: + raise RuntimeError( + 'input array must be 2-d or 3-d; you provided ndim=%d' + % data_arr.ndim + ) + + self._meta = kwargs.pop('meta', dict()) + self._shape = self._data.shape + self._nframes = self._shape[0] + self._nxny = self._shape[1:3] + + @property + def metadata(self): + """(read-only) Image sequence metadata + + Currently returns none + """ + return self._meta + + @property + def shape(self): + return self._nxny + + @property + def dtype(self): + return self._data.dtype + + def __getitem__(self, key): + return self._data[key] + + def __iter__(self): + return ImageSeriesIterator(self) + + # @memoize + def __len__(self): + return self._nframes + + pass # end class diff --git a/hexrd/imageseries/load/framecache.py b/hexrd/imageseries/load/framecache.py new file mode 100644 index 00000000..bc525646 --- /dev/null +++ b/hexrd/imageseries/load/framecache.py @@ -0,0 +1,130 @@ +"""Adapter class for frame caches +""" +import os + +import numpy as np +from scipy.sparse import csr_matrix +import yaml + +from . 
import ImageSeriesAdapter +from ..imageseriesiter import ImageSeriesIterator +from .metadata import yamlmeta + +class FrameCacheImageSeriesAdapter(ImageSeriesAdapter): + """collection of images in HDF5 format""" + + format = 'frame-cache' + + def __init__(self, fname, style='npz', **kwargs): + """Constructor for frame cache image series + + *fname* - filename of the yml file + *kwargs* - keyword arguments (none required) + """ + self._fname = fname + if style.lower() in ('yml', 'yaml', 'test'): + self._load_yml() + self._load_cache(from_yml=True) + else: + self._load_cache() + + def _load_yml(self): + with open(self._fname, "r") as f: + d = yaml.load(f) + datad = d['data'] + self._cache = datad['file'] + self._nframes = datad['nframes'] + self._shape = tuple(datad['shape']) + self._dtype = np.dtype(datad['dtype']) + self._meta = yamlmeta(d['meta'], path=self._cache) + + def _load_cache(self, from_yml=False): + """load into list of csr sparse matrices""" + self._framelist = [] + if from_yml: + bpath = os.path.dirname(self._fname) + if os.path.isabs(self._cache): + cachepath = self._cache + else: + cachepath = os.path.join(bpath, self._cache) + arrs = np.load(cachepath) + + for i in range(self._nframes): + row = arrs["%d_row" % i] + col = arrs["%d_col" % i] + data = arrs["%d_data" % i] + frame = csr_matrix((data, (row, col)), + shape=self._shape, dtype=self._dtype) + self._framelist.append(frame) + else: + arrs = np.load(self._fname) + # HACK: while the loaded npz file has a getitem method + # that mimicks a dict, it doesn't have a "pop" method. + # must make an empty dict to pop after assignment of + # class attributes so we can get to the metadata + keysd = dict.fromkeys(arrs.keys()) + self._nframes = int(arrs['nframes']) + self._shape = tuple(arrs['shape']) + self._dtype = np.dtype(str(arrs['dtype'])) + keysd.pop('nframes') + keysd.pop('shape') + keysd.pop('dtype') + for i in range(self._nframes): + row = arrs["%d_row" % i] + col = arrs["%d_col" % i] + data = arrs["%d_data" % i] + keysd.pop("%d_row" % i) + keysd.pop("%d_col" % i) + keysd.pop("%d_data" % i) + frame = csr_matrix((data, (row, col)), + shape=self._shape, + dtype=self._dtype) + self._framelist.append(frame) + # all rmaining keys should be metadata + for key in keysd: + keysd[key] = arrs[key] + self._meta = keysd + + + @property + def metadata(self): + """(read-only) Image sequence metadata + """ + return self._meta + + def load_metadata(self, indict): + """(read-only) Image sequence metadata + + Currently returns none + """ + # TODO: Remove this. Currently not used; + # saved temporarily for np.array trigger + metad = {} + for k, v in indict.items(): + if v == '++np.array': + newk = k + '-array' + metad[k] = np.array(indict.pop(newk)) + metad.pop(newk, None) + else: + metad[k] = v + return metad + + @property + def dtype(self): + return self._dtype + + @property + def shape(self): + return self._shape + + def __getitem__(self, key): + return self._framelist[key].toarray() + + def __iter__(self): + return ImageSeriesIterator(self) + + #@memoize + def __len__(self): + return self._nframes + + pass # end class diff --git a/hexrd/imageseries/load/hdf5.py b/hexrd/imageseries/load/hdf5.py new file mode 100644 index 00000000..7da6f1f5 --- /dev/null +++ b/hexrd/imageseries/load/hdf5.py @@ -0,0 +1,89 @@ +"""HDF5 adapter class +""" +import h5py +import warnings + +from . 
import ImageSeriesAdapter +from ..imageseriesiter import ImageSeriesIterator + +class HDF5ImageSeriesAdapter(ImageSeriesAdapter): + """collection of images in HDF5 format""" + + format = 'hdf5' + + def __init__(self, fname, **kwargs): + """Constructor for H5FrameSeries + + *fname* - filename of the HDF5 file + *kwargs* - keyword arguments, choices are: + path - (required) path of dataset in HDF5 file + """ + self.__h5name = fname + self.__path = kwargs['path'] + self.__dataname = kwargs.pop('dataname', 'images') + self.__images = '/'.join([self.__path, self.__dataname]) + self.__h5file = h5py.File(self.__h5name, 'r') + self.__image_dataset = self.__h5file[self.__images] + self.__data_group = self.__h5file[self.__path] + self._meta = self._getmeta() + + + def close(self): + self.__image_dataset = None + self.__data_group = None + self.__h5file.close() + self.__h5file = None + + + def __del__(self): + # Note this is not ideal, as the use of __del__ is problematic. However, + # it is highly unlikely that the usage of a ImageSeries would pose + # a problem. + # + # A warning will (hopefully) be emitted if an issue arises at some point. + try: + self.close() + except: + warnings.warn("HDF5ImageSeries could not close h5file") + pass + + + def __getitem__(self, key): + return self.__image_dataset[key] + + + def __iter__(self): + return ImageSeriesIterator(self) + + + def __len__(self): + return len(self.__image_dataset) + + + def _getmeta(self): + mdict = {} + for k, v in self.__data_group.attrs.items(): + mdict[k] = v + + return mdict + + + @property + def metadata(self): + """(read-only) Image sequence metadata + + note: metadata loaded on open and allowed to be modified + """ + return self._meta + + + @property + def dtype(self): + return self.__image_dataset.dtype + + + @property + def shape(self): + return self.__image_dataset.shape[1:] + + pass # end class diff --git a/hexrd/imageseries/load/imagefiles.py b/hexrd/imageseries/load/imagefiles.py new file mode 100644 index 00000000..4dfa7c12 --- /dev/null +++ b/hexrd/imageseries/load/imagefiles.py @@ -0,0 +1,255 @@ +"""Adapter class for list of image files +""" +from __future__ import print_function + +import sys +import os +import logging +import glob + +# # Put this before fabio import and reset level if you +# # want to control its import warnings. +# logging.basicConfig(level=logging.INFO) + +import numpy as np +import fabio +import yaml + +from . import ImageSeriesAdapter +from .metadata import yamlmeta +from ..imageseriesiter import ImageSeriesIterator + + +class ImageFilesImageSeriesAdapter(ImageSeriesAdapter): + """collection of image files""" + + format = 'image-files' + + def __init__(self, fname, **kwargs): + """Constructor for image files image series + + *fname* - should be yaml file with files and metadata sections + *kwargs* - keyword arguments + . 'files' = a list of image files + . 'metadata' = a dictionary + """ + self._fname = fname + self._load_yml() + self._process_files() + + #@memoize + def __len__(self): + if self._maxframes_tot > 0: + return min(self._nframes, self._maxframes_tot) + else: + return self._nframes + + def __getitem__(self, key): + if self.singleframes: + imgf = self._files[key] + img = fabio.open(imgf) + else: + (fnum, frame) = self._file_and_frame(key) + fimg = self.infolist[fnum].fabioimage + img = fimg.getframe(frame) + if self._dtype is not None: + # !!! 
handled in self._process_files
+            try:
+                dinfo = np.iinfo(self._dtype)
+            except ValueError:
+                dinfo = np.finfo(self._dtype)
+            if np.max(img.data) > dinfo.max:
+                raise RuntimeError("specified dtype will truncate image")
+            return np.array(img.data, dtype=self._dtype)
+        else:
+            return img.data
+
+    def __iter__(self):
+        return ImageSeriesIterator(self)
+
+    def __str__(self):
+        s = """==== imageseries from file list
+    fabio class: %s
+number of files: %s
+        nframes: %s
+          dtype: %s
+          shape: %s
+  single frames: %s
+""" % (self.fabioclass, len(self._files), len(self),
+       self.dtype, self.shape, self.singleframes)
+        return s
+
+    def _load_yml(self):
+        EMPTY = 'empty-frames'
+        MAXTOTF = 'max-total-frames'
+        MAXFILF = 'max-file-frames'
+        DTYPE = 'dtype'
+        with open(self._fname, "r") as f:
+            d = yaml.load(f, Loader=yaml.SafeLoader)
+        imgsd = d['image-files']
+        dname = imgsd['directory']
+        fglob = imgsd['files']
+        self._files = []
+        for g in fglob.split():
+            self._files += glob.glob(os.path.join(dname, g))
+
+        self.optsd = d['options'] if 'options' in d else {}
+        self._empty = self.optsd[EMPTY] if EMPTY in self.optsd else 0
+        self._maxframes_tot = self.optsd[MAXTOTF] if MAXTOTF in self.optsd else 0
+        self._maxframes_file = self.optsd[MAXFILF] if MAXFILF in self.optsd else 0
+        self._dtype = np.dtype(self.optsd[DTYPE]) if DTYPE in self.optsd else None
+
+        self._meta = yamlmeta(d['meta'])  #, path=imgsd)
+
+    def _process_files(self):
+        kw = {'empty': self._empty, 'max_frames': self._maxframes_file}
+        fcl = None
+        shp = None
+        dtp = None
+        nf = 0
+        self._singleframes = True
+        infolist = []
+        for imgf in self._files:
+            info = FileInfo(imgf, **kw)
+            infolist.append(info)
+            shp = self._checkvalue(shp, info.shape,
+                                   "inconsistent image shapes")
+            if self._dtype is not None:
+                dtp = self._dtype
+            else:
+                dtp = self._checkvalue(dtp, info.dtype,
+                                       "inconsistent image dtypes")
+            fcl = self._checkvalue(fcl, info.fabioclass,
+                                   "inconsistent image types")
+            nf += info.nframes
+            if info.nframes > 1:
+                self._singleframes = False
+
+        self._nframes = nf
+        self._shape = shp
+        self._dtype = dtp
+        self._fabioclass = fcl
+        self._infolist = infolist
+
+    # from make_imageseries_h5
+    @staticmethod
+    def _checkvalue(v, vtest, msg):
+        """helper: ensure value set consistently"""
+        if v is None:
+            val = vtest
+        else:
+            if vtest != v:
+                raise ValueError(msg)
+            else:
+                val = v
+
+        return val
+
+    def _file_and_frame(self, key):
+        """for multiframe images"""
+        # allow for negatives (just use [nframes + key])
+        nf = len(self)
+        if key < -nf or key >= nf:
+            msg = "frame out of range: %s" % key
+            raise LookupError(msg)
+        k = key if key >= 0 else (nf + key)
+
+        frame = -nf - 1
+        fnum = 0
+        for info in self.infolist:
+            if k < info.nframes:
+                frame = k + info.empty
+                break
+            else:
+                k -= info.nframes
+                fnum += 1
+
+        return fnum, frame
+
+    # ======================================== API
+
+    @property
+    def metadata(self):
+        """(read-only) Image sequence metadata"""
+        return self._meta
+
+    @property
+    def shape(self):
+        return self._shape
+
+    @property
+    def dtype(self):
+        return self._dtype
+
+    @property
+    def infolist(self):
+        return self._infolist
+
+    @property
+    def fabioclass(self):
+        return self._fabioclass
+
+    @property
+    def singleframes(self):
+        """indicates whether all files are single frames"""
+        return self._singleframes
+
+    pass  # end class
+
+
+class FileInfo(object):
+    """class for managing individual file information"""
+    def __init__(self, filename, **kwargs):
+        self.filename = filename
+        img = 
fabio.open(filename) + self._fabioclass = img.classname + self._imgframes = img.nframes + self.dat = img.data + self.fabioimage = img + + d = kwargs.copy() + self._empty = d.pop('empty', 0) + # user may set max-frames to 0, indicating use all frames + self._maxframes = d.pop('max_frames', 0) + if self._maxframes == 0: + self._maxframes = self._imgframes + if self._empty >= self._imgframes: + msg = "more empty frames than images: %s" % self.filename + raise ValueError(msg) + + def __str__(self): + s = """==== image file + name: %s +fabio class: %s + frames: %s + dtype: %s + shape: %s\n""" % (self.filename, self.fabioclass, + self.nframes, self.dtype, self.shape) + + return s + + @property + def empty(self): + return self._empty + + @property + def shape(self): + return self.dat.shape + + @property + def dtype(self): + return self.dat.dtype + + @property + def fabioclass(self): + return self._fabioclass + + @property + def nframes(self): + return min(self._maxframes, self._imgframes - self.empty) diff --git a/hexrd/imageseries/load/metadata.py b/hexrd/imageseries/load/metadata.py new file mode 100644 index 00000000..380a0e5a --- /dev/null +++ b/hexrd/imageseries/load/metadata.py @@ -0,0 +1,45 @@ +"""metadata tools for imageseries""" +import os + +import yaml +import numpy as np + +def yamlmeta(meta, path=None): + """ Image sequence metadata + + *path* is a full path or directory used to find the relative location + of files loaded via the trigger mechanism + +The usual yaml dictionary is returned with the exception that +if the first word of a multiword string is an exclamation mark ("!"), +it will trigger further processing determined by the rest of the string. +Currently only one trigger is used: + +! load-numpy-object + the returned value will the numpy object read from the file +""" + if path is not None: + path = os.path.dirname(path) + else: + path = '.' + + metad = {} + for k, v in meta.items(): + # check for triggers + istrigger = False + if isinstance(v, basestring): + words = v.split() + istrigger = (words[0] == "!") and (len(words) > 1) + + if v == '++np.array': # old way used in frame-cache (obsolescent) + newk = k + '-array' + metad[k] = np.array(meta.pop(newk)) + metad.pop(newk, None) + elif istrigger: + if words[1] == "load-numpy-array": + fname = os.path.join(path, words[2]) + metad[k] = np.load(fname) + else: + metad[k] = v + + return metad diff --git a/hexrd/imageseries/load/registry.py b/hexrd/imageseries/load/registry.py new file mode 100644 index 00000000..529487c7 --- /dev/null +++ b/hexrd/imageseries/load/registry.py @@ -0,0 +1,13 @@ +"""Adapter registry +""" +class Registry(object): + """Registry for imageseries adapters""" + adapter_registry = dict() + + @classmethod + def register(cls, acls): + """Register adapter class""" + if acls.__name__ is not 'ImageSeriesAdapter': + cls.adapter_registry[acls.format] = acls + + pass # end class diff --git a/hexrd/imageseries/load/trivial.py b/hexrd/imageseries/load/trivial.py new file mode 100644 index 00000000..c3871138 --- /dev/null +++ b/hexrd/imageseries/load/trivial.py @@ -0,0 +1,5 @@ +"""Trivial adapter: just testing auto-import""" +from . 
import ImageSeriesAdapter + +class TrivialAdapter(ImageSeriesAdapter): + pass diff --git a/hexrd/imageseries/omega.py b/hexrd/imageseries/omega.py new file mode 100644 index 00000000..3cda9f3c --- /dev/null +++ b/hexrd/imageseries/omega.py @@ -0,0 +1,198 @@ +"""Handle omega (specimen rotation) metadata + +* OmegaWedges class specifies omega metadata in wedges +""" +import numpy as np + +from .baseclass import ImageSeries + +OMEGA_KEY = 'omega' + +class OmegaImageSeries(ImageSeries): + """ImageSeries with omega metadata""" + DFLT_TOL = 1.0e-6 + TAU = 360 + + def __init__(self, ims): + """This class is initialized with an existing imageseries""" + # check for omega metadata + if OMEGA_KEY in ims.metadata: + self._omega = ims.metadata[OMEGA_KEY] + if len(ims) != self._omega.shape[0]: + msg = 'omega array mismatch: array has %s frames, expecting %s' + msg = msg % (self._omega.shape[0], len(ims)) + raise OmegaSeriesError(msg) + else: + raise OmegaSeriesError('Imageseries has no omega metadata') + + super(OmegaImageSeries, self).__init__(ims) + self._make_wedges() + + def _make_wedges(self, tol=DFLT_TOL): + nf = len(self) + om = self.omega + + # find the frames where the wedges break + starts = [0] + delta = om[0, 1] - om[0, 0] + omlast = om[0, 1] + for f in range(1, nf): + if delta <= 0: + raise OmegaSeriesError('omega array must be increasing') + # check whether delta changes or ranges not contiguous + d = om[f,1] - om[f,0] + if (np.abs(d - delta) > tol) or (np.abs(om[f,0] - omlast) > tol): + starts.append(f) + delta = d + omlast = om[f, 1] + starts.append(nf) + + nw = len(starts) - 1 + nf0 = 0 + self._wedge_om = np.zeros((nw, 3)) + self._wedge_f = np.zeros((nw, 2), dtype=int) + self._omegawedges = OmegaWedges(nf) + for s in range(nw): + ostart = om[starts[s], 0] + ostop = om[starts[s + 1] - 1, 1] + steps = starts[s+1] - starts[s] + self._omegawedges.addwedge(ostart, ostop, steps) + # + delta = (ostop - ostart)/steps + self._wedge_om[s, :] = (ostart, ostop, delta) + self._wedge_f[s, 0] = nf0 + self._wedge_f[s, 1] = steps + nf0 += steps + assert(nf0 == nf) + + @property + def omega(self): + """return omega range array (nframes, 2)""" + return self._omega + + @property + def omegawedges(self): + return self._omegawedges + + @property + def nwedges(self): + return self.omegawedges.nwedges + + def wedge(self, i): + """return i'th wedge as a dictionary""" + d = self.omegawedges.wedges[i] + delta = (d['ostop'] - d['ostart'])/d['nsteps'] + d.update(delta=delta) + return d + + def omega_to_frame(self, om): + """Return frame and wedge which includes given omega, -1 if not found""" + f = -1 + w = -1 + for i in range(len(self._wedge_om)): + omin = self._wedge_om[i, 0] + omax = self._wedge_om[i, 1] + omcheck = omin + np.mod(om - omin, self.TAU) + if omcheck < omax: + odel = self._wedge_om[i, 2] + f = self._wedge_f[i,0] + int(np.floor((omcheck - omin)/odel)) + w = i + break + + return f, w + + def omegarange_to_frames(self, omin, omax): + """Return list of frames for range of omegas""" + noframes = () + f0, w0 = self.omega_to_frame(omin) + if w0 < 0: + return noframes + f1, w1 = self.omega_to_frame(omax) + if w1 < 0: + return noframes + + # if same wedge, require frames be increasing + if (w0 == w1) and (f1 > f0): + return range(f0, f1+1) + + # case: adjacent wedges with 2pi jump in omega + w0max = self._wedge_om[w0, 1] + w1min = self._wedge_om[w1, 0] + + if np.mod(np.abs(w1min - w0max), self.TAU) < self.DFLT_TOL: + r0 = range(f0, self._wedge_f[w0, 0] + self._wedge_f[w0, 1]) + r1 = range(self._wedge_f[w1, 
0], f1 + 1) + return r0 + r1 + + return noframes + + +class OmegaWedges(object): + """piecewise linear omega ranges""" + def __init__(self, nframes): + """Constructor for OmegaWedge""" + self.nframes = nframes + self._wedges = [] + # + # ============================== API + # + @property + def omegas(self): + """n x 2 array of omega values, one per frame""" + if self.nframes != self.wframes: + msg = "number of frames (%s) does not match "\ + "number of wedge frames (%s)" %(self.nframes, self.wframes) + raise OmegaSeriesError(msg) + + oa = np.zeros((self.nframes, 2)) + wstart = 0 + for w in self.wedges: + ns = w['nsteps'] + wr = range(wstart, wstart + ns) + wa0 = np.linspace(w['ostart'], w['ostop'], ns + 1) + oa[wr, 0] = wa0[:-1] + oa[wr, 1] = wa0[1:] + wstart += ns + + return oa + + @property + def nwedges(self): + """number of wedges""" + return len(self._wedges) + + @property + def wedges(self): + """list of wedges (dictionaries)""" + return self._wedges + + def addwedge(self, ostart, ostop, nsteps, loc=None): + """add wedge to list""" + d = dict(ostart=ostart, ostop=ostop, nsteps=nsteps) + if loc is None: + loc = self.nwedges + + self.wedges.insert(loc, d) + + def delwedge(self, i): + """delete wedge number i""" + self.wedges.pop(i) + + @property + def wframes(self): + """number of frames in wedges""" + wf = [w['nsteps'] for w in self.wedges] + return np.int(np.sum(wf)) + + def save_omegas(self, fname): + """save omegas to text file""" + np.save(fname, self.omegas) + + pass # end class + + +class OmegaSeriesError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) diff --git a/hexrd/imageseries/process.py b/hexrd/imageseries/process.py new file mode 100644 index 00000000..02173b83 --- /dev/null +++ b/hexrd/imageseries/process.py @@ -0,0 +1,114 @@ +"""Class for processing individual frames""" +import copy + +import numpy as np + +from .baseclass import ImageSeries + + +class ProcessedImageSeries(ImageSeries): + """Images series with mapping applied to frames""" + FLIP = 'flip' + DARK = 'dark' + RECT = 'rectangle' + + _opdict = {} + + def __init__(self, imser, oplist, **kwargs): + """imsageseries based on existing one with image processing options + + *imser* - an existing imageseries + *oplist* - list of processing operations; + a list of pairs (key, data) pairs, with key specifying the + operation to perform using specified data + + *keyword args* + 'frame_list' - specify subset of frames by list + + """ + self._imser = imser + self._meta = copy.deepcopy(imser.metadata) + self._oplist = oplist + self._frames = kwargs.pop('frame_list', None) + self._hasframelist = (self._frames is not None) + + self.addop(self.DARK, self._subtract_dark) + self.addop(self.FLIP, self._flip) + self.addop(self.RECT, self._rectangle) + + def __getitem__(self, key): + return self._process_frame(self._get_index(key)) + + def _get_index(self, key): + return self._frames[key] if self._hasframelist else key + + def __len__(self): + return len(self._frames) if self._hasframelist else len(self._imser) + + def _process_frame(self, key): + # note: key refers to original imageseries + img = np.copy(self._imser[key]) + for k, d in self.oplist: + func = self._opdict[k] + img = func(img, d) + + return img + + def _subtract_dark(self, img, dark): + # need to check for values below zero + # !!! careful, truncation going on here;necessary to promote dtype? 
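+        # Editorial note: np.where keeps the input dtype, so for uint16
+        # frames img=5, dark=3 gives 2, while img=2, dark=3 gives 0
+        # rather than the wrap-around 65535 that a plain subtraction
+        # would produce.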
+ return np.where(img > dark, img - dark, 0) + + def _rectangle(self, img, r): + # restrict to rectangle + return img[r[0, 0]:r[0, 1], r[1, 0]:r[1, 1]] + + def _flip(self, img, flip): + if flip in ('y', 'v'): # about y-axis (vertical) + pimg = img[:, ::-1] + elif flip in ('x', 'h'): # about x-axis (horizontal) + pimg = img[::-1, :] + elif flip in ('vh', 'hv', 'r180'): # 180 degree rotation + pimg = img[::-1, ::-1] + elif flip in ('t', 'T'): # transpose (possible shape change) + pimg = img.T + elif flip in ('ccw90', 'r90'): # rotate 90 (possible shape change) + pimg = img.T[::-1, :] + elif flip in ('cw90', 'r270'): # rotate 270 (possible shape change) + pimg = img.T[:, ::-1] + else: + pimg = img + + return pimg + # + # ==================== API + # + + @property + def dtype(self): + return self[0].dtype + + @property + def shape(self): + return self[0].shape + + @property + def metadata(self): + # this is a modifiable copy of metadata of the original imageseries + return self._meta + + @classmethod + def addop(cls, key, func): + """Add operation to processing options + + *key* - string to use to specify this op + *func* - function to call for this op: f(data) + """ + cls._opdict[key] = func + + @property + def oplist(self): + """list of operations to apply""" + return self._oplist + + pass # end class diff --git a/hexrd/imageseries/save.py b/hexrd/imageseries/save.py new file mode 100644 index 00000000..f6240063 --- /dev/null +++ b/hexrd/imageseries/save.py @@ -0,0 +1,244 @@ +"""Write imageseries to various formats""" +from __future__ import print_function +import abc +import os +import warnings + +import numpy as np +import h5py +import yaml + +from hexrd.matrixutil import extract_ijv + +MAX_NZ_FRACTION = 0.1 # 10% sparsity trigger for frame-cache write + + +# ============================================================================= +# METHODS +# ============================================================================= + + +def write(ims, fname, fmt, **kwargs): + """write imageseries to file with options + + *ims* - an imageseries + *fname* - name of file + *fmt* - a format string + *kwargs* - options specific to format + """ + wcls = _Registry.getwriter(fmt) + w = wcls(ims, fname, **kwargs) + w.write() + + +# Registry +class _RegisterWriter(abc.ABCMeta): + + def __init__(cls, name, bases, attrs): + abc.ABCMeta.__init__(cls, name, bases, attrs) + _Registry.register(cls) + + +class _Registry(object): + """Registry for imageseries writers""" + writer_registry = dict() + + @classmethod + def register(cls, wcls): + """Register writer class""" + if wcls.__name__ is not 'Writer': + cls.writer_registry[wcls.fmt] = wcls + + @classmethod + def getwriter(cls, name): + """return instance associated with name""" + return cls.writer_registry[name] + # + pass # end class + + +class Writer(object): + """Base class for writers""" + __metaclass__ = _RegisterWriter + fmt = None + + def __init__(self, ims, fname, **kwargs): + self._ims = ims + self._shape = ims.shape + self._dtype = ims.dtype + self._nframes = len(ims) + self._meta = ims.metadata + self._fname = fname + self._opts = kwargs + + # split filename into components + tmp = os.path.split(fname) + self._fname_dir = tmp[0] + tmp = os.path.splitext(tmp[1]) + self._fname_base = tmp[0] + self._fname_suff = tmp[1] + + pass # end class + + +class WriteH5(Writer): + fmt = 'hdf5' + dflt_gzip = 1 + dflt_chrows = 0 + dflt_shuffle = True + + def __init__(self, ims, fname, **kwargs): + """Write imageseries in HDF5 file + + Required Args: + path - the 
path in HDF5 file
+
+        Options:
+        gzip - 0-9; 0 turns off compression; 1 is the default
+        chunk_rows - number of rows per chunk; default is all
+        """
+        Writer.__init__(self, ims, fname, **kwargs)
+        self._path = self._opts['path']
+
+    #
+    # ======================================== API
+    #
+    def write(self):
+        """Write imageseries to HDF5 file"""
+        f = h5py.File(self._fname, "w")
+        g = f.create_group(self._path)
+        s0, s1 = self._shape
+
+        ds = g.create_dataset('images', (self._nframes, s0, s1), self._dtype,
+                              **self.h5opts)
+
+        for i in range(self._nframes):
+            ds[i, :, :] = self._ims[i]
+
+        # add metadata
+        for k, v in self._meta.items():
+            g.attrs[k] = v
+
+    @property
+    def h5opts(self):
+        d = {}
+
+        # shuffle
+        shuffle = self._opts.pop('shuffle', self.dflt_shuffle)
+        d['shuffle'] = shuffle
+
+        # compression
+        compress = self._opts.pop('gzip', self.dflt_gzip)
+        if compress > 9:
+            raise ValueError('gzip compression cannot exceed 9: %s' % compress)
+        if compress > 0:
+            d['compression'] = 'gzip'
+            d['compression_opts'] = compress
+
+        # chunk size
+        s0, s1 = self._shape
+        chrows = self._opts.pop('chunk_rows', self.dflt_chrows)
+        if chrows < 1 or chrows > s0:
+            chrows = s0
+        d['chunks'] = (1, chrows, s1)
+
+        return d
+
+    pass  # end class
+
+
+class WriteFrameCache(Writer):
+    """info from yml file"""
+    fmt = 'frame-cache'
+
+    def __init__(self, ims, fname, **kwargs):
+        """write yml file with frame cache info
+
+        kwargs has keys:
+
+        cache_file - name of array cache file
+        meta - metadata dictionary
+        """
+        Writer.__init__(self, ims, fname, **kwargs)
+        self._thresh = self._opts['threshold']
+        cf = kwargs['cache_file']
+        if os.path.isabs(cf):
+            self._cache = cf
+        else:
+            cdir = os.path.dirname(fname)
+            self._cache = os.path.join(cdir, cf)
+        self._cachename = cf
+
+    def _process_meta(self, save_omegas=False):
+        d = {}
+        for k, v in self._meta.items():
+            if isinstance(v, np.ndarray) and save_omegas:
+                # Save as a numpy array file
+                # if file does not exist (careful about directory)
+                # create new file
+                cdir = os.path.dirname(self._cache)
+                b = self._fname_base
+                fname = os.path.join(cdir, "%s-%s.npy" % (b, k))
+                if not os.path.exists(fname):
+                    np.save(fname, v)
+
+                # add trigger in yml file
+                d[k] = "! load-numpy-array %s" % fname
+            else:
+                d[k] = v
+
+        return d
+
+    def _write_yml(self):
+        datad = {'file': self._cachename, 'dtype': str(self._ims.dtype),
+                 'nframes': len(self._ims), 'shape': list(self._ims.shape)}
+        info = {'data': datad, 'meta': self._process_meta(save_omegas=True)}
+        with open(self._fname, "w") as f:
+            yaml.dump(info, f)
+
+    def _write_frames(self):
+        """also save shape array as originally done (before yaml)"""
+        buff_size = self._ims.shape[0]*self._ims.shape[1]
+        rows = np.empty(buff_size, dtype=np.uint16)
+        cols = np.empty(buff_size, dtype=np.uint16)
+        vals = np.empty(buff_size, dtype=self._ims.dtype)
+        arrd = dict()
+        for i in range(len(self._ims)):
+            # ???: make it so we can use enumerate on self._ims? 
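+            # Editorial sketch (not original code): the (row, col, data)
+            # triplets written below round-trip through scipy's CSR format
+            # on load, essentially:
+            #
+            #     from scipy.sparse import csr_matrix
+            #     frame = csr_matrix((vals[:count],
+            #                         (rows[:count], cols[:count])),
+            #                        shape=self._ims.shape)
+            #     dense = frame.toarray()
+            #
+            # which mirrors what FrameCacheImageSeriesAdapter does when it
+            # rebuilds frames from the npz file.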
+            # FIXME: in __init__() of ProcessedImageSeries:
+            # 'ProcessedImageSeries' object has no attribute '_adapter'
+
+            # wrapper to find (sparse) pixels above threshold
+            count = extract_ijv(self._ims[i], self._thresh,
+                                rows, cols, vals)
+
+            # check the sparsity
+            #
+            # FIXME: formalize this a little better
+            # ???: maybe set a hard limit of total nonzeros for the imageseries
+            # ???: could pass as a kwarg on open
+            fullness = count / float(buff_size)
+            if fullness > MAX_NZ_FRACTION:
+                sparseness = 100.*(1 - fullness)
+                msg = "frame %d is %4.2f%% sparse (cutoff is 90%%)" \
+                    % (i, sparseness)
+                warnings.warn(msg)
+            arrd['%d_row' % i] = rows[:count].copy()
+            arrd['%d_col' % i] = cols[:count].copy()
+            arrd['%d_data' % i] = vals[:count].copy()
+            pass
+        arrd['shape'] = self._ims.shape
+        arrd['nframes'] = len(self._ims)
+        arrd['dtype'] = str(self._ims.dtype)
+        arrd.update(self._process_meta())
+        np.savez_compressed(self._cache, **arrd)
+
+    def write(self, output_yaml=False):
+        """writes frame cache for imageseries
+
+        presumes sparse forms are small enough to contain all frames
+        """
+        self._write_frames()
+        if output_yaml:
+            self._write_yml()
diff --git a/hexrd/imageseries/stats.py b/hexrd/imageseries/stats.py
new file mode 100644
index 00000000..3247f936
--- /dev/null
+++ b/hexrd/imageseries/stats.py
@@ -0,0 +1,86 @@
+"""Stats for imageseries"""
+from __future__ import print_function
+
+import logging
+import numpy as np
+
+from psutil import virtual_memory
+
+from hexrd.imageseries.process import ProcessedImageSeries as PIS
+
+# Default buffer: half of the available virtual memory
+#STATS_BUFFER = 419430400 # 50 GE frames
+#STATS_BUFFER = 838860800 # 100 GE frames
+vmem = virtual_memory()
+STATS_BUFFER = int(0.5*vmem.available)
+
+def max(ims, nframes=0):
+    nf = _nframes(ims, nframes)
+    imgmax = ims[0]
+    for i in range(1, nf):
+        imgmax = np.maximum(imgmax, ims[i])
+    return imgmax
+
+def average(ims, nframes=0):
+    """return image with average values over all frames"""
+    nf = _nframes(ims, nframes)
+    avg = np.array(ims[0], dtype=float)
+    for i in range(1, nf):
+        avg += ims[i]
+    return avg/nf
+
+def median(ims, nframes=0):
+    """return image with median values over all frames"""
+    # use percentile since it has better performance
+    return percentile(ims, 50, nframes=nframes)
+
+def percentile(ims, pct, nframes=0):
+    """return image with given percentile values over all frames"""
+    # could be done rectangle by rectangle if the full series is
+    # too big for memory
+    nf = _nframes(ims, nframes)
+    dt = ims.dtype
+    (nr, nc) = ims.shape
+    nrpb = _rows_in_buffer(nframes, nf*nc*dt.itemsize)
+
+    # now build the result a rectangle at a time
+    img = np.zeros_like(ims[0])
+    for rr in _row_ranges(nr, nrpb):
+        rect = np.array([[rr[0], rr[1]], [0, nc]])
+        pims = PIS(ims, [('rectangle', rect)])
+        img[rr[0]:rr[1], :] = np.percentile(_toarray(pims, nf), pct, axis=0)
+    return img
+
+#
+# ==================== Utilities
+#
+def _nframes(ims, nframes):
+    """number of frames to use: len(ims) or specified number"""
+    mynf = len(ims)
+    return np.min((mynf, nframes)) if nframes > 0 else mynf
+
+def _toarray(ims, nframes):
+    ashp = (nframes,) + ims.shape
+    a = np.zeros(ashp, dtype=ims.dtype)
+    for i in range(nframes):
+        logging.info('frame: %s', i)
+        a[i] = ims[i]
+
+    return a
+
+def _row_ranges(n, m):
+    """return row ranges, representing m rows or remainder, until exhausted"""
+    i = 0
+    while i < n:
+        imax = i+m
+        if imax <= n:
+            yield (i, imax)
+        else:
+            yield (i, n)
+        i = imax
+
+def _rows_in_buffer(ncol, rsize):
+    """number of rows in buffer
+
+    NOTE: 
Use ceiling to make sure at it has at least one row""" + return int(np.ceil(STATS_BUFFER/rsize)) diff --git a/hexrd/imageseries/tests/__init__.py b/hexrd/imageseries/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hexrd/imageseries/tests/common.py b/hexrd/imageseries/tests/common.py new file mode 100644 index 00000000..a8f4b0f1 --- /dev/null +++ b/hexrd/imageseries/tests/common.py @@ -0,0 +1,47 @@ +import numpy as np +import unittest + +from hexrd import imageseries + +_NFXY = (3, 7, 5) + +class ImageSeriesTest(unittest.TestCase): + pass + +def make_array(): + a = np.zeros(_NFXY) + ind = np.array([0,1,2]) + a[ind, 1,2] = 1 + ind + return a + +def make_array_ims(): + is_a = imageseries.open(None, 'array', data=make_array(), + meta=make_meta()) + return is_a + +def compare(ims1, ims2): + """compare two imageseries""" + if len(ims1) != len(ims2): + raise ValueError("lengths do not match") + + if ims1.dtype is not ims2.dtype: + raise ValueError("types do not match") + + maxdiff = 0.0 + for i in range(len(ims1)): + f1 = ims1[i] + f2 = ims2[i] + fdiff = np.linalg.norm(f1 - f2) + maxdiff = np.maximum(maxdiff, fdiff) + + return maxdiff + +def make_meta(): + return {'testing': '1,2,3'} + +def compare_meta(ims1, ims2): + # check metadata (simple immutable cases only for now) + + m1 = set(ims1.metadata.items()) + m2 = set(ims2.metadata.items()) + return m1.issubset(m2) and m2.issubset(m1) diff --git a/hexrd/imageseries/tests/test_formats.py b/hexrd/imageseries/tests/test_formats.py new file mode 100644 index 00000000..9d9b027d --- /dev/null +++ b/hexrd/imageseries/tests/test_formats.py @@ -0,0 +1,118 @@ +import os +import tempfile +import unittest + +import numpy as np + +from .common import ImageSeriesTest +from .common import make_array_ims, compare, compare_meta + +from hexrd import imageseries + + +class ImageSeriesFormatTest(ImageSeriesTest): + @classmethod + def setUpClass(cls): + cls.tmpdir = tempfile.mkdtemp() + + @classmethod + def tearDownClass(cls): + os.rmdir(cls.tmpdir) + + +class TestFormatH5(ImageSeriesFormatTest): + + def setUp(self): + self.h5file = os.path.join(self.tmpdir, 'test_ims.h5') + self.h5path = 'array-data' + self.fmt = 'hdf5' + self.is_a = make_array_ims() + + def tearDown(self): + os.remove(self.h5file) + + + def test_fmth5(self): + """save/load HDF5 format""" + imageseries.write(self.is_a, self.h5file, self.fmt, path=self.h5path) + is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) + + diff = compare(self.is_a, is_h) + self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertTrue(compare_meta(self.is_a, is_h)) + + def test_fmth5_nparray(self): + """HDF5 format with numpy array metadata""" + key = 'np-array' + npa = np.array([0,2.0,1.3]) + self.is_a.metadata[key] = npa + imageseries.write(self.is_a, self.h5file, self.fmt, path=self.h5path) + is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) + meta = is_h.metadata + + diff = np.linalg.norm(meta[key] - npa) + self.assertAlmostEqual(diff, 0., "h5 numpy array metadata failed") + + def test_fmth5_nocompress(self): + """HDF5 options: no compression""" + imageseries.write(self.is_a, self.h5file, self.fmt, + path=self.h5path, gzip=0) + is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) + + diff = compare(self.is_a, is_h) + self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertTrue(compare_meta(self.is_a, is_h)) + + def test_fmth5_compress_err(self): + """HDF5 options: compression level out of range""" + with 
self.assertRaises(ValueError): + imageseries.write(self.is_a, self.h5file, self.fmt, + path=self.h5path, gzip=10) + + def test_fmth5_chunk(self): + """HDF5 options: chunk size""" + imageseries.write(self.is_a, self.h5file, self.fmt, + path=self.h5path, chunk_rows=0) + is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) + + diff = compare(self.is_a, is_h) + self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertTrue(compare_meta(self.is_a, is_h)) + +class TestFormatFrameCache(ImageSeriesFormatTest): + + def setUp(self): + self.fcfile = os.path.join(self.tmpdir, 'frame-cache.yml') + self.fmt = 'frame-cache' + self.thresh = 0.5 + self.cache_file='frame-cache.npz' + self.is_a = make_array_ims() + + def tearDown(self): + os.remove(self.fcfile) + os.remove(os.path.join(self.tmpdir, self.cache_file)) + + @unittest.skip("need to fix unit tests for framecache") + def test_fmtfc(self): + """save/load frame-cache format""" + imageseries.write(self.is_a, self.fcfile, self.fmt, + threshold=self.thresh, cache_file=self.cache_file) + is_fc = imageseries.open(self.fcfile, self.fmt, style='yml') + diff = compare(self.is_a, is_fc) + self.assertAlmostEqual(diff, 0., "frame-cache reconstruction failed") + self.assertTrue(compare_meta(self.is_a, is_fc)) + + @unittest.skip("need to fix unit tests for framecache") + def test_fmtfc_nparray(self): + """frame-cache format with numpy array metadata""" + key = 'np-array' + npa = np.array([0,2.0,1.3]) + self.is_a.metadata[key] = npa + + imageseries.write(self.is_a, self.fcfile, self.fmt, + threshold=self.thresh, cache_file=self.cache_file) + is_fc = imageseries.open(self.fcfile, self.fmt) + meta = is_fc.metadata + diff = np.linalg.norm(meta[key] - npa) + self.assertAlmostEqual(diff, 0., + "frame-cache numpy array metadata failed") diff --git a/hexrd/imageseries/tests/test_omega.py b/hexrd/imageseries/tests/test_omega.py new file mode 100644 index 00000000..fa5672b3 --- /dev/null +++ b/hexrd/imageseries/tests/test_omega.py @@ -0,0 +1,89 @@ +import numpy as np + +from .common import ImageSeriesTest + +from hexrd import imageseries +from hexrd.imageseries.omega import OmegaSeriesError, OmegaImageSeries + +class TestOmegaSeries(ImageSeriesTest): + + @staticmethod + def make_ims(nf, meta): + a = np.zeros((nf, 2, 2)) + ims = imageseries.open(None, 'array', data=a, meta=meta) + return ims + + def test_no_omega(self): + ims = self.make_ims(2, {}) + with self.assertRaises(OmegaSeriesError): + oms = OmegaImageSeries(ims) + + def test_nframes_mismatch(self): + m = dict(omega=np.zeros((3, 2))) + ims = self.make_ims(2, m) + with self.assertRaises(OmegaSeriesError): + oms = OmegaImageSeries(ims) + + def test_negative_delta(self): + om = np.zeros((3, 2)) + om[0,1] = -0.5 + m = dict(omega=om, dtype=np.float) + ims = self.make_ims(3, m) + with self.assertRaises(OmegaSeriesError): + oms = OmegaImageSeries(ims) + + def test_one_wedge(self): + nf = 5 + a = np.linspace(0, nf+1, nf+1) + om = np.zeros((nf, 2)) + om[:,0] = a[:-1] + om[:,1] = a[1:] + m = dict(omega=om, dtype=np.float) + ims = self.make_ims(nf, m) + oms = OmegaImageSeries(ims) + self.assertEqual(oms.nwedges, 1) + + def test_two_wedges(self): + nf = 5 + a = np.linspace(0, nf+1, nf+1) + om = np.zeros((nf, 2)) + om[:,0] = a[:-1] + om[:,1] = a[1:] + om[3:, :] += 0.1 + m = dict(omega=om, dtype=np.float) + ims = self.make_ims(nf, m) + oms = OmegaImageSeries(ims) + self.assertEqual(oms.nwedges, 2) + + def test_compare_omegas(self): + nf = 5 + a = np.linspace(0, nf+1, nf+1) + om = np.zeros((nf, 2)) + 
om[:,0] = a[:-1] + om[:,1] = a[1:] + om[3:, :] += 0.1 + m = dict(omega=om, dtype=np.float) + ims = self.make_ims(nf, m) + oms = OmegaImageSeries(ims) + domega = om - oms.omegawedges.omegas + dnorm = np.linalg.norm(domega) + + msg='omegas from wedges do not match originals' + self.assertAlmostEqual(dnorm, 0., msg=msg) + + def test_wedge_delta(self): + nf = 5 + a = np.linspace(0, nf+1, nf+1) + om = np.zeros((nf, 2)) + om[:,0] = a[:-1] + om[:,1] = a[1:] + om[3:, :] += 0.1 + m = dict(omega=om, dtype=np.float) + ims = self.make_ims(nf, m) + oms = OmegaImageSeries(ims) + + mydelta =om[nf - 1, 1] - om[nf - 1, 0] + d = oms.wedge(oms.nwedges - 1) + self.assertAlmostEqual(d['delta'], mydelta) + + # end class diff --git a/hexrd/imageseries/tests/test_process.py b/hexrd/imageseries/tests/test_process.py new file mode 100644 index 00000000..f0ccede7 --- /dev/null +++ b/hexrd/imageseries/tests/test_process.py @@ -0,0 +1,106 @@ +import numpy as np + +from .common import ImageSeriesTest, make_array, make_array_ims, compare + +from hexrd import imageseries +from hexrd.imageseries import process, ImageSeries + +class TestImageSeriesProcess(ImageSeriesTest): + + def _runfliptest(self, a, flip, aflip): + is_a = imageseries.open(None, 'array', data=a) + ops = [('flip', flip)] + is_p = process.ProcessedImageSeries(is_a, ops) + is_aflip = imageseries.open(None, 'array', data=aflip) + diff = compare(is_aflip, is_p) + msg = "flipped [%s] image series failed" % flip + self.assertAlmostEqual(diff, 0., msg=msg) + + def test_process(self): + """Processed image series""" + is_a = make_array_ims() + is_p = process.ProcessedImageSeries(is_a, []) + diff = compare(is_a, is_p) + msg = "processed image series failed to reproduce original" + self.assertAlmostEqual(diff, 0., msg) + + def test_process_flip_t(self): + """Processed image series: flip transpose""" + flip = 't' + a = make_array() + aflip = np.transpose(a, (0, 2, 1)) + self._runfliptest(a, flip, aflip) + + def test_process_flip_v(self): + """Processed image series: flip vertical""" + flip = 'v' + a = make_array() + aflip = a[:, :, ::-1] + self._runfliptest(a, flip, aflip) + + def test_process_flip_h(self): + """Processed image series: flip horizontal""" + flip = 'h' + a = make_array() + aflip = a[:, ::-1, :] + self._runfliptest(a, flip, aflip) + + def test_process_flip_vh(self): + """Processed image series: flip vertical + horizontal""" + flip = 'vh' + a = make_array() + aflip = a[:, ::-1, ::-1] + self._runfliptest(a, flip, aflip) + + def test_process_flip_r90(self): + """Processed image series: flip counterclockwise 90""" + flip = 'ccw90' + a = make_array() + aflip = np.transpose(a, (0, 2, 1))[:, ::-1, :] + self._runfliptest(a, flip, aflip) + + def test_process_flip_r270(self): + """Processed image series: flip clockwise 90 """ + flip = 'cw90' + a = make_array() + aflip = np.transpose(a, (0, 2, 1))[:, :, ::-1] + self._runfliptest(a, flip, aflip) + + def test_process_dark(self): + """Processed image series: dark image""" + a = make_array() + dark = np.ones_like(a[0]) + is_a = imageseries.open(None, 'array', data=a) + apos = np.where(a >= 1, a-1, 0) + is_a1 = imageseries.open(None, 'array', data=apos) + ops = [('dark', dark)] + is_p = process.ProcessedImageSeries(is_a, ops) + diff = compare(is_a1, is_p) + self.assertAlmostEqual(diff, 0., msg="dark image failed") + + def test_process_framelist(self): + a = make_array() + is_a = imageseries.open(None, 'array', data=a) + ops = [] + frames = [0, 2] + is_p = process.ProcessedImageSeries(is_a, ops, frame_list=frames) + 
is_a2 = imageseries.open(None, 'array', data=a[tuple(frames), ...])
+        diff = compare(is_a2, is_p)
+        self.assertAlmostEqual(diff, 0., msg="frame list failed")
+
+    def test_process_shape(self):
+        a = make_array()
+        is_a = imageseries.open(None, 'array', data=a)
+        ops = []
+        is_p = process.ProcessedImageSeries(is_a, ops)
+        pshape = is_p.shape
+        fshape = is_p[0].shape
+        for i in range(2):
+            self.assertEqual(fshape[i], pshape[i])
+
+    def test_process_dtype(self):
+        a = make_array()
+        is_a = imageseries.open(None, 'array', data=a)
+        ops = []
+        is_p = process.ProcessedImageSeries(is_a, ops)
+        self.assertEqual(is_p.dtype, is_p[0].dtype)
diff --git a/hexrd/imageseries/tests/test_properties.py b/hexrd/imageseries/tests/test_properties.py
new file mode 100644
index 00000000..9b0a08a3
--- /dev/null
+++ b/hexrd/imageseries/tests/test_properties.py
@@ -0,0 +1,15 @@
+from .common import ImageSeriesTest, make_array, make_array_ims
+
+class TestProperties(ImageSeriesTest):
+    def setUp(self):
+        self._a = make_array()
+        self._is_a = make_array_ims()
+
+    def test_prop_nframes(self):
+        self.assertEqual(self._a.shape[0], len(self._is_a))
+
+    def test_prop_shape(self):
+        self.assertEqual(self._a.shape[1:], self._is_a.shape)
+
+    def test_prop_dtype(self):
+        self.assertEqual(self._a.dtype, self._is_a.dtype)
diff --git a/hexrd/imageseries/tests/test_stats.py b/hexrd/imageseries/tests/test_stats.py
new file mode 100644
index 00000000..6d6b0491
--- /dev/null
+++ b/hexrd/imageseries/tests/test_stats.py
@@ -0,0 +1,27 @@
+import numpy as np
+
+from hexrd import imageseries
+from hexrd.imageseries import stats
+
+from .common import ImageSeriesTest, make_array, make_array_ims
+
+
+class TestImageSeriesStats(ImageSeriesTest):
+
+    def test_stats_median(self):
+        """Processed imageseries: median"""
+        a = make_array()
+        is_a = imageseries.open(None, 'array', data=a)
+        ismed = stats.median(is_a)
+        amed = np.median(a, axis=0)
+        err = np.linalg.norm(amed - ismed)
+        self.assertAlmostEqual(err, 0., msg="median image failed")
+
+    def test_stats_max(self):
+        """Processed imageseries: max"""
+        a = make_array()
+        is_a = imageseries.open(None, 'array', data=a)
+        ismax = stats.max(is_a)
+        amax = np.max(a, axis=0)
+        err = np.linalg.norm(amax - ismax)
+        self.assertAlmostEqual(err, 0., msg="max image failed")
diff --git a/hexrd/instrument.py b/hexrd/instrument.py
new file mode 100644
index 00000000..d246365c
--- /dev/null
+++ b/hexrd/instrument.py
@@ -0,0 +1,2696 @@
+# -*- coding: utf-8 -*-
+# =============================================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License (as published by the Free
+# Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================================= +""" +Created on Fri Dec 9 13:05:27 2016 + +@author: bernier2 +""" +from __future__ import print_function + +import os + +import yaml + +import h5py + +import numpy as np + +from scipy import ndimage +from scipy.linalg.matfuncs import logm + +from hexrd.gridutil import cellIndices, make_tolerance_grid +from hexrd import matrixutil as mutil +from hexrd.valunits import valWUnit +from hexrd.xrd.transforms_CAPI import anglesToGVec, \ + angularDifference, \ + detectorXYToGvec, \ + gvecToDetectorXY, \ + makeOscillRotMat, \ + makeRotMatOfExpMap, \ + mapAngle, \ + oscillAnglesOfHKLs, \ + rowNorm, \ + unitRowVector +from hexrd.xrd import xrdutil +from hexrd.xrd.crystallography import PlaneData +from hexrd import constants as ct + +# from hexrd.utils.progressbar import ProgressBar, Bar, ETA, ReverseBar + +# FIXME: distortion kludge +from hexrd.xrd.distortion import GE_41RT # BAD, VERY BAD!!! + +from skimage.draw import polygon + +try: + from fast_histogram import histogram1d + fast_histogram = True +except(ImportError): + from numpy import histogram as histogram1d + fast_histogram = False + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +instrument_name_DFLT = 'instrument' + +beam_energy_DFLT = 65.351 +beam_vec_DFLT = ct.beam_vec + +eta_vec_DFLT = ct.eta_vec + +panel_id_DFLT = 'generic' +nrows_DFLT = 2048 +ncols_DFLT = 2048 +pixel_size_DFLT = (0.2, 0.2) + +tilt_params_DFLT = np.zeros(3) +t_vec_d_DFLT = np.r_[0., 0., -1000.] + +chi_DFLT = 0. +t_vec_s_DFLT = np.zeros(3) + +# [wavelength, beam azim, beam pola, chi, tvec_s], len is 7 +instr_param_flags_DFLT = np.array( + [0, 0, 0, 1, 0, 0, 0], + dtype=bool) +panel_param_flags_DFLT = np.array( + [1, 1, 1, 1, 1, 1], + dtype=bool) + +NP_INS = 7 +NP_DET = 6 +NP_GRN = 12 + +buffer_key = 'buffer' +distortion_key = 'distortion' + +# ============================================================================= +# UTILITY METHODS +# ============================================================================= + + +def _fix_indices(idx, lo, hi): + nidx = np.array(idx) + off_lo = nidx < lo + off_hi = nidx > hi + nidx[off_lo] = lo + nidx[off_hi] = hi + return nidx + + +def calc_beam_vec(azim, pola): + """ + Calculate unit beam propagation vector from + spherical coordinate spec in DEGREES + + ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL! 
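+
+    A quick doctest-style sanity check (not authoritative; exact floats
+    are subject to round-off, hence the SKIP): the default beam, which is
+    anti-parallel to the lab Z axis, corresponds to azim = pola = 90:
+
+        >>> calc_beam_vec(90., 90.)  # doctest: +SKIP
+        array([ 0.,  0., -1.])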
+    """
+    tht = np.radians(azim)
+    phi = np.radians(pola)
+    bv = np.r_[
+        np.sin(phi)*np.cos(tht),
+        np.cos(phi),
+        np.sin(phi)*np.sin(tht)]
+    return -bv
+
+
+def calc_angles_from_beam_vec(bvec):
+    """
+    Return the azimuth and polar angle from a beam
+    vector
+    """
+    bvec = np.atleast_1d(bvec).flatten()
+    nvec = unitRowVector(-bvec)
+    azim = float(
+        np.degrees(np.arctan2(nvec[2], nvec[0]))
+    )
+    pola = float(np.degrees(np.arccos(nvec[1])))
+    return azim, pola
+
+
+def migrate_instrument_config(instrument_config):
+    """utility function to generate old instrument config dictionary"""
+    cfg_list = []
+    for detector_id in instrument_config['detectors']:
+        cfg_list.append(
+            dict(
+                detector=instrument_config['detectors'][detector_id],
+                oscillation_stage=instrument_config['oscillation_stage'],
+            )
+        )
+    return cfg_list
+
+
+def angle_in_range(angle, ranges, ccw=True, units='degrees'):
+    """
+    Return the index of the first wedge the angle is found in
+
+    WARNING: always clockwise; assumes wedges are not overlapping
+    """
+    tau = 360.
+    if units.lower() == 'radians':
+        tau = 2*np.pi
+    w = np.nan
+    for i, wedge in enumerate(ranges):
+        amin = wedge[0]
+        amax = wedge[1]
+        check = amin + np.mod(angle - amin, tau)
+        if check < amax:
+            w = i
+            break
+    return w
+
+
+# ???: move to gridutil?
+def centers_of_edge_vec(edges):
+    assert np.r_[edges].ndim == 1, "edges must be 1-d"
+    return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)
+
+
+def max_tth(instr):
+    """
+    Return the maximum Bragg angle (in radians) subtended by the input
+    instrument
+
+    Parameters
+    ----------
+    instr : hexrd.instrument.HEDMInstrument instance
+        the instrument class to evaluate.
+
+    Returns
+    -------
+    tth_max : float
+        The maximum observable Bragg angle by the instrument in radians.
+    """
+    tth_max = 0.
+    for det in instr.detectors.values():
+        ptth, peta = det.pixel_angles()
+        tth_max = max(np.max(ptth), tth_max)
+    return tth_max
+
+
+# =============================================================================
+# CLASSES
+# =============================================================================
+
+
+class HEDMInstrument(object):
+    """
+    * Distortion needs to be moved to a class with registry; tuple unworkable
+    * where should reference eta be defined?
currently set to default config + """ + def __init__(self, instrument_config=None, + image_series=None, eta_vector=None, + instrument_name=None): + self._id = instrument_name_DFLT + + if eta_vector is None: + self._eta_vector = eta_vec_DFLT + else: + self._eta_vector = eta_vector + + if instrument_config is None: + if instrument_name is not None: + self._id = instrument_name + self._num_panels = 1 + self._beam_energy = beam_energy_DFLT + self._beam_vector = beam_vec_DFLT + + self._detectors = dict( + panel_id_DFLT=PlanarDetector( + rows=nrows_DFLT, cols=ncols_DFLT, + pixel_size=pixel_size_DFLT, + tvec=t_vec_d_DFLT, + tilt=tilt_params_DFLT, + bvec=self._beam_vector, + evec=self._eta_vector, + distortion=None), + ) + + self._tvec = t_vec_s_DFLT + self._chi = chi_DFLT + else: + if instrument_name is None: + if 'id' in instrument_config: + self._id = instrument_config['id'] + else: + self._id = instrument_name + self._num_panels = len(instrument_config['detectors']) + self._beam_energy = instrument_config['beam']['energy'] # keV + self._beam_vector = calc_beam_vec( + instrument_config['beam']['vector']['azimuth'], + instrument_config['beam']['vector']['polar_angle'], + ) + + # now build detector dict + detectors_config = instrument_config['detectors'] + det_dict = dict.fromkeys(detectors_config) + for det_id, det_info in detectors_config.items(): + pixel_info = det_info['pixels'] + saturation_level = det_info['saturation_level'] + affine_info = det_info['transform'] + + shape = (pixel_info['rows'], pixel_info['columns']) + + panel_buffer = None + if buffer_key in det_info: + det_buffer = det_info[buffer_key] + if det_buffer is not None: + if isinstance(det_buffer, str): + panel_buffer = np.load(det_buffer) + assert panel_buffer.shape == shape, \ + "buffer shape must match detector" + elif isinstance(det_buffer, list): + panel_buffer = np.asarray(det_buffer) + elif np.isscalar(det_buffer): + panel_buffer = det_buffer*np.ones(2) + else: + raise RuntimeError( + "panel buffer spec invalid for %s" % det_id + ) + + # FIXME: must promote this to a class w/ registry + distortion = None + if distortion_key in det_info: + distortion = det_info[distortion_key] + if det_info[distortion_key] is not None: + # !!! 
hard-coded GE distortion + distortion = [GE_41RT, distortion['parameters']] + + det_dict[det_id] = PlanarDetector( + name=det_id, + rows=pixel_info['rows'], + cols=pixel_info['columns'], + pixel_size=pixel_info['size'], + panel_buffer=panel_buffer, + saturation_level=saturation_level, + tvec=affine_info['translation'], + tilt=affine_info['tilt'], + bvec=self._beam_vector, + evec=self._eta_vector, + distortion=distortion) + + self._detectors = det_dict + + self._tvec = np.r_[ + instrument_config['oscillation_stage']['translation'] + ] + self._chi = instrument_config['oscillation_stage']['chi'] + + self._param_flags = np.hstack( + [instr_param_flags_DFLT, + np.tile(panel_param_flags_DFLT, self._num_panels)] + ) + return + + # properties for physical size of rectangular detector + @property + def id(self): + return self._id + + @property + def num_panels(self): + return self._num_panels + + @property + def detectors(self): + return self._detectors + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def chi(self): + return self._chi + + @chi.setter + def chi(self, x): + self._chi = float(x) + + @property + def beam_energy(self): + return self._beam_energy + + @beam_energy.setter + def beam_energy(self, x): + self._beam_energy = float(x) + + @property + def beam_wavelength(self): + return ct.keVToAngstrom(self.beam_energy) + + @property + def beam_vector(self): + return self._beam_vector + + @beam_vector.setter + def beam_vector(self, x): + x = np.array(x).flatten() + if len(x) == 3: + assert sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._beam_vector = x + elif len(x) == 2: + self._beam_vector = calc_beam_vec(*x) + else: + raise RuntimeError("input must be a unit vector or angle pair") + # ...maybe change dictionary item behavior for 3.x compatibility? + for detector_id in self.detectors: + panel = self.detectors[detector_id] + panel.bvec = self._beam_vector + + @property + def eta_vector(self): + return self._eta_vector + + @eta_vector.setter + def eta_vector(self, x): + x = np.array(x).flatten() + assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._eta_vector = x + # ...maybe change dictionary item behavior for 3.x compatibility? + for detector_id in self.detectors: + panel = self.detectors[detector_id] + panel.evec = self._eta_vector + + @property + def param_flags(self): + return self._param_flags + + @param_flags.setter + def param_flags(self, x): + x = np.array(x, dtype=bool).flatten() + assert len(x) == NP_INS + NP_DET*self.num_panels, \ + "length of parameter list must be %d; you gave %d" \ + % (len(self._param_flags), len(x)) + self._param_flags = x + + # ========================================================================= + # METHODS + # ========================================================================= + + def calibration_params(self): + """ + Yield the full list of adjustable parameters for + instument calibration. + + Parameters + ---------- + None + + Returns + ------- + retval : array + concatenated list of calibration parameters. 
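+
+        Notes
+        -----
+        From the assembly code below, the layout appears to be:
+        plist[0] = beam energy [keV]; plist[1:3] = beam azimuth and
+        polar angle [deg]; plist[3] = chi; plist[4:7] = sample stage
+        translation; then 3 tilt + 3 translation parameters per panel,
+        with any per-panel distortion parameters concatenated at the
+        end. E.g., for a two-panel instrument with no distortion,
+        len(plist) == NP_INS + 2*NP_DET == 7 + 12 == 19.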
+ """ + azim, pola = calc_angles_from_beam_vec(self.beam_vector) + + plist = np.zeros(NP_INS + NP_DET*self.num_panels) + + plist[0] = self.beam_energy + plist[1] = azim + plist[2] = pola + plist[3] = self.chi + plist[4], plist[5], plist[6] = self.tvec + + ii = NP_INS + for panel in self.detectors.values(): + plist[ii:ii + NP_DET] = np.hstack([ + panel.tilt.flatten(), + panel.tvec.flatten(), + ]) + ii += NP_DET + + # FIXME: FML!!! + # this assumes old style distiortion = (func, params) + for panel in self.detectors.values(): + if panel.distortion is not None: + plist = np.concatenate( + [plist, panel.distortion[1]] + ) + + return plist + + def update_from_calibration_params(self, plist): + """ + """ + # check total length + min_len_plist = NP_INS + NP_DET*self.num_panels + for panel in self.detectors.values(): + if panel.distortion is not None: + min_len_plist += len(panel.distortion[1]) + if len(plist) < min_len_plist: + # ??? could have grains on here + raise RuntimeError("input plist is not the correct length") + + # updates + self.beam_energy = plist[0] + bvec = calc_beam_vec(plist[1], plist[2]) + self.beam_vector = bvec + self.chi = plist[3] + self.tvec = plist[4:7] + + ii = NP_INS + for panel in self.detectors.values(): + tilt_n_trans = plist[ii:ii + NP_DET] + panel.tilt = tilt_n_trans[:3] + panel.tvec = tilt_n_trans[3:] + ii += NP_DET + + # FIXME: FML!!! + # this assumes old style distiortion = (func, params) + for panel in self.detectors.values(): + if panel.distortion is not None: + ldp = len(panel.distortion[1]) + panel.distortion[1] = plist[ii:ii + ldp] + ii += ldp + + return + + def write_config(self, filename=None, calibration_dict={}): + """ WRITE OUT YAML FILE """ + # initialize output dictionary + + par_dict = {} + + par_dict['id'] = self.id + + azim, pola = calc_angles_from_beam_vec(self.beam_vector) + beam = dict( + energy=self.beam_energy, + vector=dict( + azimuth=azim, + polar_angle=pola, + ) + ) + par_dict['beam'] = beam + + if calibration_dict: + par_dict['calibration_crystal'] = calibration_dict + + ostage = dict( + chi=self.chi, + translation=self.tvec.tolist() + ) + par_dict['oscillation_stage'] = ostage + + det_dict = dict.fromkeys(self.detectors) + for det_name, panel in self.detectors.items(): + det_dict[det_name] = panel.config_dict()['detector'] + par_dict['detectors'] = det_dict + if filename is not None: + with open(filename, 'w') as f: + yaml.dump(par_dict, stream=f) + return par_dict + + def extract_polar_maps(self, plane_data, imgser_dict, + active_hkls=None, threshold=None, + tth_tol=None, eta_tol=0.25): + """ + Quick and dirty way to histogram angular patch data for make + pole figures suitable for fiber generation + + TODO: streamline projection code + TODO: normalization + + !!!: images must be non-negative! + """ + if tth_tol is not None: + plane_data.tThWidth = np.radians(tth_tol) + else: + tth_tol = np.degrees(plane_data.tThWidth) + + tth_ranges = plane_data.getTThRanges() + if active_hkls is not None: + assert hasattr(active_hkls, '__len__'), \ + "active_hkls must be an iterable with __len__" + tth_ranges = tth_ranges[active_hkls] + + # # need this for making eta ranges + # eta_tol_vec = 0.5*np.radians([-eta_tol, eta_tol]) + + ring_maps_panel = dict.fromkeys(self.detectors) + for i_d, det_key in enumerate(self.detectors): + print("working on detector '%s'..." % det_key) + + # grab panel + panel = self.detectors[det_key] + # native_area = panel.pixel_area # pixel ref area + + # make rings clipped to panel + # !!! 
eta_idx has the same length as plane_data.exclusions + # each entry are the integer indices into the bins + # !!! eta_edges is the list of eta bin EDGES + pow_angs, pow_xys, eta_idx, eta_edges = panel.make_powder_rings( + plane_data, + merge_hkls=False, delta_eta=eta_tol, + full_output=True) + delta_eta = eta_edges[1] - eta_edges[0] + + # pixel angular coords for the detector panel + ptth, peta = panel.pixel_angles() + + # grab omegas from imageseries and squawk if missing + try: + omegas = imgser_dict[det_key].metadata['omega'] + except(KeyError): + msg = "imageseries for '%s' has no omega info" % det_key + raise RuntimeError(msg) + + # initialize maps and assing by row (omega/frame) + nrows_ome = len(omegas) + ncols_eta = len(eta_edges) - 1 + + ring_maps = [] + for i_r, tthr in enumerate(tth_ranges): + print("working on ring %d..." % i_r) + + # init map with NaNs + this_map = np.nan*np.ones((nrows_ome, ncols_eta)) + + # mark pixels in the spec'd tth range + pixels_in_tthr = np.logical_and( + ptth >= tthr[0], ptth <= tthr[1] + ) + + # catch case where ring isn't on detector + if not np.any(pixels_in_tthr): + ring_maps.append(this_map) + continue + + # ???: faster to index with bool or use np.where, + # or recode in numba? + rtth_idx = np.where(pixels_in_tthr) + + # grab relevant eta coords using histogram + # !!!: This allows use to calculate arc length and + # detect a branch cut. The histogram idx var + # is the left-hand edges... + retas = peta[rtth_idx] + if fast_histogram: + reta_hist = histogram1d( + retas, + len(eta_edges) - 1, + (eta_edges[0], eta_edges[-1]) + ) + else: + reta_hist, _ = histogram1d(retas, bins=eta_edges) + reta_idx = np.where(reta_hist)[0] + reta_bin_idx = np.hstack( + [reta_idx, + reta_idx[-1] + 1] + ) + + # ring arc lenght on panel + arc_length = angularDifference( + eta_edges[reta_bin_idx[0]], + eta_edges[reta_bin_idx[-1]] + ) + + # Munge eta bins + # !!! need to work with the subset to preserve + # NaN values at panel extents! + # + # !!! MUST RE-MAP IF BRANCH CUT IS IN RANGE + # + # The logic below assumes that eta_edges span 2*pi to + # single precision + eta_bins = eta_edges[reta_bin_idx] + if arc_length < 1e-4: + # have branch cut in here + ring_gap = np.where( + reta_idx + - np.arange(len(reta_idx)) + )[0] + if len(ring_gap) > 0: + # have incomplete ring + eta_stop_idx = ring_gap[0] + eta_stop = eta_edges[eta_stop_idx] + new_period = np.cumsum([eta_stop, 2*np.pi]) + # remap + retas = mapAngle(retas, new_period) + tmp_bins = mapAngle(eta_edges[reta_idx], new_period) + tmp_idx = np.argsort(tmp_bins) + reta_idx = reta_idx[np.argsort(tmp_bins)] + eta_bins = np.hstack( + [tmp_bins[tmp_idx], + tmp_bins[tmp_idx][-1] + delta_eta] + ) + pass + pass + + # histogram intensities over eta ranges + for i_row, image in enumerate(imgser_dict[det_key]): + # handle threshold if specified + if threshold is not None: + # !!! NaNs get preserved + image = np.array(image) + image[image < threshold] = 0. 
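+                    # Binning for this row of the eta-omega map: ring-pixel
+                    # intensities are accumulated into eta bins as a weighted
+                    # histogram (samples = retas, the eta coords of the ring
+                    # pixels rtth_idx; weights = the thresholded image values
+                    # at those pixels).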
+ if fast_histogram: + this_map[i_row, reta_idx] = histogram1d( + retas, + len(eta_bins) - 1, + (eta_bins[0], eta_bins[-1]), + weights=image[rtth_idx] + ) + else: + this_map[i_row, reta_idx], _ = histogram1d( + retas, + bins=eta_bins, + weights=image[rtth_idx] + ) + pass # end loop on rows + ring_maps.append(this_map) + pass # end loop on rings + ring_maps_panel[det_key] = ring_maps + return ring_maps_panel, eta_edges + + def extract_line_positions(self, plane_data, imgser_dict, + tth_tol=None, eta_tol=1., npdiv=2, + collapse_eta=True, collapse_tth=False, + do_interpolation=True): + """ + export 'caked' sector data over an instrument + + FIXME: must handle merged ranges (fixed by JVB 2018/06/28) + """ + + if not hasattr(plane_data, '__len__'): + plane_data = plane_data.makeNew() # make local copy to munge + if tth_tol is not None: + plane_data.tThWidth = np.radians(tth_tol) + tth_ranges = np.degrees(plane_data.getMergedRanges()[1]) + tth_tols = np.vstack([i[1] - i[0] for i in tth_ranges]) + else: + tth_tols = np.ones(len(plane_data))*tth_tol + + # ===================================================================== + # LOOP OVER DETECTORS + # ===================================================================== + panel_data = dict.fromkeys(self.detectors) + for i_det, detector_id in enumerate(self.detectors): + print("working on detector '%s'..." % detector_id) + # pbar.update(i_det + 1) + # grab panel + panel = self.detectors[detector_id] + # !!! + instr_cfg = panel.config_dict( + self.chi, self.tvec, + self.beam_energy, self.beam_vector + ) + native_area = panel.pixel_area # pixel ref area + images = imgser_dict[detector_id] + if images.ndim == 2: + n_images = 1 + images = np.tile(images, (1, 1, 1)) + elif images.ndim == 3: + n_images = len(images) + else: + raise RuntimeError("images must be 2- or 3-d") + + # make rings + pow_angs, pow_xys = panel.make_powder_rings( + plane_data, merge_hkls=True, + delta_tth=tth_tol, delta_eta=eta_tol) + + # ================================================================= + # LOOP OVER RING SETS + # ================================================================= + ring_data = [] + for i_ring, these_data in enumerate(zip(pow_angs, pow_xys)): + print("interpolating 2theta bin %d..." 
% i_ring) + + # points are already checked to fall on detector + angs = these_data[0] + xys = these_data[1] + + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, angs, panel.angularPixelSize(xys), + tth_tol=tth_tols[i_ring], eta_tol=eta_tol, + npdiv=npdiv, quiet=True) + + # loop over patches + # FIXME: fix initialization + if collapse_tth: + patch_data = np.zeros((len(angs), n_images)) + else: + patch_data = [] + for i_p, patch in enumerate(patches): + # strip relevant objects out of current patch + vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([ + xys_eval[0].flatten(), + xys_eval[1].flatten()]).T + + _, on_panel = panel.clip_to_panel(xy_eval) + + if np.any(~on_panel): + continue + + if collapse_tth: + ang_data = (vtx_angs[0][0, [0, -1]], + vtx_angs[1][[0, -1], 0]) + else: + ang_data = (vtx_angs[0][0, :], + angs[i_p][-1]) + + prows, pcols = areas.shape + area_fac = areas/float(native_area) + + # interpolate + if not collapse_tth: + ims_data = [] + for j_p in np.arange(len(images)): + # catch interpolation type + image = images[j_p] + if do_interpolation: + tmp = panel.interpolate_bilinear( + xy_eval, + image, + ).reshape(prows, pcols)*area_fac + else: + tmp = image[ijs[0], ijs[1]]*area_fac + + # catch collapsing options + if collapse_tth: + patch_data[i_p, j_p] = np.sum(tmp) + # ims_data.append(np.sum(tmp)) + else: + if collapse_eta: + ims_data.append(np.sum(tmp, axis=0)) + else: + ims_data.append(tmp) + pass # close image loop + if not collapse_tth: + patch_data.append((ang_data, ims_data)) + pass # close patch loop + ring_data.append(patch_data) + pass # close ring loop + panel_data[detector_id] = ring_data + pass # close panel loop + # pbar.finish() + return panel_data + + def simulate_laue_pattern(self, crystal_data, + minEnergy=5., maxEnergy=35., + rmat_s=None, grain_params=None): + """ + Simulates Laue diffraction for a list of grains. + + Parameters + ---------- + crystal_data : TYPE + DESCRIPTION. + minEnergy : TYPE, optional + DESCRIPTION. The default is 5.. + maxEnergy : TYPE, optional + DESCRIPTION. The default is 35.. + rmat_s : TYPE, optional + DESCRIPTION. The default is None. + grain_params : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + results : dict + results dictionary for each detector containing + [xy_det, hkls_in, angles, dspacing, energy] each a list over each + grain. + + """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_laue_pattern( + crystal_data, + minEnergy=minEnergy, maxEnergy=maxEnergy, + rmat_s=rmat_s, tvec_s=self.tvec, + grain_params=grain_params, + beam_vec=self.beam_vector) + return results + + def simulate_rotation_series(self, plane_data, grain_param_list, + eta_ranges=[(-np.pi, np.pi), ], + ome_ranges=[(-np.pi, np.pi), ], + ome_period=(-np.pi, np.pi), + wavelength=None): + """ + TODO: revisit output; dict, or concatenated list? 
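+
+        Currently the output is a dict keyed by detector name; each value
+        is the corresponding panel's simulate_rotation_series() result,
+        i.e. (as unpacked in pull_spots below) per-grain lists of hkl ids,
+        hkls, angles, xy positions, and angular pixel sizes. A hypothetical
+        sketch ('ge1' is an example detector key, not a fixed name):
+
+            sim = instr.simulate_rotation_series(plane_data, [grain_params])
+            hkl_ids, hkls, angs, xys, ang_ps = sim['ge1']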
+ """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_rotation_series( + plane_data, grain_param_list, + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period, + chi=self.chi, tVec_s=self.tvec, + wavelength=wavelength) + return results + + def pull_spots(self, plane_data, grain_params, + imgser_dict, + tth_tol=0.25, eta_tol=1., ome_tol=1., + npdiv=2, threshold=10, + eta_ranges=[(-np.pi, np.pi), ], + ome_period=(-np.pi, np.pi), + dirname='results', filename=None, output_format='text', + save_spot_list=False, + quiet=True, check_only=False, + interp='nearest'): + """ + Exctract reflection info from a rotation series encoded as an + OmegaImageseries object + """ + + # grain parameters + rMat_c = makeRotMatOfExpMap(grain_params[:3]) + tVec_c = grain_params[3:6] + + # grab omega ranges from first imageseries + # + # WARNING: all imageseries AND all wedges within are assumed to have + # the same omega values; put in a check that they are all the same??? + oims0 = imgser_dict[imgser_dict.keys()[0]] + ome_ranges = [np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges] + + # delta omega in DEGREES grabbed from first imageseries in the dict + delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] + + # make omega grid for frame expansion around reference frame + # in DEGREES + ndiv_ome, ome_del = make_tolerance_grid( + delta_ome, ome_tol, 1, adjust_window=True, + ) + + # generate structuring element for connected component labeling + if ndiv_ome == 1: + label_struct = ndimage.generate_binary_structure(2, 2) + else: + label_struct = ndimage.generate_binary_structure(3, 3) + + # simulate rotation series + sim_results = self.simulate_rotation_series( + plane_data, [grain_params, ], + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period) + + # patch vertex generator (global for instrument) + tol_vec = 0.5*np.radians( + [-tth_tol, -eta_tol, + -tth_tol, eta_tol, + tth_tol, eta_tol, + tth_tol, -eta_tol]) + + # prepare output if requested + if filename is not None and output_format.lower() == 'hdf5': + this_filename = os.path.join(dirname, filename) + writer = GrainDataWriter_h5( + os.path.join(dirname, filename), + self.write_config(), grain_params) + + # ===================================================================== + # LOOP OVER PANELS + # ===================================================================== + iRefl = 0 + compl = [] + output = dict.fromkeys(self.detectors) + for detector_id in self.detectors: + # initialize text-based output writer + if filename is not None and output_format.lower() == 'text': + output_dir = os.path.join( + dirname, detector_id + ) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + this_filename = os.path.join( + output_dir, filename + ) + writer = PatchDataWriter(this_filename) + + # grab panel + panel = self.detectors[detector_id] + # !!! + instr_cfg = panel.config_dict( + self.chi, self.tvec, + self.beam_energy, self.beam_vector + ) + native_area = panel.pixel_area # pixel ref area + + # pull out the OmegaImageSeries for this panel from input dict + ome_imgser = imgser_dict[detector_id] + + # extract simulation results + sim_results_p = sim_results[detector_id] + hkl_ids = sim_results_p[0][0] + hkls_p = sim_results_p[1][0] + ang_centers = sim_results_p[2][0] + xy_centers = sim_results_p[3][0] + ang_pixel_size = sim_results_p[4][0] + + # now verify that full patch falls on detector... + # ???: strictly necessary? 
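+            # Each patch contributes four (tth, eta) corner vertices (from
+            # tol_vec); these are projected to detector coordinates, and a
+            # reflection is kept only if all four corners land on the panel
+            # (patch_is_on below), so no work is done on clipped patches.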
+ # + # patch vertex array from sim + nangs = len(ang_centers) + patch_vertices = ( + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4*nangs, 2) + ome_dupl = np.tile( + ang_centers[:, 2], (4, 1) + ).T.reshape(len(patch_vertices), 1) + + # find vertices that all fall on the panel + det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + np.hstack([patch_vertices, ome_dupl]), + panel.rmat, rMat_c, self.chi, + panel.tvec, tVec_c, self.tvec, + panel.distortion) + _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) + + # all vertices must be on... + patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) + patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] + + # re-filter... + hkl_ids = hkl_ids[patch_is_on] + hkls_p = hkls_p[patch_is_on, :] + ang_centers = ang_centers[patch_is_on, :] + xy_centers = xy_centers[patch_is_on, :] + ang_pixel_size = ang_pixel_size[patch_is_on, :] + + # TODO: add polygon testing right here! + # done + if check_only: + patch_output = [] + for i_pt, angs in enumerate(ang_centers): + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(angs[2]) + ome_del + + # ...vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d%d%d) falls outside omega range + """ % tuple(hkls_p[i_pt, :]) + print(msg) + continue + else: + these_vertices = patch_xys[i_pt] + ijs = panel.cartToPixel(these_vertices) + ii, jj = polygon(ijs[:, 0], ijs[:, 1]) + contains_signal = False + for i_frame in frame_indices: + contains_signal = contains_signal or np.any( + ome_imgser[i_frame][ii, jj] > threshold + ) + compl.append(contains_signal) + patch_output.append((ii, jj, frame_indices)) + else: + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, + ang_centers[:, :2], ang_pixel_size, + omega=ang_centers[:, 2], + tth_tol=tth_tol, eta_tol=eta_tol, + rmat_c=rMat_c, tvec_c=tVec_c, + npdiv=npdiv, quiet=True) + + # GRAND LOOP over reflections for this panel + patch_output = [] + for i_pt, patch in enumerate(patches): + + # strip relevant objects out of current patch + vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch + + prows, pcols = areas.shape + nrm_fac = areas/float(native_area) + nrm_fac = nrm_fac / np.min(nrm_fac) + + # grab hkl info + hkl = hkls_p[i_pt, :] + hkl_id = hkl_ids[i_pt] + + # edge arrays + tth_edges = vtx_angs[0][0, :] + delta_tth = tth_edges[1] - tth_edges[0] + eta_edges = vtx_angs[1][:, 0] + delta_eta = eta_edges[1] - eta_edges[0] + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([xy_eval[0].flatten(), + xy_eval[1].flatten()]).T + + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del + + # ???: vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d%d%d) falls outside omega range + """ % tuple(hkl) + print(msg) + continue + else: + # initialize spot data parameters + # !!! 
maybe change these to nan to not fuck up writer + peak_id = -999 + sum_int = None + max_int = None + meas_angs = None + meas_xy = None + + # quick check for intensity + contains_signal = False + patch_data_raw = [] + for i_frame in frame_indices: + tmp = ome_imgser[i_frame][ijs[0], ijs[1]] + contains_signal = contains_signal or np.any( + tmp > threshold + ) + patch_data_raw.append(tmp) + pass + patch_data_raw = np.stack(patch_data_raw, axis=0) + compl.append(contains_signal) + + if contains_signal: + # initialize patch data array for intensities + if interp.lower() == 'bilinear': + patch_data = np.zeros( + (len(frame_indices), prows, pcols)) + for i, i_frame in enumerate(frame_indices): + patch_data[i] = \ + panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False + ).reshape(prows, pcols) # * nrm_fac + elif interp.lower() == 'nearest': + patch_data = patch_data_raw # * nrm_fac + else: + msg = "interpolation option " + \ + "'%s' not understood" + raise(RuntimeError, msg % interp) + + # now have interpolated patch data... + labels, num_peaks = ndimage.label( + patch_data > threshold, structure=label_struct + ) + slabels = np.arange(1, num_peaks + 1) + + if num_peaks > 0: + peak_id = iRefl + coms = np.array( + ndimage.center_of_mass( + patch_data, + labels=labels, + index=slabels + ) + ) + if num_peaks > 1: + center = np.r_[patch_data.shape]*0.5 + center_t = np.tile(center, (num_peaks, 1)) + com_diff = coms - center_t + closest_peak_idx = np.argmin( + np.sum(com_diff**2, axis=1) + ) + else: + closest_peak_idx = 0 + pass # end multipeak conditional + coms = coms[closest_peak_idx] + # meas_omes = \ + # ome_edges[0] + (0.5 + coms[0])*delta_ome + meas_omes = \ + ome_eval[0] + coms[0]*delta_ome + meas_angs = np.hstack( + [tth_edges[0] + (0.5 + coms[2])*delta_tth, + eta_edges[0] + (0.5 + coms[1])*delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ) + ] + ) + + # intensities + # - summed is 'integrated' over interpolated + # data + # - max is max of raw input data + sum_int = np.sum( + patch_data[ + labels == slabels[closest_peak_idx] + ] + ) + max_int = np.max( + patch_data_raw[ + labels == slabels[closest_peak_idx] + ] + ) + # ???: Should this only use labeled pixels? + # Those are segmented from interpolated data, + # not raw; likely ok in most cases. + + # need MEASURED xy coords + gvec_c = anglesToGVec( + meas_angs, + chi=self.chi, + rMat_c=rMat_c, + bHat_l=self.beam_vector) + rMat_s = makeOscillRotMat( + [self.chi, meas_angs[2]] + ) + meas_xy = gvecToDetectorXY( + gvec_c, + panel.rmat, rMat_s, rMat_c, + panel.tvec, self.tvec, tVec_c, + beamVec=self.beam_vector) + if panel.distortion is not None: + # FIXME: distortion handling + meas_xy = panel.distortion[0]( + np.atleast_2d(meas_xy), + panel.distortion[1], + invert=True).flatten() + pass + # FIXME: why is this suddenly necessary??? 
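+                            # (presumably because gvecToDetectorXY returns a
+                            # (1, 2) array for a single g-vector, so squeeze()
+                            # restores the expected flat (x, y) pair)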
+ meas_xy = meas_xy.squeeze() + pass # end num_peaks > 0 + else: + patch_data = patch_data_raw + pass # end contains_signal + # write output + if filename is not None: + if output_format.lower() == 'text': + writer.dump_patch( + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, + xy_centers[i_pt], meas_xy) + elif output_format.lower() == 'hdf5': + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + writer.dump_patch( + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy) + pass # end conditional on write output + pass # end conditional on check only + patch_output.append([ + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, meas_xy, + ]) + iRefl += 1 + pass # end patch conditional + pass # end patch loop + output[detector_id] = patch_output + if filename is not None and output_format.lower() == 'text': + writer.close() + pass # end detector loop + if filename is not None and output_format.lower() == 'hdf5': + writer.close() + return compl, output + + """def fit_grain(self, grain_params, data_dir='results'):""" + + pass # end class: HEDMInstrument + + +class PlanarDetector(object): + """ + base class for 2D planar, rectangular row-column detector + """ + + __pixelPitchUnit = 'mm' + + def __init__(self, + rows=2048, cols=2048, + pixel_size=(0.2, 0.2), + tvec=np.r_[0., 0., -1000.], + tilt=ct.zeros_3, + name='default', + bvec=ct.beam_vec, + evec=ct.eta_vec, + saturation_level=None, + panel_buffer=None, + roi=None, + distortion=None): + """ + Instantiate PlanarDetector object. + + Parameters + ---------- + rows : TYPE, optional + DESCRIPTION. The default is 2048. + cols : TYPE, optional + DESCRIPTION. The default is 2048. + pixel_size : TYPE, optional + DESCRIPTION. The default is (0.2, 0.2). + tvec : TYPE, optional + DESCRIPTION. The default is np.r_[0., 0., -1000.]. + tilt : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + name : TYPE, optional + DESCRIPTION. The default is 'default'. + bvec : TYPE, optional + DESCRIPTION. The default is ct.beam_vec. + evec : TYPE, optional + DESCRIPTION. The default is ct.eta_vec. + saturation_level : TYPE, optional + DESCRIPTION. The default is None. + panel_buffer : array_like, optional + If panel_buffer has size 2, it is interpreted as a buffer in mm + in the row and column dimensions, respectively. If it is a 2-d + array with shape (rows, cols), then it is interpreted as a boolean + mask where valid pixels are marked with True. Refer to + self.clip_to_panel for usage. The default is None. + roi : TYPE, optional + DESCRIPTION. The default is None. + distortion : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + None. 
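+
+        A minimal usage sketch (parameter values are illustrative only)::
+
+            panel = PlanarDetector(rows=2048, cols=2048,
+                                   pixel_size=(0.2, 0.2),
+                                   tvec=np.r_[0., 0., -1000.])
+            tth, eta = panel.pixel_angles()  # per-pixel Bragg/azimuthal angles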
+ + """ + + self._name = name + + self._rows = rows + self._cols = cols + + self._pixel_size_row = pixel_size[0] + self._pixel_size_col = pixel_size[1] + + self._saturation_level = saturation_level + + self._panel_buffer = panel_buffer + + self._roi = roi + + self._tvec = np.array(tvec).flatten() + self._tilt = np.array(tilt).flatten() + + self._bvec = np.array(bvec).flatten() + self._evec = np.array(evec).flatten() + + self._distortion = distortion + + return + + # detector ID + @property + def name(self): + return self._name + + @name.setter + def name(self, s): + assert isinstance(s, (str, unicode)), "requires string input" + self._name = s + + # properties for physical size of rectangular detector + @property + def rows(self): + return self._rows + + @rows.setter + def rows(self, x): + assert isinstance(x, int) + self._rows = x + + @property + def cols(self): + return self._cols + + @cols.setter + def cols(self, x): + assert isinstance(x, int) + self._cols = x + + @property + def pixel_size_row(self): + return self._pixel_size_row + + @pixel_size_row.setter + def pixel_size_row(self, x): + self._pixel_size_row = float(x) + + @property + def pixel_size_col(self): + return self._pixel_size_col + + @pixel_size_col.setter + def pixel_size_col(self, x): + self._pixel_size_col = float(x) + + @property + def pixel_area(self): + return self.pixel_size_row * self.pixel_size_col + + @property + def saturation_level(self): + return self._saturation_level + + @saturation_level.setter + def saturation_level(self, x): + if x is not None: + assert np.isreal(x) + self._saturation_level = x + + @property + def panel_buffer(self): + return self._panel_buffer + + @panel_buffer.setter + def panel_buffer(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert len(x) == 2 or x.ndim == 2 + self._panel_buffer = x + + @property + def roi(self): + return self._roi + + @roi.setter + def roi(self, vertex_array): + """ + vertex array must be + + [[r0, c0], [r1, c1], ..., [rn, cn]] + + and have len >= 3 + + does NOT need to repeat start vertex for closure + """ + if vertex_array is not None: + assert len(vertex_array) >= 3 + self._roi = vertex_array + + @property + def row_dim(self): + return self.rows * self.pixel_size_row + + @property + def col_dim(self): + return self.cols * self.pixel_size_col + + @property + def row_pixel_vec(self): + return self.pixel_size_row*(0.5*(self.rows-1)-np.arange(self.rows)) + + @property + def row_edge_vec(self): + return self.pixel_size_row*(0.5*self.rows-np.arange(self.rows+1)) + + @property + def col_pixel_vec(self): + return self.pixel_size_col*(np.arange(self.cols)-0.5*(self.cols-1)) + + @property + def col_edge_vec(self): + return self.pixel_size_col*(np.arange(self.cols+1)-0.5*self.cols) + + @property + def corner_ul(self): + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def corner_ll(self): + return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_lr(self): + return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_ur(self): + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def tilt(self): + return self._tilt + + @tilt.setter + def tilt(self, x): + assert len(x) == 3, 'input must have length = 3' + self._tilt = np.array(x).squeeze() + + @property + def bvec(self): + return 
self._bvec + + @bvec.setter + def bvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._bvec = x + + @property + def evec(self): + return self._evec + + @evec.setter + def evec(self, x): + x = np.array(x).flatten() + assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._evec = x + + @property + def distortion(self): + return self._distortion + + @distortion.setter + def distortion(self, x): + """ + Probably should make distortion a class... + ***FIX THIS*** + """ + assert len(x) == 2 and hasattr(x[0], '__call__'), \ + 'distortion must be a tuple: (, params)' + self._distortion = x + + @property + def rmat(self): + return makeRotMatOfExpMap(self.tilt) + + @property + def normal(self): + return self.rmat[:, 2] + + @property + def beam_position(self): + """ + returns the coordinates of the beam in the cartesian detector + frame {Xd, Yd, Zd}. NaNs if no intersection. + """ + output = np.nan * np.ones(2) + b_dot_n = np.dot(self.bvec, self.normal) + if np.logical_and( + abs(b_dot_n) > ct.sqrt_epsf, + np.sign(b_dot_n) == -1 + ): + u = np.dot(self.normal, self.tvec) / b_dot_n + p2_l = u*self.bvec + p2_d = np.dot(self.rmat.T, p2_l - self.tvec) + output = p2_d[:2] + return output + + # ...memoize??? + @property + def pixel_coords(self): + pix_i, pix_j = np.meshgrid( + self.row_pixel_vec, self.col_pixel_vec, + indexing='ij') + return pix_i, pix_j + + """ + ##################### METHODS + """ + + def config_dict(self, chi=0, tvec=ct.zeros_3, + beam_energy=beam_energy_DFLT, beam_vector=ct.beam_vec, + sat_level=None, panel_buffer=None): + """ + Return a dictionary of detector parameters, with optional instrument + level parameters. This is a convenience function to work with the + APIs in several functions in xrdutil. + + Parameters + ---------- + chi : float, optional + DESCRIPTION. The default is 0. + tvec : array_like (3,), optional + DESCRIPTION. The default is ct.zeros_3. + beam_energy : float, optional + DESCRIPTION. The default is beam_energy_DFLT. + beam_vector : aray_like (3,), optional + DESCRIPTION. The default is ct.beam_vec. + sat_level : scalar, optional + DESCRIPTION. The default is None. + panel_buffer : scalar, array_like (2,), optional + DESCRIPTION. The default is None. + + Returns + ------- + config_dict : dict + DESCRIPTION. + + """ + config_dict = {} + + # ===================================================================== + # DETECTOR PARAMETERS + # ===================================================================== + if sat_level is None: + sat_level = self.saturation_level + + if panel_buffer is None: + # FIXME: won't work right if it is an array + panel_buffer = self.panel_buffer + if isinstance(panel_buffer, np.ndarray): + panel_buffer = panel_buffer.flatten().tolist() + + det_dict = dict( + transform=dict( + tilt=self.tilt.tolist(), + translation=self.tvec.tolist(), + ), + pixels=dict( + rows=self.rows, + columns=self.cols, + size=[self.pixel_size_row, self.pixel_size_col], + ) + ) + + # saturation level + det_dict['saturation_level'] = sat_level + + # panel buffer + # FIXME if it is an array, the write will be a mess + det_dict['panel_buffer'] = panel_buffer + + if self.distortion is not None: + """...HARD CODED DISTORTION! 
FIX THIS!!!""" + dist_d = dict( + function_name='GE_41RT', + parameters=np.r_[self.distortion[1]].tolist() + ) + det_dict['distortion'] = dist_d + + # ===================================================================== + # SAMPLE STAGE PARAMETERS + # ===================================================================== + stage_dict = dict( + chi=chi, + translation=tvec.tolist() + ) + + # ===================================================================== + # BEAM PARAMETERS + # ===================================================================== + beam_dict = dict( + energy=beam_energy, + vector=beam_vector + ) + + config_dict['detector'] = det_dict + config_dict['oscillation_stage'] = stage_dict + config_dict['beam'] = beam_dict + + return config_dict + + def pixel_angles(self, origin=ct.zeros_3): + assert len(origin) == 3, "origin must have 3 elemnts" + pix_i, pix_j = self.pixel_coords + xy = np.ascontiguousarray( + np.vstack([ + pix_j.flatten(), pix_i.flatten() + ]).T + ) + angs, g_vec = detectorXYToGvec( + xy, self.rmat, ct.identity_3x3, + self.tvec, ct.zeros_3, origin, + beamVec=self.bvec, etaVec=self.evec) + del(g_vec) + tth = angs[0].reshape(self.rows, self.cols) + eta = angs[1].reshape(self.rows, self.cols) + return tth, eta + + def cartToPixel(self, xy_det, pixels=False): + """ + Convert vstacked array or list of [x,y] points in the center-based + cartesian frame {Xd, Yd, Zd} to (i, j) edge-based indices + + i is the row index, measured from the upper-left corner + j is the col index, measured from the upper-left corner + + if pixels=True, then (i,j) are integer pixel indices. + else (i,j) are continuous coords + """ + xy_det = np.atleast_2d(xy_det) + + npts = len(xy_det) + + tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) + i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 + j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 + + ij_det = np.vstack([i_pix, j_pix]).T + if pixels: + ij_det = np.array(np.round(ij_det), dtype=int) + return ij_det + + def pixelToCart(self, ij_det): + """ + Convert vstacked array or list of [i,j] pixel indices + (or UL corner-based points) and convert to (x,y) in the + cartesian frame {Xd, Yd, Zd} + """ + ij_det = np.atleast_2d(ij_det) + + x = (ij_det[:, 1] + 0.5)*self.pixel_size_col\ + + self.corner_ll[0] + y = (self.rows - ij_det[:, 0] - 0.5)*self.pixel_size_row\ + + self.corner_ll[1] + return np.vstack([x, y]).T + + def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): + """ + Wraps xrdutil.angularPixelSize + """ + # munge kwargs + if rMat_s is None: + rMat_s = ct.identity_3x3 + if tVec_s is None: + tVec_s = ct.zeros_3x1 + if tVec_c is None: + tVec_c = ct.zeros_3x1 + + # call function + ang_ps = xrdutil.angularPixelSize( + xy, (self.pixel_size_row, self.pixel_size_col), + self.rmat, rMat_s, + self.tvec, tVec_s, tVec_c, + distortion=self.distortion, + beamVec=self.bvec, etaVec=self.evec) + return ang_ps + + def clip_to_panel(self, xy, buffer_edges=True): + """ + if self.roi is not None, uses it by default + + TODO: check if need shape kwarg + TODO: optimize ROI search better than list comprehension below + TODO: panel_buffer can be a 2-d boolean mask, but needs testing + + """ + xy = np.atleast_2d(xy) + + if self.roi is not None: + ij_crds = self.cartToPixel(xy, pixels=True) + ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], + shape=(self.rows, self.cols)) + on_panel_rows = [i in ii for i in ij_crds[:, 0]] + on_panel_cols = [j in jj for j in ij_crds[:, 1]] + on_panel = np.logical_and(on_panel_rows, on_panel_cols) + else: + 
xlim = 0.5*self.col_dim + ylim = 0.5*self.row_dim + if buffer_edges and self.panel_buffer is not None: + # ok, panel_buffer should be array-like + if self.panel_buffer.ndim == 2: + pix = self.cartToPixel(xy, pixels=True) + + roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) + coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) + + idx = np.logical_or(roff, coff) + + pix[idx, :] = 0 + + on_panel = self.panel_buffer[pix[:, 0], pix[:, 1]] + on_panel[idx] = False + else: + xlim -= self.panel_buffer[0] + ylim -= self.panel_buffer[1] + on_panel_x = np.logical_and( + xy[:, 0] >= -xlim, xy[:, 0] <= xlim + ) + on_panel_y = np.logical_and( + xy[:, 1] >= -ylim, xy[:, 1] <= ylim + ) + on_panel = np.logical_and(on_panel_x, on_panel_y) + elif not buffer_edges or self.panel_buffer is None: + on_panel_x = np.logical_and( + xy[:, 0] >= -xlim, xy[:, 0] <= xlim + ) + on_panel_y = np.logical_and( + xy[:, 1] >= -ylim, xy[:, 1] <= ylim + ) + on_panel = np.logical_and(on_panel_x, on_panel_y) + return xy[on_panel, :], on_panel + + def cart_to_angles(self, xy_data): + """ + TODO: distortion + """ + rmat_s = ct.identity_3x3 + tvec_s = ct.zeros_3 + tvec_c = ct.zeros_3 + angs, g_vec = detectorXYToGvec( + xy_data, self.rmat, rmat_s, + self.tvec, tvec_s, tvec_c, + beamVec=self.bvec, etaVec=self.evec) + tth_eta = np.vstack([angs[0], angs[1]]).T + return tth_eta, g_vec + + def angles_to_cart(self, tth_eta): + """ + TODO: distortion + """ + rmat_s = rmat_c = ct.identity_3x3 + tvec_s = tvec_c = ct.zeros_3 + + angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))]) + + xy_det = gvecToDetectorXY( + anglesToGVec(angs, bHat_l=self.bvec, eHat_l=self.evec), + self.rmat, rmat_s, rmat_c, + self.tvec, tvec_s, tvec_c, + beamVec=self.bvec) + return xy_det + + def interpolate_nearest(self, xy, img, pad_with_nans=True): + """ + TODO: revisit normalization in here? + + """ + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert is_2d and right_shape,\ + "input image must be 2-d with shape (%d, %d)"\ + % (self.rows, self.cols) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan*np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + + # get pixel indices of clipped points + i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + + # next interpolate across cols + int_vals = img[i_src, j_src] + int_xy[on_panel] = int_vals + return int_xy + + def interpolate_bilinear(self, xy, img, pad_with_nans=True): + """ + Interpolates an image array at the specified cartesian points. + + !!! the `xy` input is in *unwarped* detector coords! + + TODO: revisit normalization in here? + """ + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert is_2d and right_shape,\ + "input image must be 2-d with shape (%d, %d)"\ + % (self.rows, self.cols) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan*np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + + # grab fractional pixel indices of clipped points + ij_frac = self.cartToPixel(xy_clip) + + # get floors/ceils from array of pixel _centers_ + # and fix indices running off the pixel centers + # !!! 
+    def interpolate_bilinear(self, xy, img, pad_with_nans=True):
+        """
+        Interpolates an image array at the specified cartesian points.
+
+        !!! the `xy` input is in *unwarped* detector coords!
+
+        TODO: revisit normalization in here?
+        """
+        is_2d = img.ndim == 2
+        right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
+        assert is_2d and right_shape,\
+            "input image must be 2-d with shape (%d, %d)"\
+            % (self.rows, self.cols)
+
+        # initialize output with nans
+        if pad_with_nans:
+            int_xy = np.nan*np.ones(len(xy))
+        else:
+            int_xy = np.zeros(len(xy))
+
+        # clip away points too close to or off the edges of the detector
+        xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
+
+        # grab fractional pixel indices of clipped points
+        ij_frac = self.cartToPixel(xy_clip)
+
+        # get floors/ceils from array of pixel _centers_
+        # and fix indices running off the pixel centers
+        # !!! notice we already clipped points to the panel!
+        i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
+        i_floor_img = _fix_indices(i_floor, 0, self.rows - 1)
+
+        j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
+        j_floor_img = _fix_indices(j_floor, 0, self.cols - 1)
+
+        # ceilings from floors
+        i_ceil = i_floor + 1
+        i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1)
+
+        j_ceil = j_floor + 1
+        j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1)
+
+        # first interpolate at top/bottom rows
+        row_floor_int = \
+            (j_ceil - ij_frac[:, 1])*img[i_floor_img, j_floor_img] \
+            + (ij_frac[:, 1] - j_floor)*img[i_floor_img, j_ceil_img]
+        row_ceil_int = \
+            (j_ceil - ij_frac[:, 1])*img[i_ceil_img, j_floor_img] \
+            + (ij_frac[:, 1] - j_floor)*img[i_ceil_img, j_ceil_img]
+
+        # next interpolate across cols
+        int_vals = \
+            (i_ceil - ij_frac[:, 0])*row_floor_int \
+            + (ij_frac[:, 0] - i_floor)*row_ceil_int
+        int_xy[on_panel] = int_vals
+        return int_xy
+
+    def make_powder_rings(
+            self, pd, merge_hkls=False, delta_tth=None,
+            delta_eta=10., eta_period=None,
+            rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,
+            tvec_c=ct.zeros_3, full_output=False):
+        """
+        !!! this assumes that rmat_s is built from (chi, ome),
+        !!! as is the case for HEDM
+        """
+        # convert delta_eta to radians up front; used in both branches below
+        del_eta = np.radians(delta_eta)
+
+        # in case you want to give it tth angles directly
+        if hasattr(pd, '__len__'):
+            tth = np.array(pd).flatten()
+            if delta_tth is None:
+                raise RuntimeError(
+                    "If supplying a 2theta list as first arg, "
+                    + "must supply a delta_tth")
+            sector_vertices = np.tile(
+                0.5*np.radians([-delta_tth, -delta_eta,
+                                -delta_tth, delta_eta,
+                                delta_tth, delta_eta,
+                                delta_tth, -delta_eta,
+                                0.0, 0.0]), (len(tth), 1)
+            )
+        else:
+            # Okay, we have a PlaneData object
+            try:
+                pd = PlaneData.makeNew(pd)  # make a copy to munge
+            except TypeError:
+                # !!! have some other object here, likely a dummy plane data
+                # object of some sort...
+                pass
+
+            if delta_tth is not None:
+                pd.tThWidth = np.radians(delta_tth)
+            else:
+                delta_tth = np.degrees(pd.tThWidth)
+
+            # do merging if asked
+            if merge_hkls:
+                _, tth_ranges = pd.getMergedRanges()
+                tth = np.array([0.5*sum(i) for i in tth_ranges])
+            else:
+                tth_ranges = pd.getTThRanges()
+                tth = pd.getTTh()
+            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
+            sector_vertices = np.vstack(
+                [[i[0], -del_eta,
+                  i[0], del_eta,
+                  i[1], del_eta,
+                  i[1], -del_eta,
+                  0.0, 0.0]
+                 for i in tth_pm])
+
+        # for generating rings, make eta vector in correct period
+        if eta_period is None:
+            eta_period = (-np.pi, np.pi)
+        neta = int(360./float(delta_eta))
+
+        # this is the vector of ETA EDGES
+        eta_edges = mapAngle(
+            np.radians(
+                delta_eta*np.linspace(0., neta, num=neta + 1)
+            ) + eta_period[0],
+            eta_period
+        )
+
+        # get eta bin centers from edges
+        """
+        # !!! this way is probably overkill, since we have delta eta
+        eta_centers = np.average(
+            np.vstack([eta_edges[:-1], eta_edges[1:]]),
+            axis=0)
+        """
+        # !!! should be safe as eta_edges are monotonic
+        eta_centers = eta_edges[:-1] + 0.5*del_eta
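For reference, the eta binning just built can be reproduced in isolation (hypothetical `delta_eta` of 10 degrees; the `mapAngle` wrap into the period is omitted here):

```python
import numpy as np

delta_eta = 10.
neta = int(360./delta_eta)
# edges span the full period, offset to start at -pi
eta_edges = np.radians(delta_eta*np.linspace(0., neta, num=neta + 1)) - np.pi
# centers are simply the edges shifted by half a bin
eta_centers = eta_edges[:-1] + 0.5*np.radians(delta_eta)
assert len(eta_centers) == neta
```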
+        # !!! get chi and ome from rmat_s
+        # chi = np.arctan2(rmat_s[2, 1], rmat_s[1, 1])
+        ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
+
+        # make list of angle tuples
+        angs = [
+            np.vstack(
+                [i*np.ones(neta), eta_centers, ome*np.ones(neta)]
+            ) for i in tth
+        ]
+
+        # need xy coords and pixel sizes
+        valid_ang = []
+        valid_xy = []
+        map_indices = []
+        npp = 5  # [ll, ul, ur, lr, center]
+        for i_ring in range(len(angs)):
+            # expand angles to patch vertices
+            these_angs = angs[i_ring].T
+            patch_vertices = (
+                np.tile(these_angs[:, :2], (1, npp))
+                + np.tile(sector_vertices[i_ring], (neta, 1))
+            ).reshape(npp*neta, 2)
+
+            # duplicate ome array
+            ome_dupl = np.tile(
+                these_angs[:, 2], (npp, 1)
+            ).T.reshape(npp*neta, 1)
+
+            # find vertices that all fall on the panel
+            gVec_ring_l = anglesToGVec(
+                np.hstack([patch_vertices, ome_dupl]),
+                bHat_l=self.bvec)
+            all_xy = gvecToDetectorXY(
+                gVec_ring_l,
+                self.rmat, rmat_s, ct.identity_3x3,
+                self.tvec, tvec_s, tvec_c,
+                beamVec=self.bvec)
+            _, on_panel = self.clip_to_panel(all_xy)
+
+            # all vertices must be on...
+            patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
+            patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
+
+            # the surviving indices
+            idx = np.where(patch_is_on)[0]
+
+            # form output arrays
+            valid_ang.append(these_angs[patch_is_on, :2])
+            valid_xy.append(patch_xys[:, -1, :].squeeze())
+            map_indices.append(idx)
+            pass
+        # ??? is this option necessary?
+        if full_output:
+            return valid_ang, valid_xy, map_indices, eta_edges
+        else:
+            return valid_ang, valid_xy
+
+    def map_to_plane(self, pts, rmat, tvec):
+        """
+        map detector points to the specified plane
+
+        by convention:
+
+            n * (u*pts_l - tvec) = 0
+
+            [pts]_l = rmat*[pts]_m + tvec
+        """
+        # arg munging
+        pts = np.atleast_2d(pts)
+        npts = len(pts)
+
+        # map plane normal & translation vector, LAB FRAME
+        nvec_map_lab = rmat[:, 2].reshape(3, 1)
+        tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)
+        tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)
+
+        # put pts as 3-d in panel CS and transform to 3-d lab coords
+        pts_det = np.hstack([pts, np.zeros((npts, 1))])
+        pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab
+
+        # scaling along pts vectors to hit map plane
+        u = np.dot(nvec_map_lab.T, tvec_map_lab) \
+            / np.dot(nvec_map_lab.T, pts_lab)
+
+        # pts on map plane, in LAB FRAME
+        pts_map_lab = np.tile(u, (3, 1)) * pts_lab
+
+        return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T
+
+    def simulate_rotation_series(self, plane_data, grain_param_list,
+                                 eta_ranges=[(-np.pi, np.pi), ],
+                                 ome_ranges=[(-np.pi, np.pi), ],
+                                 ome_period=(-np.pi, np.pi),
+                                 chi=0., tVec_s=ct.zeros_3,
+                                 wavelength=None):
+        """
+        """
+
+        # grab B-matrix from plane data
+        bMat = plane_data.latVecOps['B']
+
+        # reconcile wavelength
+        # * added sanity check on exclusions here; possible to
+        # * make some reflections invalid (NaN)
+        if wavelength is None:
+            wavelength = plane_data.wavelength
+        else:
+            if plane_data.wavelength != wavelength:
+                plane_data.wavelength = ct.keVToAngstrom(wavelength)
+            assert not np.any(np.isnan(plane_data.getTTh())),\
+                "plane data exclusions incompatible with wavelength"
+
+        # vstacked G-vector id, h, k, l
+        full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data)
+
+        """ LOOP OVER GRAINS """
+        valid_ids = []
+        valid_hkls = []
+        valid_angs = []
+        valid_xys = []
+        ang_pixel_size = []
+        for gparm in grain_param_list:
+
+            # make useful parameters
+            rMat_c = makeRotMatOfExpMap(gparm[:3])
+            tVec_c = gparm[3:6]
+            vInv_s = gparm[6:]
+
+            # All possible Bragg conditions as vstacked [tth, eta, ome]
+            # for
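The scaling in `map_to_plane` follows from requiring the scaled point `u*p` to satisfy the plane equation. A quick standalone check (hypothetical normal, translation, and point):

```python
import numpy as np

n = np.array([0., 0., 1.])       # plane normal, lab frame (hypothetical)
t = np.array([0., 0., 10.])      # plane origin translation (hypothetical)
p = np.array([0.3, -0.2, 1.0])   # lab-frame vector through a detector point

u = np.dot(n, t) / np.dot(n, p)  # the scale factor used in the method
p_on_plane = u * p
assert abs(np.dot(n, p_on_plane - t)) < 1e-12   # lies on the plane
```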
each omega solution + angList = np.vstack( + oscillAnglesOfHKLs( + full_hkls[:, 1:], chi, + rMat_c, bMat, wavelength, + vInv=vInv_s, + ) + ) + + # filter by eta and omega ranges + # ??? get eta range from detector? + allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( + full_hkls, angList, eta_ranges, ome_ranges + ) + allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) + + # find points that fall on the panel + det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + allAngs, + self.rmat, rMat_c, chi, + self.tvec, tVec_c, tVec_s, + self.distortion) + xys_p, on_panel = self.clip_to_panel(det_xy) + valid_xys.append(xys_p) + + # filter angs and hkls that are on the detector plane + # !!! check this -- seems unnecessary but the results of + # _project_on_detector_plane() can have len < the input. + # the output of _project_on_detector_plane has been modified to + # hand back the index array to remedy this JVB 2020-05-27 + filtered_angs = np.atleast_2d(allAngs[on_plane, :]) + filtered_hkls = np.atleast_2d(allHKLs[on_plane, :]) + + # grab hkls and gvec ids for this panel + valid_hkls.append(filtered_hkls[on_panel, 1:]) + valid_ids.append(filtered_hkls[on_panel, 0]) + + # reflection angles (voxel centers) and pixel size in (tth, eta) + valid_angs.append(filtered_angs[on_panel, :]) + ang_pixel_size.append(self.angularPixelSize(xys_p)) + return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size + + def simulate_laue_pattern(self, crystal_data, + minEnergy=5., maxEnergy=35., + rmat_s=None, tvec_s=None, + grain_params=None, + beam_vec=None): + """ + """ + if isinstance(crystal_data, PlaneData): + + plane_data = crystal_data + + # grab the expanded list of hkls from plane_data + hkls = np.hstack(plane_data.getSymHKLs()) + + # and the unit plane normals (G-vectors) in CRYSTAL FRAME + gvec_c = np.dot(plane_data.latVecOps['B'], hkls) + elif len(crystal_data) == 2: + # !!! should clean this up + hkls = np.array(crystal_data[0]) + bmat = crystal_data[1] + gvec_c = np.dot(bmat, hkls) + else: + raise(RuntimeError, 'argument list not understood') + nhkls_tot = hkls.shape[1] + + # parse energy ranges + # TODO: allow for spectrum parsing + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len(minEnergy), \ + 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [] + lmax = [] + for i in range(len(maxEnergy)): + lmin.append(ct.keVToAngstrom(maxEnergy[i])) + lmax.append(ct.keVToAngstrom(minEnergy[i])) + else: + lmin = ct.keVToAngstrom(maxEnergy) + lmax = ct.keVToAngstrom(minEnergy) + + # parse grain parameters kwarg + if grain_params is None: + grain_params = np.atleast_2d( + np.hstack([np.zeros(6), ct.identity_6x1]) + ) + n_grains = len(grain_params) + + # sample rotation + if rmat_s is None: + rmat_s = ct.identity_3x3 + + # dummy translation vector... 
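The energy-range parsing above swaps min and max deliberately: E[keV] * lambda[A] = hc ~= 12.398, so the highest energy gives the shortest wavelength. A quick check of that, together with the Bragg relation lambda = 2*d*sin(theta) used below to filter reflections:

```python
import numpy as np

hc_keV_A = 12.39842           # hc in keV*Angstrom (approximate)

lmin = hc_keV_A/35.           # ~0.354 A, from the *max* energy
lmax = hc_keV_A/5.            # ~2.480 A, from the *min* energy
assert lmin < lmax

# a reflection with d = 1.0 A diffracting at 2theta = 20.5 deg:
d, tth = 1.0, np.radians(20.5)
wlen = 2.*d*np.sin(0.5*tth)   # ~0.356 A
assert lmin <= wlen <= lmax   # passes the energy window
```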
make input + if tvec_s is None: + tvec_s = ct.zeros_3 + + # beam vector + if beam_vec is None: + beam_vec = ct.beam_vec + + # ========================================================================= + # LOOP OVER GRAINS + # ========================================================================= + + # pre-allocate output arrays + xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan*np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan*np.ones((n_grains, nhkls_tot)) + energy = np.nan*np.ones((n_grains, nhkls_tot)) + for iG, gp in enumerate(grain_params): + rmat_c = makeRotMatOfExpMap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) + ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) + + # project + dpts = gvecToDetectorXY(ghat_c_str.T, + self.rmat, rmat_s, rmat_c, + self.tvec, tvec_s, tvec_c, + beamVec=beam_vec) + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[:, 0]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[canIntersect, :].reshape(npts_in, 2) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + # back to angles + tth_eta, gvec_l = detectorXYToGvec( + dpts, + self.rmat, rmat_s, + self.tvec, tvec_s, tvec_c, + beamVec=beam_vec) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if self.distortion is not None: + if len(self.distortion) == 2: + dpts = self.distortion[0]( + dpts, self.distortion[1], + invert=True) + else: + raise(RuntimeError, + "something is wrong with the distortion") + + # plane spacings and energies + dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T) + wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0]) + + # clip to detector panel + _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) + + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + in_energy_range = np.logical_and( + wlen >= lmin[i], + wlen <= lmax[i]) + validEnergy = validEnergy | in_energy_range + pass + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + pass + + # index for valid reflections + keepers = np.where(np.logical_and(on_panel, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[keepers, :] + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) + pass # close conditional on valids + pass # close loop on grains + return xy_det, hkls_in, angles, dspacing, energy + +# ============================================================================= +# UTILITIES +# ============================================================================= + + +class PatchDataWriter(object): + """ + """ + def __init__(self, filename): + self._delim = ' ' + header_items = ( + '# ID', 'PID', + 'H', 'K', 'L', + 'sum(int)', 'max(int)', + 'pred tth', 'pred eta', 'pred ome', + 'meas tth', 'meas eta', 'meas ome', + 'pred X', 'pred Y', + 'meas X', 'meas Y' + ) + self._header = self._delim.join([ + self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), + self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), + self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) + ]) + if isinstance(filename, file): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def 
__del__(self):
+        self.close()
+
+    def close(self):
+        self.fid.close()
+
+    def dump_patch(self, peak_id, hkl_id,
+                   hkl, spot_int, max_int,
+                   pangs, mangs, pxy, mxy):
+        """
+        !!! maybe need to check that last four inputs are arrays
+        """
+        if mangs is None:
+            spot_int = np.nan
+            max_int = np.nan
+            mangs = np.ones(3)*np.nan
+            mxy = np.ones(2)*np.nan
+
+        res = [int(peak_id), int(hkl_id)] \
+            + np.array(hkl, dtype=int).tolist() \
+            + [spot_int, max_int] \
+            + pangs.tolist() \
+            + mangs.tolist() \
+            + pxy.tolist() \
+            + mxy.tolist()
+
+        output_str = self._delim.join(
+            [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]),
+             self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]),
+             self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])]
+        )
+        print(output_str, file=self.fid)
+        return output_str
+
+
+class GrainDataWriter(object):
+    """
+    """
+    def __init__(self, filename):
+        self._delim = ' '
+        header_items = (
+            '# grain ID', 'completeness', 'chi^2',
+            'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]',
+            't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]',
+            'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]',
+            'inv(V_s)[1,2]*sqrt(2)',
+            'inv(V_s)[0,2]*sqrt(2)',
+            'inv(V_s)[0,1]*sqrt(2)',
+            'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]',
+            'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]'
+        )
+        self._header = self._delim.join(
+            [self._delim.join(
+                np.tile('{:<12}', 3)
+            ).format(*header_items[:3]),
+             self._delim.join(
+                np.tile('{:<23}', len(header_items) - 3)
+             ).format(*header_items[3:])]
+        )
+        if isinstance(filename, file):
+            self.fid = filename
+        else:
+            self.fid = open(filename, 'w')
+        print(self._header, file=self.fid)
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        self.fid.close()
+
+    def dump_grain(self, grain_id, completeness, chisq,
+                   grain_params):
+        assert len(grain_params) == 12, \
+            "len(grain_params) must be 12, not %d" % len(grain_params)
+
+        # extract strain
+        emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:])))
+        evec = mutil.symmToVecMV(emat, scale=False)
+
+        res = [int(grain_id), completeness, chisq] \
+            + grain_params.tolist() \
+            + evec.tolist()
+        output_str = self._delim.join(
+            [self._delim.join(
+                ['{:<12d}', '{:<12f}', '{:<12e}']
+            ).format(*res[:3]),
+             self._delim.join(
+                np.tile('{:<23.16e}', len(res) - 3)
+             ).format(*res[3:])]
+        )
+        print(output_str, file=self.fid)
+        return output_str
+
+
+class GrainDataWriter_h5(object):
+    """
+    TODO: add material spec
+    """
+    def __init__(self, filename, instr_cfg, grain_params, use_attr=False):
+
+        if isinstance(filename, h5py.File):
+            self.fid = filename
+        else:
+            self.fid = h5py.File(filename + ".hdf5", "w")
+        icfg = dict(instr_cfg)
+
+        # add instrument groups and attributes
+        self.instr_grp = self.fid.create_group('instrument')
+        unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr)
+
+        # add grain group
+        self.grain_grp = self.fid.create_group('grain')
+        rmat_c = makeRotMatOfExpMap(grain_params[:3])
+        tvec_c = np.array(grain_params[3:6]).flatten()
+        vinv_s = np.array(grain_params[6:]).flatten()
+        vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s))
+
+        if use_attr:    # attribute version
+            self.grain_grp.attrs.create('rmat_c', rmat_c)
+            self.grain_grp.attrs.create('tvec_c', tvec_c)
+            self.grain_grp.attrs.create('inv(V)_s', vinv_s)
+            self.grain_grp.attrs.create('vmat_s', vmat_s)
+        else:    # dataset version
+            self.grain_grp.create_dataset('rmat_c', data=rmat_c)
+            self.grain_grp.create_dataset('tvec_c', data=tvec_c)
+            self.grain_grp.create_dataset('inv(V)_s', data=vinv_s)
+            self.grain_grp.create_dataset('vmat_s', data=vmat_s)
+
+        data_key = 'reflection_data'
+        self.data_grp = self.fid.create_group(data_key)
+
+        for det_key in self.instr_grp['detectors'].keys():
+            self.data_grp.create_group(det_key)
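The strain extraction used by `dump_grain` can be sketched standalone (hypothetical `inv(V_s)` components, Mandel-Voigt order with the sqrt(2) scaling on the off-diagonals, as in `vecMVToSymm`):

```python
import numpy as np
from scipy.linalg import logm

# hypothetical inv(V_s) in Mandel-Voigt order [00, 11, 22, 12, 02, 01]
vinv = np.array([1.001, 0.999, 1.000, 5e-4, 0., 0.])
f = np.sqrt(2.)
vinv_t = np.array([[vinv[0],   vinv[5]/f, vinv[4]/f],
                   [vinv[5]/f, vinv[1],   vinv[3]/f],
                   [vinv[4]/f, vinv[3]/f, vinv[2]]])

# invert the stretch and take the matrix log: ln(V_s), the Hencky strain
emat = logm(np.linalg.inv(vinv_t))
# the off-diagonals are then written out *unscaled*, cf. symmToVecMV(..., scale=False)
```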
+    # FIXME: throws exception when called after close method
+    # def __del__(self):
+    #     self.close()
+
+    def close(self):
+        self.fid.close()
+
+    def dump_patch(self, panel_id,
+                   i_refl, peak_id, hkl_id, hkl,
+                   tth_edges, eta_edges, ome_centers,
+                   xy_centers, ijs, frame_indices,
+                   spot_data, pangs, pxy, mangs, mxy, gzip=1):
+        """
+        to be called inside loop over patches
+
+        default GZIP level for data arrays is 1
+        """
+        fi = np.array(frame_indices, dtype=int)
+
+        panel_grp = self.data_grp[panel_id]
+        spot_grp = panel_grp.create_group("spot_%05d" % i_refl)
+        spot_grp.attrs.create('peak_id', int(peak_id))
+        spot_grp.attrs.create('hkl_id', int(hkl_id))
+        spot_grp.attrs.create('hkl', np.array(hkl, dtype=int))
+        spot_grp.attrs.create('predicted_angles', pangs)
+        spot_grp.attrs.create('predicted_xy', pxy)
+        if mangs is None:
+            mangs = np.nan*np.ones(3)
+        spot_grp.attrs.create('measured_angles', mangs)
+        if mxy is None:
+            mxy = np.nan*np.ones(2)  # (x, y) pair, matching predicted_xy
+        spot_grp.attrs.create('measured_xy', mxy)
+
+        # get center coords from edge arrays
+        # FIXME: export full coordinate arrays, or just center vectors???
+        #
+        # ome_crd, eta_crd, tth_crd = np.meshgrid(
+        #     ome_centers,
+        #     centers_of_edge_vec(eta_edges),
+        #     centers_of_edge_vec(tth_edges),
+        #     indexing='ij')
+        #
+        # ome_dim, eta_dim, tth_dim = spot_data.shape
+
+        # !!! for now just exporting center vectors for spot_data
+        tth_crd = centers_of_edge_vec(tth_edges)
+        eta_crd = centers_of_edge_vec(eta_edges)
+
+        shuffle_data = True  # reduces size by 20%
+        spot_grp.create_dataset('tth_crd', data=tth_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('eta_crd', data=eta_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ome_crd', data=ome_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('xy_centers', data=xy_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ij_centers', data=ijs,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('frame_indices', data=fi,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('intensities', data=spot_data,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        return
+
+
+def unwrap_dict_to_h5(grp, d, asattr=True):
+    while len(d) > 0:
+        key, item = d.popitem()
+        if isinstance(item, dict):
+            subgrp = grp.create_group(key)
+            # propagate asattr so nested dicts honor the caller's choice
+            unwrap_dict_to_h5(subgrp, item, asattr=asattr)
+        else:
+            if asattr:
+                grp.attrs.create(key, item)
+            else:
+                grp.create_dataset(key, data=np.atleast_1d(item))
+
+
+class GenerateEtaOmeMaps(object):
+    """
+    eta-ome map class derived from new image_series and YAML config
+
+    ...for now...
+ + must provide: + + self.dataStore + self.planeData + self.iHKLList + self.etaEdges # IN RADIANS + self.omeEdges # IN RADIANS + self.etas # IN RADIANS + self.omegas # IN RADIANS + + """ + def __init__(self, image_series_dict, instrument, plane_data, + active_hkls=None, eta_step=0.25, threshold=None, + ome_period=(0, 360)): + """ + image_series must be OmegaImageSeries class + instrument_params must be a dict (loaded from yaml spec) + active_hkls must be a list (required for now) + """ + + self._planeData = plane_data + + # ???: change name of iHKLList? + # ???: can we change the behavior of iHKLList? + if active_hkls is None: + n_rings = len(plane_data.getTTh()) + self._iHKLList = range(n_rings) + else: + self._iHKLList = active_hkls + n_rings = len(active_hkls) + + # ???: need to pass a threshold? + eta_mapping, etas = instrument.extract_polar_maps( + plane_data, image_series_dict, + active_hkls=active_hkls, threshold=threshold, + tth_tol=None, eta_tol=eta_step) + + # grab a det key + # WARNING: this process assumes that the imageseries for all panels + # have the same length and omegas + det_key = eta_mapping.keys()[0] + data_store = [] + for i_ring in range(n_rings): + full_map = np.zeros_like(eta_mapping[det_key][i_ring]) + nan_mask_full = np.zeros( + (len(eta_mapping), full_map.shape[0], full_map.shape[1]) + ) + i_p = 0 + for det_key, eta_map in eta_mapping.items(): + nan_mask = ~np.isnan(eta_map[i_ring]) + nan_mask_full[i_p] = nan_mask + full_map[nan_mask] += eta_map[i_ring][nan_mask] + i_p += 1 + re_nan_these = np.sum(nan_mask_full, axis=0) == 0 + full_map[re_nan_these] = np.nan + data_store.append(full_map) + self._dataStore = data_store + + # handle omegas + omegas_array = image_series_dict[det_key].metadata['omega'] + self._omegas = mapAngle( + np.radians(np.average(omegas_array, axis=1)), + np.radians(ome_period) + ) + self._omeEdges = mapAngle( + np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), + np.radians(ome_period) + ) + + # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the + # indexer to work properly + if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: + # !!! 
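The per-panel map merge above can be illustrated on a toy 2x2 case: finite entries are summed across panels, and bins that no panel measured are set back to NaN:

```python
import numpy as np

# two hypothetical per-panel eta-ome maps for one ring
maps = [np.array([[1., np.nan], [3., np.nan]]),
        np.array([[np.nan, np.nan], [1., np.nan]])]
full = np.zeros((2, 2))
seen = np.zeros((len(maps), 2, 2), dtype=bool)
for k, m in enumerate(maps):
    mask = ~np.isnan(m)
    seen[k] = mask
    full[mask] += m[mask]          # accumulate finite contributions
full[seen.sum(axis=0) == 0] = np.nan   # re-NaN bins no panel saw
# full -> [[1., nan], [4., nan]]
```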
SIGNED delta ome + del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0]) + self._omeEdges[-1] = self._omeEdges[-2] + del_ome + + # handle etas + # WARNING: unlinke the omegas in imageseries metadata, + # these are in RADIANS and represent bin centers + self._etaEdges = etas + self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step) + + @property + def dataStore(self): + return self._dataStore + + @property + def planeData(self): + return self._planeData + + @property + def iHKLList(self): + return np.atleast_1d(self._iHKLList).flatten() + + @property + def etaEdges(self): + return self._etaEdges + + @property + def omeEdges(self): + return self._omeEdges + + @property + def etas(self): + return self._etas + + @property + def omegas(self): + return self._omegas + + def save(self, filename): + """ + self.dataStore + self.planeData + self.iHKLList + self.etaEdges + self.omeEdges + self.etas + self.omegas + """ + args = np.array(self.planeData.getParams())[:4] + args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom') + hkls = self.planeData.hkls + save_dict = {'dataStore': self.dataStore, + 'etas': self.etas, + 'etaEdges': self.etaEdges, + 'iHKLList': self.iHKLList, + 'omegas': self.omegas, + 'omeEdges': self.omeEdges, + 'planeData_args': args, + 'planeData_hkls': hkls} + np.savez_compressed(filename, **save_dict) + return + pass # end of class: GenerateEtaOmeMaps diff --git a/hexrd/matrixutil.py b/hexrd/matrixutil.py index 0ac24396..c21d6d8f 100644 --- a/hexrd/matrixutil.py +++ b/hexrd/matrixutil.py @@ -1,24 +1,24 @@ # ============================================================ -# Copyright (c) 2007-2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2007-2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, @@ -39,11 +39,17 @@ from scipy.linalg import svd import numpy as num +from hexrd import USE_NUMBA +if USE_NUMBA: + import numba + + # module variables sqr6i = 1./sqrt(6.) sqr3i = 1./sqrt(3.) sqr2i = 1./sqrt(2.) sqr2 = sqrt(2.) +sqr3 = sqrt(3.) sqr2b3 = sqrt(2./3.) 
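Those sqrt factors are what make the Mandel-Voigt 6-vector preserve the tensor inner product, which is why `symmToVecMV`/`vecMVToSymm` below carry them by default. A quick numeric check:

```python
import numpy as np

A = np.array([[1., 4., 5.],
              [4., 2., 6.],
              [5., 6., 3.]])
s = np.sqrt(2.)
mv = np.array([A[0, 0], A[1, 1], A[2, 2],
               s*A[1, 2], s*A[0, 2], s*A[0, 1]])
# Frobenius norm of the tensor equals the 2-norm of its MV vector
assert np.isclose(np.linalg.norm(A), np.linalg.norm(mv))
```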
fpTol = finfo(float).eps # ~2.2e-16 @@ -56,9 +62,9 @@ def columnNorm(a): """ if len(a.shape) > 2: raise RuntimeError, "incorrect shape: arg must be 1-d or 2-d, yours is %d" %(len(a.shape)) - + cnrma = sqrt(sum(asarray(a)**2, 0)) - + return cnrma def rowNorm(a): @@ -67,9 +73,9 @@ def rowNorm(a): """ if len(a.shape) > 2: raise RuntimeError, "incorrect shape: arg must be 1-d or 2-d, yours is %d" %(len(a.shape)) - + cnrma = sqrt(sum(asarray(a)**2, 1)) - + return cnrma def unitVector(a): @@ -77,19 +83,19 @@ def unitVector(a): normalize array of column vectors (hstacked, axis = 0) """ assert a.ndim in [1, 2], "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" % (a.ndim) - + ztol = 1.0e-14 - + m = a.shape[0]; n = 1 - + nrm = tile(sqrt(sum(asarray(a)**2, 0)), (m, n)) - + # prevent divide by zero zchk = nrm <= ztol nrm[zchk] = 1.0 - + nrma = a/nrm - + return nrma def nullSpace(A, tol=vTol): @@ -97,14 +103,14 @@ def nullSpace(A, tol=vTol): computes the null space of the real matrix A """ assert A.ndim == 2, 'input must be 2-d; yours is %d-d' % (A.ndim) - + n, m = A.shape if n > m : return nullSpace(A.T, tol).T U, S, V = svd(A) - + S = hstack([S, zeros(m-n)]) null_mask = (S <= tol) @@ -118,103 +124,111 @@ def blockSparseOfMatArray(matArray): Constructs a block diagonal sparse matrix (csc format) from a (p, m, n) ndarray of p (m, n) arrays - + ...maybe optional args to pick format type? """ # if isinstance(args[0], str): # a = args[0] # if a == 'csc': ... - + if len(matArray.shape) != 3: raise RuntimeError, "input array is not the correct shape!" - + l = matArray.shape[0] m = matArray.shape[1] n = matArray.shape[2] - + mn = m*n; jmax = l*n; imax = l*m; ntot = l*m*n; - - rl = asarray(range(l), 'int') + + rl = asarray(range(l), 'int') rm = asarray(range(m), 'int') rjmax = asarray(range(jmax), 'int') - + sij = matArray.transpose(0, 2, 1).reshape(1, ntot).squeeze() j = reshape(tile(rjmax, (m, 1)).T, (1, ntot)) i = reshape(tile(rm, (1, jmax)), (1, ntot)) + reshape(tile(m*rl, (mn, 1)).T, (1, ntot)) - + ij = concatenate((i, j), 0) - + smat = sparse.csc_matrix((sij, ij), shape=(imax, jmax)) # syntax as of scipy-0.7.0 - + return smat -def symmToVecMV(A): +def symmToVecMV(A, scale=True): """ convert from symmetric matrix to Mandel-Voigt vector representation (JVB) - """ + """ + if scale: + fac = sqr2 + else: + fac = 1. mvvec = zeros(6, dtype='float64') mvvec[0] = A[0,0] mvvec[1] = A[1,1] mvvec[2] = A[2,2] - mvvec[3] = sqr2 * A[1,2] - mvvec[4] = sqr2 * A[0,2] - mvvec[5] = sqr2 * A[0,1] + mvvec[3] = fac * A[1,2] + mvvec[4] = fac * A[0,2] + mvvec[5] = fac * A[0,1] return mvvec -def vecMVToSymm(A): +def vecMVToSymm(A, scale=True): """ - convert from Mandel-Voigt vector to symmetric matrix - representation (JVB) - """ + convert from Mandel-Voigt vector to symmetric matrix + representation (JVB) + """ + if scale: + fac = sqr2 + else: + fac = 1. 
symm = zeros((3, 3), dtype='float64') symm[0, 0] = A[0] symm[1, 1] = A[1] symm[2, 2] = A[2] - symm[1, 2] = A[3] / sqr2 - symm[0, 2] = A[4] / sqr2 - symm[0, 1] = A[5] / sqr2 - symm[2, 1] = A[3] / sqr2 - symm[2, 0] = A[4] / sqr2 - symm[1, 0] = A[5] / sqr2 + symm[1, 2] = A[3] / fac + symm[0, 2] = A[4] / fac + symm[0, 1] = A[5] / fac + symm[2, 1] = A[3] / fac + symm[2, 0] = A[4] / fac + symm[1, 0] = A[5] / fac return symm def vecMVCOBMatrix(R): """ GenerateS array of 6 x 6 basis transformation matrices for the - Mandel-Voigt tensor representation in 3-D given by: - + Mandel-Voigt tensor representation in 3-D given by: + [A] = [[A_11, A_12, A_13], [A_12, A_22, A_23], [A_13, A_23, A_33]] - + {A} = [A_11, A_22, A_33, sqrt(2)*A_23, sqrt(2)*A_13, sqrt(2)*A_12] - + where the operation :math:`R*A*R.T` (in tensor notation) is obtained by the matrix-vector product [T]*{A}. - + USAGE - + T = vecMVCOBMatrix(R) - + INPUTS - + 1) R is (3, 3) an ndarray representing a change of basis matrix - + OUTPUTS - + 1) T is (6, 6), an ndarray of transformation matrices as described above - + NOTES - + 1) Compoments of symmetric 4th-rank tensors transform in a manner analogous to symmetric 2nd-rank tensors in full - matrix notation. + matrix notation. SEE ALSO @@ -230,10 +244,10 @@ def vecMVCOBMatrix(R): raise RuntimeError, \ "R array must be (3, 3) or (n, 3, 3); input has dimension %d" \ % (rdim) - + T = zeros((nrot, 6, 6), dtype='float64') - - T[:, 0, 0] = R[:, 0, 0]**2 + + T[:, 0, 0] = R[:, 0, 0]**2 T[:, 0, 1] = R[:, 0, 1]**2 T[:, 0, 2] = R[:, 0, 2]**2 T[:, 0, 3] = sqr2 * R[:, 0, 1] * R[:, 0, 2] @@ -269,7 +283,7 @@ def vecMVCOBMatrix(R): T[:, 5, 3] = R[:, 0, 2] * R[:, 1, 1] + R[:, 0, 1] * R[:, 1, 2] T[:, 5, 4] = R[:, 0, 0] * R[:, 1, 2] + R[:, 0, 2] * R[:, 1, 0] T[:, 5, 5] = R[:, 0, 1] * R[:, 1, 0] + R[:, 0, 0] * R[:, 1, 1] - + if nrot == 1: T = T.squeeze() @@ -283,68 +297,68 @@ def nrmlProjOfVecMV(vec): Nvec = normalProjectionOfMV(vec) *) the input vector array need not be normalized; it is performed in place - + """ # normalize in place... col vectors! n = unitVector(vec) - - nmat = array([n[0, :]**2, - n[1, :]**2, - n[2, :]**2, - sqr2 * n[1, :] * n[2, :], - sqr2 * n[0, :] * n[2, :], - sqr2 * n[0, :] * n[1, :]], + + nmat = array([n[0, :]**2, + n[1, :]**2, + n[2, :]**2, + sqr2 * n[1, :] * n[2, :], + sqr2 * n[0, :] * n[2, :], + sqr2 * n[0, :] * n[1, :]], dtype='float64') - + return nmat.T def rankOneMatrix(vec1, *args): """ Create rank one matrices (dyadics) from vectors. - + r1mat = rankOneMatrix(vec1) r1mat = rankOneMatrix(vec1, vec2) - + vec1 is m1 x n, an array of n hstacked m1 vectors vec2 is m2 x n, (optional) another array of n hstacked m2 vectors - + r1mat is n x m1 x m2, an array of n rank one matrices formed as c1*c2' from columns c1 and c2 - + With one argument, the second vector is taken to the same as the first. - + Notes: - - *) This routine loops on the dimension m, assuming this + + *) This routine loops on the dimension m, assuming this is much smaller than the number of points, n. """ if len(vec1.shape) > 2: raise RuntimeError, "input vec1 is the wrong shape" - + if (len(args) == 0): vec2 = vec1.copy() else: vec2 = args[0] if len(vec1.shape) > 2: raise RuntimeError, "input vec2 is the wrong shape" - + m1, n1 = asmatrix(vec1).shape m2, n2 = asmatrix(vec2).shape - + if (n1 != n2): raise RuntimeError, "Number of vectors differ in arguments." 
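A compact `einsum` one-liner reproduces what `rankOneMatrix` computes and is handy for sanity checks (shapes as in the docstring above):

```python
import numpy as np

v1 = np.arange(15.).reshape(3, 5)   # 5 hstacked 3-vectors
v2 = np.arange(20.).reshape(4, 5)   # 5 hstacked 4-vectors
r1 = np.einsum('in,jn->nij', v1, v2)   # n dyadics c1*c2'
assert r1.shape == (5, 3, 4)
assert np.allclose(r1[0], np.outer(v1[:, 0], v2[:, 0]))
```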
- + m1m2 = m1 * m2 - + r1mat = zeros((m1m2, n1), dtype='float64') - + mrange = asarray(range(m1), dtype='int') - + for i in range(m2): r1mat[mrange, :] = vec1 * tile(vec2[i, :], (m1, 1)) mrange = mrange + m1 - + r1mat = reshape(r1mat.T, (n1, m2, m1)).transpose(0, 2, 1) return squeeze(r1mat) @@ -362,7 +376,7 @@ def skew(A): if m != n: raise RuntimeError, "this function only works for square arrays; " \ + "yours is (%d, %d)" %(m, n) - A.resize(1, m, n) + A.resize(1, m, n) elif A.ndim == 3: m = A.shape[1] n = A.shape[2] @@ -370,13 +384,13 @@ def skew(A): raise RuntimeError, "this function only works for square arrays" else: raise RuntimeError, "this function only works for square arrays" - + return squeeze(0.5*(A - A.transpose(0, 2, 1))) - + def symm(A): """ symmetric decomposition of n square (m, m) ndarrays. Result - is a (squeezed) (n, m, m) ndarray. + is a (squeezed) (n, m, m) ndarray. """ if not isinstance(A, ndarray): raise RuntimeError, "input argument is of incorrect type; should be numpy ndarray." @@ -387,7 +401,7 @@ def symm(A): if m != n: raise RuntimeError, "this function only works for square arrays; " \ + "yours is (%d, %d)" %(m, n) - A.resize(1, m, n) + A.resize(1, m, n) elif A.ndim == 3: m = A.shape[1] n = A.shape[2] @@ -395,7 +409,7 @@ def symm(A): raise RuntimeError, "this function only works for square arrays" else: raise RuntimeError, "this function only works for square arrays" - + return squeeze(0.5*(A + A.transpose(0, 2, 1))) def skewMatrixOfVector(w): @@ -423,7 +437,7 @@ def skewMatrixOfVector(w): stackdim = w.shape[1] else: raise RuntimeError, 'input is incorrect shape; expecting ndim = 1 or 2' - + zs = zeros((1, stackdim), dtype='float64') W = vstack([ zs, -w[2, :], @@ -434,9 +448,9 @@ def skewMatrixOfVector(w): -w[1, :], w[0, :], zs ]) - + return squeeze(reshape(W.T, (stackdim, 3, 3))) - + def vectorOfSkewMatrix(W): """ vectorOfSkewMatrix(W) @@ -459,11 +473,11 @@ def vectorOfSkewMatrix(W): stackdim = W.shape[0] else: raise RuntimeError, 'input is incorrect shape; expecting (n, 3, 3)' - + w = zeros((3, stackdim), dtype='float64') for i in range(stackdim): w[:, i] = r_[-W[i, 1, 2], W[i, 0, 2], -W[i, 0, 1]] - + return w def multMatArray(ma1, ma2): @@ -472,8 +486,8 @@ def multMatArray(ma1, ma2): """ shp1 = ma1.shape shp2 = ma2.shape - - + + if len(shp1) != 3 or len(shp2) != 3: raise RuntimeError, 'input is incorrect shape; ' \ + 'expecting len(ma1).shape = len(ma2).shape = 3' @@ -483,7 +497,7 @@ def multMatArray(ma1, ma2): if shp1[2] != shp2[1]: raise RuntimeError, 'mismatch on internal matrix dimensions' - + prod = zeros((shp1[0], shp1[1], shp2[2])) for j in range(shp1[0]): prod[j, :, :] = dot( ma1[j, :, :], ma2[j, :, :] ) @@ -495,17 +509,17 @@ def uniqueVectors(v, tol=1.0e-12): Sort vectors and discard duplicates. USAGE: - + uvec = uniqueVectors(vec, tol=1.0e-12) - v -- + v -- tol -- (optional) comparison tolerance D. E. 
Boyce 2010-03-18 """ - + vdims = v.shape - + iv = zeros(vdims) iv2 = zeros(vdims, dtype="bool") bsum = zeros((vdims[1], ), dtype="bool") @@ -513,18 +527,18 @@ def uniqueVectors(v, tol=1.0e-12): tmpord = num.argsort(v[row, :]).tolist() tmpsrt = v[ix_([row], tmpord)].squeeze() tmpcmp = abs(tmpsrt[1:] - tmpsrt[0:-1]) - indep = num.hstack([True, tmpcmp > tol]) # independent values + indep = num.hstack([True, tmpcmp > tol]) # independent values rowint = indep.cumsum() iv[ix_([row], tmpord)] = rowint pass - + # # Dictionary sort from bottom up # iNum = num.lexsort(iv) ivSrt = iv[:, iNum] vSrt = v[:, iNum] - + ivInd = zeros(vdims[1], dtype='int') nUniq = 1; ivInd[0] = 0 for col in range(1, vdims[1]): @@ -533,61 +547,61 @@ def uniqueVectors(v, tol=1.0e-12): nUniq += 1 pass pass - + return vSrt[:, ivInd[0:nUniq]] def findDuplicateVectors(vec, tol=vTol, equivPM=False): """ Find vectors in an array that are equivalent to within a specified tolerance - + USAGE: - + eqv = DuplicateVectors(vec, *tol) - + INPUT: - + 1) vec is n x m, a double array of m horizontally concatenated n-dimensional vectors. *2) tol is 1 x 1, a scalar tolerance. If not specified, the default tolerance is 1e-14. *3) set equivPM to True if vec and -vec are to be treated as equivalent - + OUTPUT: - + 1) eqv is 1 x p, a list of p equivalence relationships. - + NOTES: - + Each equivalence relationship is a 1 x q vector of indices that represent the locations of duplicate columns/entries in the array vec. For example: - + | 1 2 2 2 1 2 7 | vec = | | | 2 3 5 3 2 3 3 | - + eqv = [[1x2 double] [1x3 double]], where - + eqv[0] = [0 4] eqv[1] = [1 3 5] """ - + vlen = vec.shape[1] vlen0 = vlen orid = asarray(range(vlen), dtype="int") torid = orid.copy() tvec = vec.copy() - + eqv = [] eqvTot = 0 uid = 0 - + ii = 1 while vlen > 1 and ii < vlen0: dupl = tile(tvec[:, 0], (vlen, 1)) - + if not equivPM: diff = abs(tvec - dupl.T).sum(0) match = abs(diff[1:]) <= tol # logical to find duplicates @@ -597,25 +611,25 @@ def findDuplicateVectors(vec, tol=vTol, equivPM=False): diffp = abs(tvec + dupl.T).sum(0) matchp = abs(diffp[1:]) <= tol match = matchn + matchp - + kick = hstack([True, match]) # pick self too - + if kick.sum() > 1: eqv += [torid[kick].tolist()] eqvTot = hstack( [ eqvTot, torid[kick] ] ) uid = hstack( [ uid, torid[kick][0] ] ) - + cmask = ones((vlen,)) cmask[kick] = 0 - cmask = cmask != 0 - + cmask = cmask != 0 + tvec = tvec[:, cmask] torid = torid[cmask] - + vlen = tvec.shape[1] - ii += 1 + ii += 1 if len(eqv) == 0: eqvTot = [] @@ -623,7 +637,7 @@ def findDuplicateVectors(vec, tol=vTol, equivPM=False): else: eqvTot = eqvTot[1:].tolist() uid = uid[1:].tolist() - + # find all single-instance vectors singles = sort( setxor1d( eqvTot, range(vlen0) ) ) @@ -632,7 +646,7 @@ def findDuplicateVectors(vec, tol=vTol, equivPM=False): # make sure is a 1D list if not hasattr(uid,'__len__'): uid = [uid] - + return eqv, uid def normvec(v): @@ -663,3 +677,164 @@ def determinant3(mat): det = sum(mat[2,:] * v[:]) return det +def strainTenToVec(strainTen): + + strainVec = num.zeros(6, dtype='float64') + strainVec[0] = strainTen[0, 0] + strainVec[1] = strainTen[1, 1] + strainVec[2] = strainTen[2, 2] + strainVec[3] = 2*strainTen[1, 2] + strainVec[4] = 2*strainTen[0, 2] + strainVec[5] = 2*strainTen[0, 1] + + strainVec=num.atleast_2d(strainVec).T + + return strainVec + +def strainVecToTen(strainVec): + + strainTen = num.zeros((3, 3), dtype='float64') + strainTen[0, 0] = strainVec[0] + strainTen[1, 1] = strainVec[1] + strainTen[2, 2] = strainVec[2] + 
strainTen[1, 2] = strainVec[3] / 2 + strainTen[0, 2] = strainVec[4] / 2 + strainTen[0, 1] = strainVec[5] / 2 + strainTen[2, 1] = strainVec[3] / 2 + strainTen[2, 0] = strainVec[4] / 2 + strainTen[1, 0] = strainVec[5] / 2 + + return strainTen + + +def stressTenToVec(stressTen): + + stressVec = num.zeros(6, dtype='float64') + stressVec[0] = stressTen[0, 0] + stressVec[1] = stressTen[1, 1] + stressVec[2] = stressTen[2, 2] + stressVec[3] = stressTen[1, 2] + stressVec[4] = stressTen[0, 2] + stressVec[5] = stressTen[0, 1] + + stressVec=num.atleast_2d(stressVec).T + + return stressVec + + +def stressVecToTen(stressVec): + + stressTen = num.zeros((3, 3), dtype='float64') + stressTen[0, 0] = stressVec[0] + stressTen[1, 1] = stressVec[1] + stressTen[2, 2] = stressVec[2] + stressTen[1, 2] = stressVec[3] + stressTen[0, 2] = stressVec[4] + stressTen[0, 1] = stressVec[5] + stressTen[2, 1] = stressVec[3] + stressTen[2, 0] = stressVec[4] + stressTen[1, 0] = stressVec[5] + + return stressTen + + + + +def ale3dStrainOutToV(vecds): + #takes 5 components of evecd and the 6th component is lndetv + + + """convert from vecds representation to symmetry matrix""" + eps = num.zeros([3,3],dtype='float64') + #Akk_by_3 = sqr3i * vecds[5] # -p + a = num.exp(vecds[5])**(1./3.)# -p + t1 = sqr2i*vecds[0] + t2 = sqr6i*vecds[1] + + eps[0, 0] = t1 - t2 + eps[1, 1] = -t1 - t2 + eps[2, 2] = sqr2b3*vecds[1] + eps[1, 0] = vecds[2] * sqr2i + eps[2, 0] = vecds[3] * sqr2i + eps[2, 1] = vecds[4] * sqr2i + + eps[0, 1] = eps[1, 0] + eps[0, 2] = eps[2, 0] + eps[1, 2] = eps[2, 1] + + epstar=eps/a + + V=(num.identity(3)+epstar)*a + Vinv=(num.identity(3)-epstar)/a + + return V,Vinv + +def vecdsToSymm(vecds): + """convert from vecds representation to symmetry matrix""" + A = num.zeros([3,3],dtype='float64') + Akk_by_3 = sqr3i * vecds[5] # -p + t1 = sqr2i*vecds[0] + t2 = sqr6i*vecds[1] + + A[0, 0] = t1 - t2 + Akk_by_3 + A[1, 1] = -t1 - t2 + Akk_by_3 + A[2, 2] = sqr2b3*vecds[1] + Akk_by_3 + A[1, 0] = vecds[2] * sqr2i + A[2, 0] = vecds[3] * sqr2i + A[2, 1] = vecds[4] * sqr2i + + A[0, 1] = A[1, 0] + A[0, 2] = A[2, 0] + A[1, 2] = A[2, 1] + return A + +def traceToVecdsS(Akk): + return sqr3i * Akk + +def vecdsSToTrace(vecdsS): + return vecdsS * sqr3 + +def trace3(A): + return A[0,0]+A[1,1]+A[2,2] + +def symmToVecds(A): + """convert from symmetry matrix to vecds representation""" + vecds = num.zeros(6,dtype='float64') + vecds[0] = sqr2i * (A[0,0] - A[1,1]) + vecds[1] = sqr6i * (2. 
* A[2,2] - A[0,0] - A[1,1]) + vecds[2] = sqr2 * A[1,0] + vecds[3] = sqr2 * A[2,0] + vecds[4] = sqr2 * A[2,1] + vecds[5] = traceToVecdsS(trace3(A)) + return vecds + + + +# ============================================================================= +# Numba-fied frame cache writer +# ============================================================================= + + +if USE_NUMBA: + @numba.njit + def extract_ijv(in_array, threshold, out_i, out_j, out_v): + n = 0 + w, h = in_array.shape + for i in range(w): + for j in range(h): + v = in_array[i, j] + if v > threshold: + out_i[n] = i + out_j[n] = j + out_v[n] = v + n += 1 + return n +else: # not USE_NUMBA + def extract_ijv(in_array, threshold, out_i, out_j, out_v): + mask = in_array > threshold + n = sum(mask) + tmp_i, tmp_j = mask.nonzero() + out_i[:n] = tmp_i + out_j[:n] = tmp_j + out_v[:n] = in_array[mask] + return n diff --git a/hexrd/transforms/debug_helpers.h b/hexrd/transforms/debug_helpers.h new file mode 100644 index 00000000..aacce7d5 --- /dev/null +++ b/hexrd/transforms/debug_helpers.h @@ -0,0 +1,61 @@ +#include + + + +static void debug_dump_val(const char *name, double val) +{ + printf("%s: %10.6f\n", name, val); +} + +static void debug_dump_m33(const char *name, double *array) +{ + printf("%s:\n", name); + printf("\t%10.6f %10.6f %10.6f\n", array[0], array[1], array[2]); + printf("\t%10.6f %10.6f %10.6f\n", array[3], array[4], array[5]); + printf("\t%10.6f %10.6f %10.6f\n", array[6], array[7], array[8]); +} + +static void debug_dump_v3(const char *name, double *vec) +{ + printf("%s: %10.6f %10.6f %10.6f\n", name, vec[0], vec[1], vec[2]); +} + +/* ============================================================================ + * These can be used to initialize and check buffers if we suspect the + * code may leave it uninitialized + * ============================================================================ + */ + +#define SNAN_HI 0x7ff700a0 +#define SNAN_LO 0xbad0feed +void fill_signaling_nans(double *arr, int count) +{ + int i; + npy_uint32 *arr_32 = (npy_uint32 *)arr; + /* Fills an array with signaling nans to detect errors + * Use the 0x7ff700a0bad0feed as the pattern + */ + for (i = 0; i < count; ++i) + { + arr_32[2*i+0] = SNAN_LO; + arr_32[2*i+1] = SNAN_HI; + } +} + +int detect_signaling_nans(double *arr, int count) +{ + int i; + npy_uint32 *arr_32 = (npy_uint32 *) arr; + for (i = 0; i < count; ++i) + { + if (arr_32[2*i+0] == SNAN_LO && + arr_32[2*i+1] == SNAN_HI) + { + return 1; + } + } + + return 0; +} +#undef SNAN_HI +#undef SNAN_LO diff --git a/hexrd/transforms/transforms_CAPI.c b/hexrd/transforms/transforms_CAPI.c index d3ce2934..dc07b65e 100644 --- a/hexrd/transforms/transforms_CAPI.c +++ b/hexrd/transforms/transforms_CAPI.c @@ -14,6 +14,7 @@ static PyMethodDef _transform_methods[] = { {"gvecToDetectorXY",gvecToDetectorXY,METH_VARARGS,""}, {"gvecToDetectorXYArray",gvecToDetectorXYArray,METH_VARARGS,""}, {"detectorXYToGvec",detectorXYToGvec,METH_VARARGS,"take cartesian coordinates to G-vectors"}, + {"detectorXYToGvecArray",detectorXYToGvecArray,METH_VARARGS,"take cartesian coordinates to G-vectors"}, {"oscillAnglesOfHKLs",oscillAnglesOfHKLs,METH_VARARGS,"solve angle specs for G-vectors"}, {"arccosSafe",arccosSafe,METH_VARARGS,""}, {"angularDifference",angularDifference,METH_VARARGS,"difference for cyclical angles"}, @@ -38,7 +39,7 @@ static PyMethodDef _transform_methods[] = { void init_transforms_CAPI(void) { - (void)Py_InitModule("_transforms_CAPI",_transform_methods); + (void)Py_InitModule("_transforms_CAPI", 
_transform_methods); import_array(); } @@ -197,18 +198,18 @@ static PyObject * makeGVector(PyObject * self, PyObject * args) static PyObject * gvecToDetectorXY(PyObject * self, PyObject * args) { PyArrayObject *gVec_c, - *rMat_d, *rMat_s, *rMat_c, - *tVec_d, *tVec_s, *tVec_c, - *beamVec; + *rMat_d, *rMat_s, *rMat_c, + *tVec_d, *tVec_s, *tVec_c, + *beamVec; PyArrayObject *result; int dgc, drd, drs, drc, dtd, dts, dtc, dbv; npy_intp npts, dims[2]; double *gVec_c_Ptr, - *rMat_d_Ptr, *rMat_s_Ptr, *rMat_c_Ptr, - *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, - *beamVec_Ptr; + *rMat_d_Ptr, *rMat_s_Ptr, *rMat_c_Ptr, + *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, + *beamVec_Ptr; double *result_Ptr; /* Parse arguments */ @@ -301,18 +302,18 @@ static PyObject * gvecToDetectorXY(PyObject * self, PyObject * args) static PyObject * gvecToDetectorXYArray(PyObject * self, PyObject * args) { PyArrayObject *gVec_c, - *rMat_d, *rMat_s, *rMat_c, - *tVec_d, *tVec_s, *tVec_c, - *beamVec; + *rMat_d, *rMat_s, *rMat_c, + *tVec_d, *tVec_s, *tVec_c, + *beamVec; PyArrayObject *result; int dgc, drd, drs, drc, dtd, dts, dtc, dbv; npy_intp npts, dims[2]; double *gVec_c_Ptr, - *rMat_d_Ptr, *rMat_s_Ptr, *rMat_c_Ptr, - *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, - *beamVec_Ptr; + *rMat_d_Ptr, *rMat_s_Ptr, *rMat_c_Ptr, + *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, + *beamVec_Ptr; double *result_Ptr; /* Parse arguments */ @@ -345,7 +346,7 @@ static PyObject * gvecToDetectorXYArray(PyObject * self, PyObject * args) if (npts != PyArray_DIM(rMat_s, 0)) { PyErr_Format(PyExc_ValueError, "gVec_c and rMat_s length mismatch %d vs %d", - (int)PyArray_DIM(gVec_c, 0), (int)PyArray_DIM(rMat_s, 0)); + (int)PyArray_DIM(gVec_c, 0), (int)PyArray_DIM(rMat_s, 0)); return NULL; } assert( PyArray_DIMS(gVec_c)[1] == 3 ); @@ -412,8 +413,8 @@ static PyObject * gvecToDetectorXYArray(PyObject * self, PyObject * args) static PyObject * detectorXYToGvec(PyObject * self, PyObject * args) { PyArrayObject *xy_det, *rMat_d, *rMat_s, - *tVec_d, *tVec_s, *tVec_c, - *beamVec, *etaVec; + *tVec_d, *tVec_s, *tVec_c, + *beamVec, *etaVec; PyArrayObject *tTh, *eta, *gVec_l; PyObject *inner_tuple, *outer_tuple; @@ -421,8 +422,8 @@ static PyObject * detectorXYToGvec(PyObject * self, PyObject * args) npy_intp npts, dims[2]; double *xy_Ptr, *rMat_d_Ptr, *rMat_s_Ptr, - *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, - *beamVec_Ptr, *etaVec_Ptr; + *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, + *beamVec_Ptr, *etaVec_Ptr; double *tTh_Ptr, *eta_Ptr, *gVec_l_Ptr; /* Parse arguments */ @@ -502,6 +503,126 @@ static PyObject * detectorXYToGvec(PyObject * self, PyObject * args) return outer_tuple; } +/* + Takes a list cartesian (x, y) pairs in the detector coordinates and calculates + the associated reciprocal lattice (G) vectors and (bragg angle, azimuth) pairs + with respect to the specified beam and azimth (eta) reference directions + + Required Arguments: + xy_det -- (n, 2) ndarray or list-like input of n detector (x, y) points + rMat_d -- (3, 3) ndarray, the COB taking DETECTOR FRAME components to LAB FRAME + rMat_s -- (n, 3, 3) ndarray, the COB taking SAMPLE FRAME components to LAB FRAME + tVec_d -- (3, 1) ndarray, the translation vector connecting LAB to DETECTOR + tVec_s -- (3, 1) ndarray, the translation vector connecting LAB to SAMPLE + tVec_c -- (3, 1) ndarray, the translation vector connecting SAMPLE to CRYSTAL + + Optional Keyword Arguments: + beamVec -- (1, 3) mdarray containing the incident beam direction components in the LAB FRAME + etaVec -- (1, 3) mdarray containing the reference azimuth 
direction components in the LAB FRAME + + Outputs: + (n, 2) ndarray containing the (tTh, eta) pairs associated with each (x, y) + (n, 3) ndarray containing the associated G vector directions in the LAB FRAME + associated with gVecs +*/ +static PyObject * detectorXYToGvecArray(PyObject * self, PyObject * args) +{ + PyArrayObject *xy_det, *rMat_d, *rMat_s, + *tVec_d, *tVec_s, *tVec_c, + *beamVec, *etaVec; + PyArrayObject *tTh, *eta, *gVec_l; + PyObject *inner_tuple, *outer_tuple; + + int dxy, drd, drs, dtd, dts, dtc, dbv, dev; + npy_intp npts, dims[2]; + + double *xy_Ptr, *rMat_d_Ptr, *rMat_s_Ptr, + *tVec_d_Ptr, *tVec_s_Ptr, *tVec_c_Ptr, + *beamVec_Ptr, *etaVec_Ptr; + double *tTh_Ptr, *eta_Ptr, *gVec_l_Ptr; + + /* Parse arguments */ + if ( !PyArg_ParseTuple(args,"OOOOOOOO", + &xy_det, + &rMat_d, &rMat_s, + &tVec_d, &tVec_s, &tVec_c, + &beamVec, &etaVec)) return(NULL); + if ( xy_det == NULL || rMat_d == NULL || rMat_s == NULL || + tVec_d == NULL || tVec_s == NULL || tVec_c == NULL || + beamVec == NULL || etaVec == NULL ) return(NULL); + + /* Verify shape of input arrays */ + dxy = PyArray_NDIM(xy_det); + drd = PyArray_NDIM(rMat_d); + drs = PyArray_NDIM(rMat_s); + dtd = PyArray_NDIM(tVec_d); + dts = PyArray_NDIM(tVec_s); + dtc = PyArray_NDIM(tVec_c); + dbv = PyArray_NDIM(beamVec); + dev = PyArray_NDIM(etaVec); + assert( dxy == 2 && drd == 2 && drs == 2 && + dtd == 1 && dts == 1 && dtc == 1 && + dbv == 1 && dev == 1); + + /* Verify dimensions of input arrays */ + npts = PyArray_DIMS(xy_det)[0]; + if (npts != PyArray_DIM(rMat_s, 0)) { + PyErr_Format(PyExc_ValueError, "xy_det and rMat_s length mismatch %d vs %d", + (int)PyArray_DIM(xy_det, 0), (int)PyArray_DIM(rMat_s, 0)); + return NULL; + } + + assert( PyArray_DIMS(xy_det)[1] == 2 ); + assert( PyArray_DIMS(rMat_d)[0] == 3 && PyArray_DIMS(rMat_d)[1] == 3 ); + assert( PyArray_DIMS(rMat_s)[0] == 3 && PyArray_DIMS(rMat_s)[1] == 3 ); + assert( PyArray_DIMS(tVec_d)[0] == 3 ); + assert( PyArray_DIMS(tVec_s)[0] == 3 ); + assert( PyArray_DIMS(tVec_c)[0] == 3 ); + assert( PyArray_DIMS(beamVec)[0] == 3 ); + assert( PyArray_DIMS(etaVec)[0] == 3 ); + + /* Allocate arrays for return values */ + dims[0] = npts; dims[1] = 3; + gVec_l = (PyArrayObject*)PyArray_EMPTY(2,dims,NPY_DOUBLE,0); + + tTh = (PyArrayObject*)PyArray_EMPTY(1,&npts,NPY_DOUBLE,0); + eta = (PyArrayObject*)PyArray_EMPTY(1,&npts,NPY_DOUBLE,0); + + /* Grab data pointers into various arrays */ + xy_Ptr = (double*)PyArray_DATA(xy_det); + gVec_l_Ptr = (double*)PyArray_DATA(gVec_l); + + tTh_Ptr = (double*)PyArray_DATA(tTh); + eta_Ptr = (double*)PyArray_DATA(eta); + + rMat_d_Ptr = (double*)PyArray_DATA(rMat_d); + rMat_s_Ptr = (double*)PyArray_DATA(rMat_s); + + tVec_d_Ptr = (double*)PyArray_DATA(tVec_d); + tVec_s_Ptr = (double*)PyArray_DATA(tVec_s); + tVec_c_Ptr = (double*)PyArray_DATA(tVec_c); + + beamVec_Ptr = (double*)PyArray_DATA(beamVec); + etaVec_Ptr = (double*)PyArray_DATA(etaVec); + + /* Call the computational routine */ + detectorXYToGvecArray_cfunc(npts, xy_Ptr, + rMat_d_Ptr, rMat_s_Ptr, + tVec_d_Ptr, tVec_s_Ptr, tVec_c_Ptr, + beamVec_Ptr, etaVec_Ptr, + tTh_Ptr, eta_Ptr, gVec_l_Ptr); + + /* Build and return the nested data structure */ + /* Note that Py_BuildValue with 'O' increases reference count */ + inner_tuple = Py_BuildValue("OO",tTh,eta); + outer_tuple = Py_BuildValue("OO", inner_tuple, gVec_l); + Py_DECREF(inner_tuple); + Py_DECREF(tTh); + Py_DECREF(eta); + Py_DECREF(gVec_l); + return outer_tuple; +} + static PyObject * oscillAnglesOfHKLs(PyObject * self, PyObject * args) { 
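For context, a hypothetical call sketch for the new `detectorXYToGvecArray` binding (names and shapes per the comment block above; the call itself is commented out since it needs the compiled module):

```python
import numpy as np
# from hexrd.transforms import _transforms_CAPI as capi  # compiled extension

n = 4
xy = np.zeros((n, 2))                  # detector (x, y) points
rmat_d = np.eye(3)                     # detector COB matrix
rmat_s = np.tile(np.eye(3), (n, 1, 1))  # one sample rotation per point
tvec_d = np.r_[0., 0., -1000.]
tvec_s = np.zeros(3)
tvec_c = np.zeros(3)
beam_vec = np.r_[0., 0., -1.]
eta_vec = np.r_[1., 0., 0.]

# (tth, eta), gvec_l = capi.detectorXYToGvecArray(
#     xy, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, beam_vec, eta_vec)
# tth, eta: (n,) angle arrays; gvec_l: (n, 3) lab-frame G-vector directions
```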
PyArrayObject *hkls, *rMat_c, *bMat, diff --git a/hexrd/transforms/transforms_CAPI.h b/hexrd/transforms/transforms_CAPI.h index fa9a53ca..b72d3448 100644 --- a/hexrd/transforms/transforms_CAPI.h +++ b/hexrd/transforms/transforms_CAPI.h @@ -35,6 +35,8 @@ static PyObject * gvecToDetectorXYArray(PyObject * self, PyObject * args); static PyObject * detectorXYToGvec(PyObject * self, PyObject * args); +static PyObject * detectorXYToGvecArray(PyObject * self, PyObject * args); + static PyObject * oscillAnglesOfHKLs(PyObject * self, PyObject * args); /******************************************************************************/ diff --git a/hexrd/transforms/transforms_CFUNC.c b/hexrd/transforms/transforms_CFUNC.c index e6c75a18..f9b233c3 100644 --- a/hexrd/transforms/transforms_CFUNC.c +++ b/hexrd/transforms/transforms_CFUNC.c @@ -6,195 +6,687 @@ #include "transforms_CFUNC.h" +/* + * Microsoft's C compiler, when running in C mode, does not support the inline + * keyword. However it does support an __inline one. + * + * So if compiling with MSC, just use __inline as inline + */ +#if defined(_MSC_VER) +# define inline __inline +#endif + +/* + * For now, disable C99 codepaths + */ +#define USE_C99_CODE 0 +#if ! defined(USE_C99_CODE) +# if defined(__STDC__) +# if (__STD_VERSION__ >= 199901L) +# define USE_C99_CODE 1 +# else +# define USE_C99_CODE 0 +# endif +# endif +#endif + +#if ! USE_C99_CODE +/* + * Just remove any "restrict" keyword that may be present. + */ +#define restrict +#endif + static double epsf = 2.2e-16; static double sqrt_epsf = 1.5e-8; - static double Zl[3] = {0.0,0.0,1.0}; + /******************************************************************************/ /* Functions */ +#if USE_C99_CODE +static inline +double * +v3_v3s_inplace_add(double *dst_src1, + const double *src2, int stride) +{ + dst_src1[0] += src2[0]; + dst_src1[1] += src2[1*stride]; + dst_src1[2] += src2[2*stride]; + return dst_src1; +} -void anglesToGvec_cfunc(long int nvecs, double * angs, - double * bHat_l, double * eHat_l, - double chi, double * rMat_c, - double * gVec_c) +static inline +double * +v3_v3s_add(const double *src1, + const double *src2, int stride, + double * restrict dst) { - /* - * takes an angle spec (2*theta, eta, omega) for nvecs g-vectors and - * returns the unit g-vector components in the crystal frame - * - * For unit g-vector in the lab frame, spec rMat_c = Identity and - * overwrite the omega values with zeros - */ - int i, j, k, l; - double rMat_e[9], rMat_s[9], rMat_ctst[9]; - double gVec_e[3], gVec_l[3], gVec_c_tmp[3]; + dst[0] = src1[0] + src2[0]; + dst[1] = src1[1] + src2[1*stride]; + dst[2] = src1[2] + src2[2*stride]; - /* Need eta frame cob matrix (could omit for standard setting) */ - makeEtaFrameRotMat_cfunc(bHat_l, eHat_l, rMat_e); + return dst; +} - /* make vector array */ - for (i=0; i epsf) { + double normalize_factor = 1./sqrt(sqr_norm); + v[0] *= normalize_factor; + v[1] *= normalize_factor; + v[2] *= normalize_factor; } - /* need pointwise rMat_s according to omega */ - makeOscillRotMat_cfunc(chi, angs[3*i+2], rMat_s); + return v; +} - /* Compute dot(rMat_c.T, rMat_s.T) and hit against gVec_l */ - for (j=0; j<3; j++) { - for (k=0; k<3; k++) { - rMat_ctst[3*j+k] = 0.0; - for (l=0; l<3; l++) { - rMat_ctst[3*j+k] += rMat_c[3*l+j]*rMat_s[3*k+l]; - } - } - gVec_c_tmp[j] = 0.0; - for (k=0; k<3; k++) { - gVec_c_tmp[j] += rMat_ctst[3*j+k]*gVec_l[k]; - } - gVec_c[3*i+j] = gVec_c_tmp[j]; +static inline +double * +v3_normalize(const double *in, + double * restrict out) +{ + double in0 = 
in[0], in1 = in[1], in2 = in[2];
+    double sqr_norm = in0*in0 + in1*in1 + in2*in2;
+
+    if (sqr_norm > epsf) {
+        double normalize_factor = 1./sqrt(sqr_norm);
+        out[0] = in0 * normalize_factor;
+        out[1] = in1 * normalize_factor;
+        out[2] = in2 * normalize_factor;
+    } else {
+        out[0] = in0;
+        out[1] = in1;
+        out[2] = in2;
+    }
+
+    return out;
+}
+
+static inline
+double *
+m33_inplace_transpose(double * restrict m)
+{
+    double e1 = m[1];
+    double e2 = m[2];
+    double e5 = m[5];
+    m[1] = m[3];
+    m[2] = m[6];
+    m[5] = m[7];
+    m[3] = e1;
+    m[6] = e2;
+    m[7] = e5;
+
+    return m;
+}
+
+static inline
+double *
+m33_transpose(const double *m,
+              double * restrict dst)
+{
+    /* dst = m^T for a row-major 3x3; indices run 0..8 */
+    dst[0] = m[0]; dst[1] = m[3]; dst[2] = m[6];
+    dst[3] = m[1]; dst[4] = m[4]; dst[5] = m[7];
+    dst[6] = m[2]; dst[7] = m[5]; dst[8] = m[8];
+
+    return dst;
+}
+
+static inline
+double
+v3_v3s_dot(const double *v1,
+           const double *v2, int stride)
+{
+    return v1[0]*v2[0] + v1[1]*v2[stride] + v1[2]*v2[2*stride];
+}
+
+
+/* 3x3 matrix by strided 3 vector product -------------------------------------
+   hopefully a constant stride will be optimized
+ */
+static inline
+double *
+m33_v3s_multiply(const double *m,
+                 const double *v, int stride,
+                 double * restrict dst)
+{
+    dst[0] = m[0]*v[0] + m[1]*v[stride] + m[2]*v[2*stride];
+    dst[1] = m[3]*v[0] + m[4]*v[stride] + m[5]*v[2*stride];
+    dst[2] = m[6]*v[0] + m[7]*v[stride] + m[8]*v[2*stride];
+
+    return dst;
+}
+
+/* transposed 3x3 matrix by strided 3 vector product --------------------------
+ */
+static inline
+double *
+v3s_m33t_multiply(const double *v, int stride,
+                  const double *m,
+                  double * restrict dst)
+{
+    double v0 = v[0]; double v1 = v[stride]; double v2 = v[2*stride];
+    dst[0] = v0*m[0] + v1*m[1] + v2*m[2];
+    dst[1] = v0*m[3] + v1*m[4] + v2*m[5];
+    dst[2] = v0*m[6] + v1*m[7] + v2*m[8];
+
+    return dst;
+}
+
+static inline
+double *
+v3s_m33_multiply(const double *v, int stride,
+                 const double *m,
+                 double * restrict dst)
+{
+    double v0 = v[0]; double v1 = v[stride]; double v2 = v[2*stride];
+    dst[0] = v0*m[0] + v1*m[3] + v2*m[6];
+    dst[1] = v0*m[1] + v1*m[4] + v2*m[7];
+    dst[2] = v0*m[2] + v1*m[5] + v2*m[8];
+
+    return dst;
+}
+
+static inline
+double *
+m33t_v3s_multiply(const double *m,
+                  const double *v, int stride,
+                  double * restrict dst)
+{
+    dst[0] = m[0]*v[0] + m[3]*v[stride] + m[6]*v[2*stride];
+    dst[1] = m[1]*v[0] + m[4]*v[stride] + m[7]*v[2*stride];
+    dst[2] = m[2]*v[0] + m[5]*v[stride] + m[8]*v[2*stride];
+
+    return dst;
+}
+
+static inline
+double *
+m33_m33_multiply(const double *src1,
+                 const double *src2,
+                 double * restrict dst)
+{
+    v3s_m33_multiply(src1 + 0, 1, src2, dst+0);
+    v3s_m33_multiply(src1 + 3, 1, src2, dst+3);
+    v3s_m33_multiply(src1 + 6, 1, src2, dst+6);
+
+    return dst;
+}
+
+static inline
+double *
+m33t_m33_multiply(const double *src1,
+                  const double *src2,
+                  double * restrict dst)
+{
+    v3s_m33_multiply(src1 + 0, 3, src2, dst+0);
+    v3s_m33_multiply(src1 + 1, 3, src2, dst+3);
+    v3s_m33_multiply(src1 + 2, 3, src2, dst+6);
+
+    return dst;
+}
+
+static inline
+double *
+m33_m33t_multiply(const double *src1,
+                  const double *src2,
+                  double * restrict dst)
+{
+    /* row i of dst is row i of src1 dotted with the rows of src2,
+       which yields src1 * src2^T */
+    v3s_m33t_multiply(src1 + 0, 1, src2, dst+0);
+    v3s_m33t_multiply(src1 + 3, 1, src2, dst+3);
+    v3s_m33t_multiply(src1 + 6, 1, src2, dst+6);
+
+    return dst;
+}
+
+static inline
+double *
+m33t_m33t_multiply(const double *src1,
+                   const double *src2,
+                   double * restrict dst)
+{
+    return m33_inplace_transpose(m33_m33_multiply(src2, src1, dst));
+}
+
+#endif
+
+#if USE_C99_CODE
+static inline
+void anglesToGvec_single(double *v3_ang, double *m33_e,
+                         double chi, double *m33_c,
+                         double * 
restrict v3_c) +{ + double v3_g[3], v3_tmp1[3], v3_tmp2[3], m33_s[9], m33_ctst[9]; + + /* build g */ + double cx = cos(0.5*v3_ang[0]); + double sx = sin(0.5*v3_ang[0]); + double cy = cos(v3_ang[1]); + double sy = sin(v3_ang[1]); + v3_g[0] = cx*cy; + v3_g[1] = cx*sy; + v3_g[2] = sx; + + /* build S */ + makeOscillRotMat_cfunc(chi, v3_ang[2], m33_s); + + /* beam frame to lab frame */ + /* eval the chain: + C.T _dot_ S.T _dot_ E _dot_ g + */ + m33_v3s_multiply (m33_e, v3_g, 1, v3_tmp1); /* E _dot_ g */ + m33t_v3s_multiply(m33_s, v3_tmp1, 1, v3_tmp2); /* S.T _dot_ E _dot_ g */ + m33t_v3s_multiply(m33_c, v3_tmp2, 1, v3_c); /* the whole lot */ } void anglesToGvec_cfunc(long int nvecs, double * angs, + double * bHat_l, double * eHat_l, + double chi, double * rMat_c, + double * gVec_c) +{ + double m33_e[9]; + + makeEtaFrameRotMat_cfunc(bHat_l, eHat_l, m33_e); + + for (int i = 0; i < nvecs; i++) { + anglesToGvec_single(angs + 3*i, m33_e, chi, rMat_c, gVec_c + 3*i); + } +} + +void gvecToDetectorXYOne_cfunc(double * gVec_c, double * rMat_d, + double * rMat_sc, double * tVec_d, + double * bHat_l, + double * nVec_l, double num, double * P0_l, + double * result) +{ + double ztol = epsf; + + /* Compute unit reciprocal lattice vector in crystal frame w/o translation */ + double gHat_c[3]; + v3_normalize(gVec_c, gHat_c); + + /* Compute unit reciprocal lattice vector in lab frame and dot with beam vector */ + double gVec_l[3]; + m33_v3s_multiply(rMat_sc, gHat_c, 1, gVec_l); + double bDot = -v3_v3s_dot(bHat_l, gVec_l, 1); + + if ( bDot >= ztol && bDot <= 1.0-ztol ) { + /* + * If we are here diffraction is possible so increment the number of + * admissable vectors + */ + double brMat[9]; + makeBinaryRotMat_cfunc(gVec_l, brMat); + + double dVec_l[3]; + m33_v3s_multiply(brMat, bHat_l, 1, dVec_l); + double denom = v3_v3s_dot(nVec_l, dVec_l, 1); + + if (denom > ztol) { + double u = num/denom; + double v3_tmp[3]; + + /* v3_tmp = P0_l + u*dVec_l - tVec_d */ + for (int j=0; j<3; j++) + v3_tmp[j] = P0_l[j] + u*dVec_l[j] - tVec_d[j]; + + result[0] = v3_v3s_dot(v3_tmp, rMat_d + 0, 3); + result[1] = v3_v3s_dot(v3_tmp, rMat_d + 1, 3); + + /* result when computation can be finished */ + return; + } } - /* need pointwise rMat_s according to omega */ - makeOscillRotMat_cfunc(chi, angs[3*i+2], rMat_s); + /* default result when computation can't be finished */ + result[0] = NAN; + result[1] = NAN; +} - /* Compute dot(rMat_c.T, rMat_s.T) and hit against gVec_l */ - for (j=0; j<3; j++) { - for (k=0; k<3; k++) { - rMat_ctst[3*j+k] = 0.0; - for (l=0; l<3; l++) { - rMat_ctst[3*j+k] += rMat_c[3*l+j]*rMat_s[3*k+l]; - } - } - gVec_c_tmp[j] = 0.0; - for (k=0; k<3; k++) { - gVec_c_tmp[j] += rMat_ctst[3*j+k]*gVec_l[k]; - } - gVec_c[3*i+j] = gVec_c_tmp[j]; +/* + * The only difference between this and the non-Array version + * is that rMat_s is an array of matrices of length npts instead + * of a single matrix. 
+ */ +void gvecToDetectorXYArray_cfunc(long int npts, double * gVec_c_array, + double * rMat_d, double * rMat_s_array, double * rMat_c, + double * tVec_d, double * tVec_s, double * tVec_c, + double * beamVec, double * result_array) +{ + /* Normalize the beam vector */ + double bHat_l[3]; + v3_normalize(beamVec, bHat_l); + double nVec_l[3]; + m33_v3s_multiply(rMat_d, Zl, 1, nVec_l); + + for (size_t i = 0; i < npts; i++) { + double *rMat_s = rMat_s_array + 9*i; + double *gVec_c = gVec_c_array + 3*i; + double * restrict result = result_array + 2*i; + /* Initialize the detector normal and frame origins */ + + double P0_l[3]; + m33_v3s_multiply(rMat_s, tVec_c, 1, P0_l); + v3_v3s_inplace_add(P0_l, tVec_s, 1); + + double P3_l_minus_P0_l[3]; + v3_v3s_sub(tVec_d, P0_l, 1, P3_l_minus_P0_l); + double num = v3_v3s_dot(nVec_l, P3_l_minus_P0_l, 1); + + double gHat_c[3]; + v3_normalize(gVec_c, gHat_c); + /* + double rMat_sc[9]; + m33_m33_multiply(rMat_s, rMat_c, rMat_sc); + double gVec_l[3]; + m33_v3s_multiply(rMat_sc, gHat_c, 1, gVec_l); + */ + double tmp_vec[3], gVec_l[3]; + m33_v3s_multiply(rMat_c, gHat_c, 1, tmp_vec); + m33_v3s_multiply(rMat_s, tmp_vec, 1, gVec_l); + + double bDot = -v3_v3s_dot(bHat_l, gVec_l, 1); + double ztol = epsf; + + if (bDot < ztol || bDot > 1.0-ztol) { + result[0] = NAN; result[1] = NAN; + continue; + } + + double brMat[9]; + makeBinaryRotMat_cfunc(gVec_l, brMat); + + double dVec_l[3]; + m33_v3s_multiply(brMat, bHat_l, 1, dVec_l); + double denom = v3_v3s_dot(nVec_l, dVec_l, 1); + if (denom < ztol) { + result[0] = NAN; result[1] = NAN; + continue; + } + + double u = num/denom; + double v3_tmp[3]; + for (int j=0; j < 3; j++) + v3_tmp[j] = u*dVec_l[j] - P3_l_minus_P0_l[j]; + + result[0] = v3_v3s_dot(v3_tmp, rMat_d + 0, 3); + result[1] = v3_v3s_dot(v3_tmp, rMat_d + 1, 3); } - } } +#else void gvecToDetectorXYOne_cfunc(double * gVec_c, double * rMat_d, - double * rMat_sc, double * tVec_d, - double * bHat_l, - double * nVec_l, double num, double * P0_l, - double * result) + double * rMat_sc, double * tVec_d, + double * bHat_l, + double * nVec_l, double num, double * P0_l, + double * result) { - int j, k; - double bDot, ztol, denom, u; - double gHat_c[3], gVec_l[3], dVec_l[3], P2_l[3], P2_d[3]; - double brMat[9]; + int j, k; + double bDot, ztol, denom, u; + double gHat_c[3], gVec_l[3], dVec_l[3], P2_l[3], P2_d[3]; + double brMat[9]; - ztol = epsf; + ztol = epsf; - /* Compute unit reciprocal lattice vector in crystal frame w/o translation */ - unitRowVector_cfunc(3,gVec_c,gHat_c); + /* Compute unit reciprocal lattice vector in crystal frame w/o + translation */ + unitRowVector_cfunc(3, gVec_c, gHat_c); - /* - * Compute unit reciprocal lattice vector in lab frame - * and dot with beam vector - */ - bDot = 0.0; - for (j=0; j<3; j++) { - gVec_l[j] = 0.0; - for (k=0; k<3; k++) - gVec_l[j] += rMat_sc[3*j+k]*gHat_c[k]; + /* Compute unit reciprocal lattice vector in lab frame and dot with beam + vector */ + bDot = 0.0; + for (j=0; j<3; j++) { + gVec_l[j] = 0.0; + for (k=0; k<3; k++) + gVec_l[j] += rMat_sc[3*j+k]*gHat_c[k]; - bDot -= bHat_l[j]*gVec_l[j]; - } + bDot -= bHat_l[j]*gVec_l[j]; + } - if ( bDot >= ztol && bDot <= 1.0-ztol ) { - /* - * If we are here diffraction is possible so increment - * the number of admissable vectors - */ - makeBinaryRotMat_cfunc(gVec_l,brMat); + if ( bDot >= ztol && bDot <= 1.0-ztol ) { + /* If we are here diffraction is possible so increment the number of + admissable vectors */ + makeBinaryRotMat_cfunc(gVec_l, brMat); - denom = 0.0; - for (j=0; j<3; j++) { - 
dVec_l[j] = 0.0; - for (k=0; k<3; k++) - dVec_l[j] -= brMat[3*j+k]*bHat_l[k]; + denom = 0.0; + for (j=0; j<3; j++) { + dVec_l[j] = 0.0; + for (k=0; k<3; k++) + dVec_l[j] -= brMat[3*j+k]*bHat_l[k]; - denom += nVec_l[j]*dVec_l[j]; - } + denom += nVec_l[j]*dVec_l[j]; + } - if ( denom < -ztol ) { + if ( denom < -ztol ) { - u = num/denom; + u = num/denom; - for (j=0; j<3; j++) - P2_l[j] = P0_l[j]+u*dVec_l[j]; + for (j=0; j<3; j++) + P2_l[j] = P0_l[j]+u*dVec_l[j]; - for (j=0; j<2; j++) { - P2_d[j] = 0.0; - for (k=0; k<3; k++) - P2_d[j] += rMat_d[3*k+j]*(P2_l[k]-tVec_d[k]); - result[j] = P2_d[j]; - } - } else { - result[0] = NAN; - result[1] = NAN; + for (j=0; j<2; j++) { + P2_d[j] = 0.0; + for (k=0; k<3; k++) + P2_d[j] += rMat_d[3*k+j]*(P2_l[k]-tVec_d[k]); + result[j] = P2_d[j]; + } + /* result when computation can be finished */ + return; + } } - - } else { + /* default result when computation can't be finished */ result[0] = NAN; result[1] = NAN; - } } +/* + * The only difference between this and the non-Array version + * is that rMat_s is an array of matrices of length npts instead + * of a single matrix. + */ +void gvecToDetectorXYArray_cfunc(long int npts, double * gVec_c, + double * rMat_d, double * rMat_s, + double * rMat_c, double * tVec_d, + double * tVec_s, double * tVec_c, + double * beamVec, double * result) +{ + long int i; + int j, k, l; + double num; + double nVec_l[3], bHat_l[3], P0_l[3], P3_l[3]; + double rMat_sc[9]; + + /* Normalize the beam vector */ + unitRowVector_cfunc(3,beamVec,bHat_l); + + for (i=0L; i < npts; i++) { + /* Initialize the detector normal and frame origins */ + num = 0.0; + for (j=0; j<3; j++) { + nVec_l[j] = 0.0; + P0_l[j] = tVec_s[j]; + + for (k=0; k<3; k++) { + nVec_l[j] += rMat_d[3*j+k]*Zl[k]; + P0_l[j] += rMat_s[9*i + 3*j+k]*tVec_c[k]; + } + + P3_l[j] = tVec_d[j]; + + num += nVec_l[j]*(P3_l[j]-P0_l[j]); + } + + /* Compute the matrix product of rMat_s and rMat_c */ + for (j=0; j<3; j++) { + for (k=0; k<3; k++) { + rMat_sc[3*j+k] = 0.0; + for (l=0; l<3; l++) { + rMat_sc[3*j+k] += rMat_s[9*i + 3*j+l]*rMat_c[3*l+k]; + } + } + } + + gvecToDetectorXYOne_cfunc(gVec_c + 3*i, rMat_d, rMat_sc, + tVec_d, bHat_l, nVec_l, num, + P0_l, result + 2*i); + } +} + +#endif void gvecToDetectorXY_cfunc(long int npts, double * gVec_c, - double * rMat_d, double * rMat_s, double * rMat_c, - double * tVec_d, double * tVec_s, double * tVec_c, - double * beamVec, double * result) + double * rMat_d, double * rMat_s, double * rMat_c, + double * tVec_d, double * tVec_s, double * tVec_c, + double * beamVec, double * result) { long int i; int j, k, l; @@ -206,9 +698,9 @@ void gvecToDetectorXY_cfunc(long int npts, double * gVec_c, /* Normalize the beam vector */ unitRowVector_cfunc(3,beamVec,bHat_l); - /* Initialize the detector normal and frame origins */ - num = 0.0; - for (j=0; j<3; j++) { + /* Initialize the detector normal and frame origins */ + num = 0.0; + for (j=0; j<3; j++) { nVec_l[j] = 0.0; P0_l[j] = tVec_s[j]; @@ -220,87 +712,116 @@ void gvecToDetectorXY_cfunc(long int npts, double * gVec_c, P3_l[j] = tVec_d[j]; num += nVec_l[j]*(P3_l[j]-P0_l[j]); - } + } - /* Compute the matrix product of rMat_s and rMat_c */ - for (j=0; j<3; j++) { - for (k=0; k<3; k++) { - rMat_sc[3*j+k] = 0.0; - for (l=0; l<3; l++) { - rMat_sc[3*j+k] += rMat_s[3*j+l]*rMat_c[3*l+k]; + /* Compute the matrix product of rMat_s and rMat_c */ + for (j=0; j<3; j++) { + for (k=0; k<3; k++) { + rMat_sc[3*j+k] = 0.0; + for (l=0; l<3; l++) { + rMat_sc[3*j+k] += rMat_s[3*j+l]*rMat_c[3*l+k]; + } } } - } for (i=0L; i 
epsf ) { + double nrm_factor = 1.0/sqrt(nrm); + for (j=0; j<3; j++) { + dHat_l[j] *= nrm_factor; + } + } - P3_l[j] = tVec_d[j]; + /* Compute tTh */ + b_dot_dHat_l = 0.0; + for (j=0; j<3; j++) { + b_dot_dHat_l += bVec[j]*dHat_l[j]; + } + tTh = acos(b_dot_dHat_l); - num += nVec_l[j]*(P3_l[j]-P0_l[j]); - } + /* Compute eta */ + for (j=0; j<2; j++) { + tVec2[j] = 0.0; + for (k=0; k<3; k++) { + tVec2[j] += rMat_e[3*k+j]*dHat_l[k]; + } + } + eta = atan2(tVec2[1], tVec2[0]); - /* Compute the matrix product of rMat_s and rMat_c */ + /* Compute n_g vector */ + nrm = 0.0; for (j=0; j<3; j++) { - for (k=0; k<3; k++) { - rMat_sc[3*j+k] = 0.0; - for (l=0; l<3; l++) { - rMat_sc[3*j+k] += rMat_s[9*i + 3*j+l]*rMat_c[3*l+k]; - } - } + double val; + int j1 = j < 2 ? j+1 : 0; + int j2 = j > 0 ? j-1 : 2; + val = bVec[j1] * dHat_l[j2] - bVec[j2] * dHat_l[j1]; + nrm += val*val; + n_g[j] = val; + } + if ( nrm > epsf ) { + double nrm_factor = 1.0/sqrt(nrm); + for (j=0; j<3; j++) { + n_g[j] *= nrm_factor; + } } - gvecToDetectorXYOne_cfunc(&gVec_c[3*i], rMat_d, rMat_sc, tVec_d, - bHat_l, nVec_l, num, - P0_l, &result[2*i]); - } + /* Rotate dHat_l vector */ + phi = 0.5*(M_PI-tTh); + *tTh_out = tTh; + *eta_out = eta; + rotate_vecs_about_axis_cfunc(1, &phi, 1, n_g, 1, dHat_l, gVec_l_out); } void detectorXYToGvec_cfunc(long int npts, double * xy, - double * rMat_d, double * rMat_s, - double * tVec_d, double * tVec_s, double * tVec_c, - double * beamVec, double * etaVec, - double * tTh, double * eta, double * gVec_l) + double * rMat_d, double * rMat_s, + double * tVec_d, double * tVec_s, double * tVec_c, + double * beamVec, double * etaVec, + double * tTh, double * eta, double * gVec_l) { long int i; int j, k; - double nrm, phi, bVec[3], tVec1[3], tVec2[3], dHat_l[3], n_g[3]; + double nrm, bVec[3], tVec1[3]; double rMat_e[9]; /* Fill rMat_e */ @@ -311,10 +832,11 @@ void detectorXYToGvec_cfunc(long int npts, double * xy, for (j=0; j<3; j++) { nrm += beamVec[j]*beamVec[j]; } - nrm = sqrt(nrm); + if ( nrm > epsf ) { + double nrm_factor = 1.0/sqrt(nrm); for (j=0; j<3; j++) - bVec[j] = beamVec[j]/nrm; + bVec[j] = beamVec[j]*nrm_factor; } else { for (j=0; j<3; j++) bVec[j] = beamVec[j]; @@ -329,59 +851,66 @@ void detectorXYToGvec_cfunc(long int npts, double * xy, } for (i=0; i epsf ) { - for (j=0; j<3; j++) { - dHat_l[j] /= sqrt(nrm); + double nrm_factor = 1.0/sqrt(nrm); + for (j=0; j<3; j++) + bVec[j] = beamVec[j]*nrm_factor; + } else { + for (j=0; j<3; j++) + bVec[j] = beamVec[j]; } - } - /* Compute tTh */ - nrm = 0.0; for (j=0; j<3; j++) { - nrm += bVec[j]*dHat_l[j]; - } - tTh[i] = acos(nrm); - - /* Compute eta */ - for (j=0; j<2; j++) { - tVec2[j] = 0.0; + tVec1[j] = tVec_d[j]-tVec_s[j]; for (k=0; k<3; k++) { - tVec2[j] += rMat_e[3*k+j]*dHat_l[k]; + tVec1[j] -= rMat_s[3*j+k]*tVec_c[k]; } } - eta[i] = atan2(tVec2[1],tVec2[0]); - /* Compute n_g vector */ - nrm = 0.0; + for (i=0; i epsf ) { for (j=0; j<3; j++) { - gHat_c[j] /= nrm0; - gHat_s[j] = tmpVec[j]/nrm0; + gHat_c[j] /= nrm0; + gHat_s[j] = tmpVec[j]/nrm0; } } @@ -490,9 +1019,9 @@ void oscillAnglesOfHKLs_cfunc(long int npts, double * hkls, double chi, if ( fabs(rhs) > 1.0 ) { for (j=0; j<3; j++) - oangs0[3L*i+j] = NAN; + oangs0[3L*i+j] = NAN; for (j=0; j<3; j++) - oangs1[3L*i+j] = NAN; + oangs1[3L*i+j] = NAN; continue; } @@ -511,16 +1040,16 @@ void oscillAnglesOfHKLs_cfunc(long int npts, double * hkls, double chi, makeOscillRotMat_cfunc(oVec[0], oVec[1], rMat_s); for (j=0; j<3; j++) { - tVec0[j] = 0.0; - for (k=0; k<3; k++) { - tVec0[j] += rMat_s[3*j+k]*gHat_s[k]; - } + 
tVec0[j] = 0.0; + for (k=0; k<3; k++) { + tVec0[j] += rMat_s[3*j+k]*gHat_s[k]; + } } for (j=0; j<2; j++) { - gVec_e[j] = 0.0; - for (k=0; k<3; k++) { - gVec_e[j] += rMat_e[3*k+j]*tVec0[k]; - } + gVec_e[j] = 0.0; + for (k=0; k<3; k++) { + gVec_e[j] += rMat_e[3*k+j]*tVec0[k]; + } } oangs0[3L*i+1] = atan2(gVec_e[1],gVec_e[0]); @@ -528,16 +1057,16 @@ void oscillAnglesOfHKLs_cfunc(long int npts, double * hkls, double chi, makeOscillRotMat_cfunc(oVec[0], oVec[1], rMat_s); for (j=0; j<3; j++) { - tVec0[j] = 0.0; - for (k=0; k<3; k++) { - tVec0[j] += rMat_s[3*j+k]*gHat_s[k]; - } + tVec0[j] = 0.0; + for (k=0; k<3; k++) { + tVec0[j] += rMat_s[3*j+k]*gHat_s[k]; + } } for (j=0; j<2; j++) { - gVec_e[j] = 0.0; - for (k=0; k<3; k++) { - gVec_e[j] += rMat_e[3*k+j]*tVec0[k]; - } + gVec_e[j] = 0.0; + for (k=0; k<3; k++) { + gVec_e[j] += rMat_e[3*k+j]*tVec0[k]; + } } oangs1[3L*i+1] = atan2(gVec_e[1],gVec_e[0]); @@ -584,11 +1113,11 @@ void unitRowVectors_cfunc(int m, int n, double * cIn, double * cOut) nrm = sqrt(nrm); if ( nrm > epsf ) { for (j=0; j 2.0*M_PI ) - thetaMax -= 2.0*M_PI; + thetaMax -= 2.0*M_PI; while ( theta < 0.0 ) - theta += 2.0*M_PI; + theta += 2.0*M_PI; while ( theta > 2.0*M_PI ) - theta -= 2.0*M_PI; + theta -= 2.0*M_PI; if ( theta > -sqrt_epsf && theta < thetaMax + sqrt_epsf ) { - rPtr[i] = true; + rPtr[i] = true; - /* No need to check other ranges */ - break; + /* No need to check other ranges */ + break; } } } } void validateAngleRanges_cfunc(int na, double * aPtr, int nr, - double * minPtr, double * maxPtr, - bool * rPtr, int ccw) + double * minPtr, double * maxPtr, + bool * rPtr, int ccw) { int i, j; double thetaMax, theta; @@ -822,58 +1354,59 @@ void validateAngleRanges_cfunc(int na, double * aPtr, int nr, for (j=0; j 2.0*M_PI ) - thetaMax -= 2.0*M_PI; + thetaMax -= 2.0*M_PI; /* Check for an empty range */ if ( fabs(thetaMax) < sqrt_epsf ) { - rPtr[i] = true; + rPtr[i] = true; - /* No need to check other ranges */ - break; + /* No need to check other ranges */ + break; } /* Check for a range which spans a full circle */ if ( fabs(thetaMax-2.0*M_PI) < sqrt_epsf ) { - /* Double check the initial range */ - if ( (ccw && maxPtr[j] > minPtr[j]) || ((!ccw) && maxPtr[j] < minPtr[j]) ) { - rPtr[i] = true; + /* Double check the initial range */ + if ( (ccw && maxPtr[j] > minPtr[j]) || ((!ccw) && maxPtr[j] < minPtr[j]) ) { + rPtr[i] = true; - /* No need to check other ranges */ - break; - } + /* No need to check other ranges */ + break; + } } while ( theta < 0.0 ) - theta += 2.0*M_PI; + theta += 2.0*M_PI; while ( theta > 2.0*M_PI ) - theta -= 2.0*M_PI; + theta -= 2.0*M_PI; if ( theta >= -sqrt_epsf && theta <= thetaMax+sqrt_epsf ) { - rPtr[i] = true; + rPtr[i] = true; - /* No need to check other ranges */ - break; + /* No need to check other ranges */ + break; } } } } + void rotate_vecs_about_axis_cfunc(long int na, double * angles, - long int nax, double * axes, - long int nv, double * vecs, - double * rVecs) + long int nax, double * axes, + long int nv, double * vecs, + double * rVecs) { int i, j, sa, sax; double c, s, nrm, proj, aCrossV[3]; @@ -898,7 +1431,7 @@ void rotate_vecs_about_axis_cfunc(long int na, double * angles, if ( nax > 1 || i == 0 ) { nrm = 0.0; for (j=0; j<3; j++) - nrm += axes[sax*i+j]*axes[sax*i+j]; + nrm += axes[sax*i+j]*axes[sax*i+j]; nrm = sqrt(nrm); } @@ -969,9 +1502,9 @@ void homochoricOfQuat_cfunc(int nq, double * qPtr, double * hPtr) if (phi > epsf) { arg = 0.75*(phi - sin(phi)); if (arg < 0.) 
{ - f = -pow(-arg, 1./3.); + f = -pow(-arg, 1./3.); } else { - f = pow(arg, 1./3.); + f = pow(arg, 1./3.); } s = 1. / sin(0.5*phi); diff --git a/hexrd/transforms/transforms_CFUNC.h b/hexrd/transforms/transforms_CFUNC.h index 7460e8d1..628883f7 100644 --- a/hexrd/transforms/transforms_CFUNC.h +++ b/hexrd/transforms/transforms_CFUNC.h @@ -53,6 +53,12 @@ void detectorXYToGvec_cfunc(long int npts, double * xy, double * beamVec, double * etaVec, double * tTh, double * eta, double * gVec_l); +void detectorXYToGvecArray_cfunc(long int npts, double * xy, + double * rMat_d, double * rMat_s, + double * tVec_d, double * tVec_s, double * tVec_c, + double * beamVec, double * etaVec, + double * tTh, double * eta, double * gVec_l); + void oscillAnglesOfHKLs_cfunc(long int npts, double * hkls, double chi, double * rMat_c, double * bMat, double wavelength, double * vInv_s, double * beamVec, double * etaVec, diff --git a/hexrd/utils/profiler.py b/hexrd/utils/profiler.py index d4591f25..aeb9076a 100644 --- a/hexrd/utils/profiler.py +++ b/hexrd/utils/profiler.py @@ -16,7 +16,7 @@ pass try: - from numbapro import nvtx + import nvtxpy as nvtx except ImportError: pass diff --git a/hexrd/wx/caking.py b/hexrd/wx/caking.py index 80e634d8..5fbfdebd 100644 --- a/hexrd/wx/caking.py +++ b/hexrd/wx/caking.py @@ -89,18 +89,18 @@ def __makeObjects(self): self.tbarSizer = makeTitleBar(self, 'Polar Rebinning') # - self.std_pan = standardOptsPanel(self, wx.NewId()) - self.mrb_pan = multiringOptsPanel(self, wx.NewId()) - self.sph_pan = sphericalOptsPanel(self, wx.NewId()) + self.std_pan = standardOptsPanel(self, wx.NewIdRef()) + self.mrb_pan = multiringOptsPanel(self, wx.NewIdRef()) + self.sph_pan = sphericalOptsPanel(self, wx.NewIdRef()) # # Method # - self.method_cho = wx.Choice(self, wx.NewId(), choices=prOpts.cakeMethods) + self.method_cho = wx.Choice(self, wx.NewIdRef(), choices=prOpts.cakeMethods) self.method_cho.SetSelection(2) # # Run # - self.run_but = wx.Button(self, wx.NewId(), 'Run Polar Rebin') + self.run_but = wx.Button(self, wx.NewIdRef(), 'Run Polar Rebin') # # Canvas for figures moved to separate window: see cakingCanvas.py # @@ -168,7 +168,7 @@ def __cake_img(self): 'args' : (), 'kwargs': dict() } - logwin = logWindow(self, wx.NewId(), action, 'Standard Polar Rebinning') + logwin = logWindow(self, wx.NewIdRef(), action, 'Standard Polar Rebinning') logwin.ShowModal() # # ============================== @@ -177,7 +177,7 @@ def __cake_img(self): # # Now draw # - cCan = cakeDisplay(self, wx.NewId(), prOpts.CAKE_IMG, self.img_info) + cCan = cakeDisplay(self, wx.NewIdRef(), prOpts.CAKE_IMG, self.img_info) # pass @@ -191,12 +191,12 @@ def __cake_rng(self): # 'args' : (), # 'kwargs': dict() # } - # logwin = logWindow(self, wx.NewId(), action, 'Multiring Binning') + # logwin = logWindow(self, wx.NewIdRef(), action, 'Multiring Binning') # logwin.ShowModal() # ==================== - cCan = cakeDisplay(self, wx.NewId(), prOpts.CAKE_RNG, self.mrb) + cCan = cakeDisplay(self, wx.NewIdRef(), prOpts.CAKE_RNG, self.mrb) return @@ -276,7 +276,7 @@ def __cake_sph(self): self.omeEta = CollapseOmeEta(reader, pdata, hklIDs, det, **kwargs) - cCan = cakeDisplay(self, wx.NewId(), prOpts.CAKE_SPH, self.omeEta) + cCan = cakeDisplay(self, wx.NewIdRef(), prOpts.CAKE_SPH, self.omeEta) return # @@ -355,7 +355,7 @@ def __init__(self, parent, id, **kwargs): # self.titlebar = wx.StaticText(self, -1, 'cakingDialog', style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) - self.dataPanel = cakingPanel(self, wx.NewId()) + self.dataPanel = cakingPanel(self, 
wx.NewIdRef()) # # Bindings. # @@ -445,33 +445,33 @@ def __makeObjects(self): # # Labels # - self.min_lab = wx.StaticText(self, wx.NewId(), 'min', style=wx.ALIGN_CENTER) - self.max_lab = wx.StaticText(self, wx.NewId(), 'max', style=wx.ALIGN_CENTER) - self.num_lab = wx.StaticText(self, wx.NewId(), 'num', style=wx.ALIGN_CENTER) - self.rho_lab = wx.StaticText(self, wx.NewId(), 'rho', style=wx.ALIGN_CENTER) - self.eta_lab = wx.StaticText(self, wx.NewId(), 'eta', style=wx.ALIGN_CENTER) + self.min_lab = wx.StaticText(self, wx.NewIdRef(), 'min', style=wx.ALIGN_CENTER) + self.max_lab = wx.StaticText(self, wx.NewIdRef(), 'max', style=wx.ALIGN_CENTER) + self.num_lab = wx.StaticText(self, wx.NewIdRef(), 'num', style=wx.ALIGN_CENTER) + self.rho_lab = wx.StaticText(self, wx.NewIdRef(), 'rho', style=wx.ALIGN_CENTER) + self.eta_lab = wx.StaticText(self, wx.NewIdRef(), 'eta', style=wx.ALIGN_CENTER) # # Rho # - self.rmin_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=10000, value=str(100)) - self.rmax_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=10000, value=str(1000)) - self.rnum_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=10000, value=str(500)) + self.rmin_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=10000, value=str(100)) + self.rmax_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=10000, value=str(1000)) + self.rnum_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=10000, value=str(500)) # # Eta # - self.emin_spn = wx.SpinCtrl(self, wx.NewId(), min=-360, max=360, value=str(0)) - self.emax_spn = wx.SpinCtrl(self, wx.NewId(), min=-360, max=360, value=str(360)) - self.enum_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=360, value=str(36)) + self.emin_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=-360, max=360, value=str(0)) + self.emax_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=-360, max=360, value=str(360)) + self.enum_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=360, value=str(36)) # # Other options # - self.corr_cbox = wx.CheckBox(self, wx.NewId(), 'corrected') - self.npdv_lab = wx.StaticText(self, wx.NewId(), 'pixel divisions', + self.corr_cbox = wx.CheckBox(self, wx.NewIdRef(), 'corrected') + self.npdv_lab = wx.StaticText(self, wx.NewIdRef(), 'pixel divisions', style=wx.ALIGN_CENTER) - self.npdv_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=10, initial=1) - self.frame_lab = wx.StaticText(self, wx.NewId(), 'frame', + self.npdv_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=10, initial=1) + self.frame_lab = wx.StaticText(self, wx.NewIdRef(), 'frame', style=wx.ALIGN_CENTER) - self.frame_cho = wx.Choice(self, wx.NewId(), choices=['frame 1']) + self.frame_cho = wx.Choice(self, wx.NewIdRef(), choices=['frame 1']) return @@ -629,28 +629,28 @@ def __makeObjects(self): self.tbarSizer = makeTitleBar(self, 'Options for Multiring Rebin') - self.ring_pan = ringPanel(self, wx.NewId()) + self.ring_pan = ringPanel(self, wx.NewIdRef()) - self.emin_lab = wx.StaticText(self, wx.NewId(), + self.emin_lab = wx.StaticText(self, wx.NewIdRef(), 'Eta min', style=wx.ALIGN_RIGHT) - self.emax_lab = wx.StaticText(self, wx.NewId(), + self.emax_lab = wx.StaticText(self, wx.NewIdRef(), 'Eta max', style=wx.ALIGN_RIGHT) - self.emin_txt = wx.TextCtrl(self, wx.NewId(), + self.emin_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.emax_txt = wx.TextCtrl(self, wx.NewId(), + self.emax_txt = wx.TextCtrl(self, wx.NewIdRef(), value='360', style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.numEta_lab = wx.StaticText(self, wx.NewId(), + self.numEta_lab = wx.StaticText(self, 
wx.NewIdRef(), 'Number of Eta Bins', style=wx.ALIGN_RIGHT) - self.numRho_lab = wx.StaticText(self, wx.NewId(), + self.numRho_lab = wx.StaticText(self, wx.NewIdRef(), 'Rho Bins Per Ring', style=wx.ALIGN_RIGHT) - self.numEta_spn = wx.SpinCtrl(self, wx.NewId(), min=1, value=str(36) ) - self.numRho_spn = wx.SpinCtrl(self, wx.NewId(), min=1, value=str(20) ) + self.numEta_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, value=str(36) ) + self.numRho_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, value=str(20) ) return @@ -783,45 +783,45 @@ def __makeObjects(self): # # Integer inputs # - self.lump_lab = wx.StaticText(self, wx.NewId(), '# lumped frames (omega)', style=wx.ALIGN_CENTER) - self.lump_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=1000, initial=1) + self.lump_lab = wx.StaticText(self, wx.NewIdRef(), '# lumped frames (omega)', style=wx.ALIGN_CENTER) + self.lump_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=1000, initial=1) - self.bins_lab = wx.StaticText(self, wx.NewId(), 'azimuthal bins (eta)', style=wx.ALIGN_CENTER) - self.bins_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=10000, initial=600) + self.bins_lab = wx.StaticText(self, wx.NewIdRef(), 'azimuthal bins (eta)', style=wx.ALIGN_CENTER) + self.bins_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=10000, initial=600) - self.thresh_lab = wx.StaticText(self, wx.NewId(), 'threshold', + self.thresh_lab = wx.StaticText(self, wx.NewIdRef(), 'threshold', style=wx.ALIGN_CENTER) - self.thresh_spn = wx.SpinCtrl(self, wx.NewId(), min=0, max=10000, initial=20) + self.thresh_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=0, max=10000, initial=20) # # Material and HKLs selector # exp = wx.GetApp().ws - self.matl_cho = wx.Choice(self, wx.NewId(), choices=exp.matNames) + self.matl_cho = wx.Choice(self, wx.NewIdRef(), choices=exp.matNames) self.matl_cho.SetSelection(0) - self.read_cho = wx.Choice(self, wx.NewId(), choices=exp.readerNames) + self.read_cho = wx.Choice(self, wx.NewIdRef(), choices=exp.readerNames) self.read_cho.SetSelection(0) - self.hkls_but = wx.Button(self, wx.NewId(), 'Select HKL') + self.hkls_but = wx.Button(self, wx.NewIdRef(), 'Select HKL') # # Angle/axis # name = 'angle' - self.angle_lab = wx.StaticText(self, wx.NewId(), name, style=wx.ALIGN_CENTER) - self.angle_flt = FloatControl(self, wx.NewId()) + self.angle_lab = wx.StaticText(self, wx.NewIdRef(), name, style=wx.ALIGN_CENTER) + self.angle_flt = FloatControl(self, wx.NewIdRef()) self.angle_flt.SetValue(1.0) name = 'axis x' - self.axis1_lab = wx.StaticText(self, wx.NewId(), name, style=wx.ALIGN_CENTER) - self.axis1_flt = FloatControl(self, wx.NewId()) + self.axis1_lab = wx.StaticText(self, wx.NewIdRef(), name, style=wx.ALIGN_CENTER) + self.axis1_flt = FloatControl(self, wx.NewIdRef()) self.axis1_flt.SetValue(1.0) name = 'axis y' - self.axis2_lab = wx.StaticText(self, wx.NewId(), name, style=wx.ALIGN_CENTER) - self.axis2_flt = FloatControl(self, wx.NewId()) + self.axis2_lab = wx.StaticText(self, wx.NewIdRef(), name, style=wx.ALIGN_CENTER) + self.axis2_flt = FloatControl(self, wx.NewIdRef()) self.axis2_flt.SetValue(1.0) name = 'axis z' - self.axis3_lab = wx.StaticText(self, wx.NewId(), name, style=wx.ALIGN_CENTER) - self.axis3_flt = FloatControl(self, wx.NewId()) + self.axis3_lab = wx.StaticText(self, wx.NewIdRef(), name, style=wx.ALIGN_CENTER) + self.axis3_flt = FloatControl(self, wx.NewIdRef()) self.axis3_flt.SetValue(1.0) @@ -904,7 +904,7 @@ def OnSelectHKLs(self, evt): exp = app.ws mat = exp.activeMaterial - dlg = hklsDlg(self, wx.NewId(), mat) + dlg = hklsDlg(self, 
wx.NewIdRef(), mat) if dlg.ShowModal() == wx.ID_OK: mat.planeData.exclusions = dlg.getExclusions() diff --git a/hexrd/wx/cakingcanvas.py b/hexrd/wx/cakingcanvas.py index d3bcea27..49b98dbe 100644 --- a/hexrd/wx/cakingcanvas.py +++ b/hexrd/wx/cakingcanvas.py @@ -106,16 +106,16 @@ def __makeObjects(self): self.tbarSizer = makeTitleBar(self, 'cakeCanvas') # if self.cakeType == prOpts.CAKE_IMG: - self.opt_pan = imgOpts(self, wx.NewId()) + self.opt_pan = imgOpts(self, wx.NewIdRef()) # full image caking panel pass elif self.cakeType == prOpts.CAKE_RNG: # multiring panel - self.opt_pan = rngOpts(self, wx.NewId()) + self.opt_pan = rngOpts(self, wx.NewIdRef()) pass elif self.cakeType == prOpts.CAKE_SPH: # omega-eta panel - self.opt_pan = sphOpts(self, wx.NewId()) + self.opt_pan = sphOpts(self, wx.NewIdRef()) pass self._makeFigureCanvas() @@ -126,7 +126,7 @@ def __makeObjects(self): def _makeFigureCanvas(self): """Build figure canvas""" self.figure = Figure() - self.canvas = FigureCanvas(self, wx.NewId(), self.figure) + self.canvas = FigureCanvas(self, wx.NewIdRef(), self.figure) self.axes = self.figure.gca() self.axes.set_aspect('equal') @@ -217,7 +217,7 @@ def __makeObjects(self): # # Add canvas panel # - self.cpan = cakeCanvas(self, wx.NewId(), self.cakeType, self.data) + self.cpan = cakeCanvas(self, wx.NewIdRef(), self.cakeType, self.data) # # A Statusbar in the bottom of the window # @@ -298,8 +298,8 @@ def __makeObjects(self): self.tbarSizer = makeTitleBar(self, 'Multiring Rebinning Results') # - self.unit_cho = wx.Choice(self, wx.NewId(), choices=rngOpts.unitList) - self.exp_but = wx.Button(self, wx.NewId(), 'Export') + self.unit_cho = wx.Choice(self, wx.NewIdRef(), choices=rngOpts.unitList) + self.exp_but = wx.Button(self, wx.NewIdRef(), 'Export') #self.Bind(wx.EVT_CHOICE, self.OnChoice, self.choice) return @@ -408,8 +408,8 @@ def __init__(self, parent, id, **kwargs): def __makeObjects(self): """Add interactors""" self.tbarSizer = makeTitleBar(self, 'Full Image Rebinning Results') - self.cmPanel = cmapPanel(self, wx.NewId()) - self.exp_but = wx.Button(self, wx.NewId(), 'Export') + self.cmPanel = cmapPanel(self, wx.NewIdRef()) + self.exp_but = wx.Button(self, wx.NewIdRef(), 'Export') # return @@ -574,24 +574,24 @@ def __init__(self, parent, id, **kwargs): def __makeObjects(self): """Add interactors""" exp = wx.GetApp().ws - self.cmPanel = cmapPanel(self, wx.NewId()) + self.cmPanel = cmapPanel(self, wx.NewIdRef()) self.tbarSizer = makeTitleBar(self, 'Omega-Eta Plots', color=WP.BG_COLOR_TITLEBAR_PANEL1) # choice interactor for HKL hkls = exp.activeMaterial.planeData.getHKLs(asStr=True) - self.hkl_cho = wx.Choice(self, wx.NewId(), choices=hkls) + self.hkl_cho = wx.Choice(self, wx.NewIdRef(), choices=hkls) self.hkl_cho.SetSelection(0) - self.disp_cho = wx.Choice(self, wx.NewId(), choices=self.DISP_METHODS) + self.disp_cho = wx.Choice(self, wx.NewIdRef(), choices=self.DISP_METHODS) self.disp_cho.SetSelection(0) self.idata = 0 self.dispm = self.DISP_RAW self.coms = None # centers of mass from optional labeling - self.exp_but = wx.Button(self, wx.NewId(), 'Export') - self.lab_but = wx.Button(self, wx.NewId(), 'Label Spots') + self.exp_but = wx.Button(self, wx.NewIdRef(), 'Export') + self.lab_but = wx.Button(self, wx.NewIdRef(), 'Label Spots') return def __makeBindings(self): diff --git a/hexrd/wx/canvaspanel.py b/hexrd/wx/canvaspanel.py index 344df48c..d10777d2 100644 --- a/hexrd/wx/canvaspanel.py +++ b/hexrd/wx/canvaspanel.py @@ -95,7 +95,7 @@ def __makeObjects(self): # # * show image # - 
self.showImage_box = wx.CheckBox(self, wx.NewId(), + self.showImage_box = wx.CheckBox(self, wx.NewIdRef(), 'Show Image') self.showImage_box.SetValue(True) self.optSizer.Add(self.showImage_box, 0, wx.LEFT | wx.EXPAND) @@ -103,7 +103,7 @@ def __makeObjects(self): # # * show rings # - self.showCalRings_box = wx.CheckBox(self, wx.NewId(), + self.showCalRings_box = wx.CheckBox(self, wx.NewIdRef(), 'Show Rings') self.showCalRings_box.SetValue(False) # default self.optSizer.Add(self.showCalRings_box, 0, wx.LEFT | wx.EXPAND) @@ -111,7 +111,7 @@ def __makeObjects(self): # # * show ranges # - self.showCalRanges_box = wx.CheckBox(self, wx.NewId(), + self.showCalRanges_box = wx.CheckBox(self, wx.NewIdRef(), 'Show Ranges') self.showCalRanges_box.SetValue(False) # default self.optSizer.Add(self.showCalRanges_box, 0, wx.LEFT | wx.EXPAND) @@ -119,23 +119,23 @@ def __makeObjects(self): # # Add image list management # - self.ail_lab = wx.StaticText(self, wx.NewId(), 'Load Image', style=wx.ALIGN_CENTER) - self.ail_cho = wx.Choice(self, wx.NewId(), choices=[]) - self.nam_lab = wx.StaticText(self, wx.NewId(), 'Name Image', style=wx.ALIGN_CENTER) - self.nam_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.ail_lab = wx.StaticText(self, wx.NewIdRef(), 'Load Image', style=wx.ALIGN_CENTER) + self.ail_cho = wx.Choice(self, wx.NewIdRef(), choices=[]) + self.nam_lab = wx.StaticText(self, wx.NewIdRef(), 'Name Image', style=wx.ALIGN_CENTER) + self.nam_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.eil_but = wx.Button(self, wx.NewId(), 'Edit List') + self.eil_but = wx.Button(self, wx.NewIdRef(), 'Edit List') # # Add colormap panel # - self.cmPanel = cmapPanel(self, wx.NewId()) + self.cmPanel = cmapPanel(self, wx.NewIdRef()) # # ===== FIGURE CANVAS # self.figure = Figure() self.axes = self.figure.gca() self.axes.set_aspect('equal') - self.canvas = FigureCanvas(self, wx.NewId(), self.figure) + self.canvas = FigureCanvas(self, wx.NewIdRef(), self.figure) self.__add_toolbar() # comment this out for no toolbar @@ -148,8 +148,8 @@ def on_press(event): mainFrame = wx.GetApp().GetTopWindow() if hasattr(event, 'xdata') and event.xdata: - x = event.xdata; xadj = x + 0.5; xint = numpy.floor(xadj) - y = event.ydata; yadj = y + 0.5; yint = numpy.floor(yadj) + x = event.xdata; xadj = x + 0.5; xint = int(numpy.floor(xadj)) + y = event.ydata; yadj = y + 0.5; yint = int(numpy.floor(yadj)) tth, eta = numpy.array(det.xyoToAng(y, x)) cartx, carty = det.cartesianCoordsOfPixelIndices(y, x) cx = (cartx - det.xc)/det.pixelPitch @@ -289,12 +289,14 @@ def update(self, **kwargs): li = kwargs['loadImage'] ui = kwargs['updateImage'] oninit = kwargs['onInit'] + print 'li: ', li # # Show image if box is checked. 
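The `int(numpy.floor(...))` casts added to `on_press` above deserve a note: `numpy.floor` returns a float, and recent numpy releases refuse floats as array indices, so the pixel lookup needs the explicit cast. A minimal sketch of the rounding convention, assuming the usual pixels-centered-on-integers display (`pixel_index` is an illustrative helper name, not part of the patch):

```python
import numpy

def pixel_index(coord):
    # Pixels are drawn centered on integer coordinates, so adding 0.5
    # before flooring snaps a canvas coordinate to the nearest pixel
    # center; int() is required because numpy.floor returns a float,
    # which newer numpy versions reject as an array index.
    return int(numpy.floor(coord + 0.5))

assert pixel_index(10.4) == 10
assert pixel_index(10.6) == 11
```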
# app = wx.GetApp() exp = app.ws img = exp.active_img + if img is None: # no active image, but possibly one on the axes @@ -304,9 +306,11 @@ def update(self, **kwargs): img0.set_visible(False) else: si = self.showImage_box.IsChecked() - + if ni or ui: # not using axes image list + if ni: self.axes.set_autoscale_on(True) + self.axes.images = [] self.axes.imshow(img, @@ -339,7 +343,10 @@ def update(self, **kwargs): # #rcho = self.rings_cho #ResetChoice(rcho, exp.matNames, rcho.GetStringSelection) - # + if li: + print 'loading image: working with axes' + self.axes.set_autoscale_on(True) + self.draw() # # Update image list @@ -419,7 +426,7 @@ def OnEditImg(self, evt): ssel = self.ail_cho.GetStringSelection() - dlg = ListEditDlg(self, wx.NewId(), nilist) + dlg = ListEditDlg(self, wx.NewIdRef(), nilist) dlg.ShowModal() dlg.Destroy() diff --git a/hexrd/wx/canvasutil.py b/hexrd/wx/canvasutil.py index a2656cbb..98f11d48 100644 --- a/hexrd/wx/canvasutil.py +++ b/hexrd/wx/canvasutil.py @@ -95,7 +95,7 @@ def __makeObjects(self): # # * choose colormap and vmin and vmax # - self.cmap_lab = wx.StaticText(self, wx.NewId(), + self.cmap_lab = wx.StaticText(self, wx.NewIdRef(), 'Colormap: ', style=wx.ALIGN_RIGHT) @@ -103,39 +103,39 @@ def __makeObjects(self): 'flag', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'jet', 'pink', 'prism', 'spring', 'summer', 'winter', 'spectral'] - self.cmap_cho = wx.Choice(self, wx.NewId(), + self.cmap_cho = wx.Choice(self, wx.NewIdRef(), choices=self.cmap_nameList) self.cmap_name = 'bone' self.cmap_cho.SetStringSelection(self.cmap_name) self.cmin_val = 0 - self.cmin_lab = wx.StaticText(self, wx.NewId(), + self.cmin_lab = wx.StaticText(self, wx.NewIdRef(), 'Minimum: ', style=wx.ALIGN_RIGHT) - self.cmin_txt = wx.TextCtrl(self, wx.NewId(), + self.cmin_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(self.cmin_val), style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.cmUnder_box = wx.CheckBox(self, wx.NewId(), 'show under') + self.cmUnder_box = wx.CheckBox(self, wx.NewIdRef(), 'show under') self.cmax_val = 2000 - self.cmax_lab = wx.StaticText(self, wx.NewId(), + self.cmax_lab = wx.StaticText(self, wx.NewIdRef(), 'Maximum: ', style=wx.ALIGN_RIGHT) - self.cmax_txt = wx.TextCtrl(self, wx.NewId(), + self.cmax_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(self.cmax_val), style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.cmOver_box = wx.CheckBox(self, wx.NewId(), 'show over') + self.cmOver_box = wx.CheckBox(self, wx.NewIdRef(), 'show over') self.apply_filter = False self.filter_val = 0.8 - self.applyFilter_txt = wx.TextCtrl(self, wx.NewId(), + self.applyFilter_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(self.filter_val), style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.applyFilter_lab = wx.StaticText(self, wx.NewId(), + self.applyFilter_lab = wx.StaticText(self, wx.NewIdRef(), 'Apply filter: ', style=wx.ALIGN_RIGHT) - self.applyFilter_box = wx.CheckBox(self, wx.NewId(), 'apply filter') + self.applyFilter_box = wx.CheckBox(self, wx.NewIdRef(), 'apply filter') return diff --git a/hexrd/wx/detectorpanel.py b/hexrd/wx/detectorpanel.py index 88e83941..25c3f981 100644 --- a/hexrd/wx/detectorpanel.py +++ b/hexrd/wx/detectorpanel.py @@ -109,14 +109,14 @@ def __makeObjects(self): # # Material Selection # - self.mats_lab = wx.StaticText(self, wx.NewId(), + self.mats_lab = wx.StaticText(self, wx.NewIdRef(), 'Active Material', style=wx.ALIGN_CENTER) - self.mats_cho = wx.Choice(self, wx.NewId(), + self.mats_cho = wx.Choice(self, wx.NewIdRef(), choices=[m.name for m in exp.matList]) # # 
Rings panel # - self.ring_pan = ringPanel(self, wx.NewId()) + self.ring_pan = ringPanel(self, wx.NewIdRef()) # # II. Geometry # @@ -128,42 +128,47 @@ def __makeObjects(self): app = wx.GetApp() det = app.ws.detector + self.nrows_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(det.nrows), style=wx.RAISED_BORDER) + self.ncols_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(det.ncols), style=wx.RAISED_BORDER) + self.pixel_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(det.pixelPitch), style=wx.RAISED_BORDER) + self.pixel_txt_s = wx.TextCtrl(self, wx.NewIdRef(), value=str(det.pixelPitch), style=wx.RAISED_BORDER|wx.TE_READONLY) + name = 'x Center' - self.cbox_xc = wx.CheckBox(self, wx.NewId(), name) - self.float_xc = FloatControl(self, wx.NewId()) + self.cbox_xc = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_xc = FloatControl(self, wx.NewIdRef()) self.float_xc.SetValue(det.xc) self.float_xc.SetDelta(0.5*det.pixelPitch) name = 'y Center' - self.cbox_yc = wx.CheckBox(self, wx.NewId(), name) - self.float_yc = FloatControl(self, wx.NewId()) + self.cbox_yc = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_yc = FloatControl(self, wx.NewIdRef()) self.float_yc.SetValue(det.yc) self.float_yc.SetDelta(0.5*det.pixelPitch) name = 'Distance' - self.cbox_D = wx.CheckBox(self, wx.NewId(), name) - self.float_D = FloatControl(self, wx.NewId()) + self.cbox_D = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_D = FloatControl(self, wx.NewIdRef()) self.float_D.SetValue(det.workDist) self.float_D.SetDelta(10*det.pixelPitch) name = 'x Tilt' - self.cbox_xt = wx.CheckBox(self, wx.NewId(), name) - self.float_xt = FloatControl(self, wx.NewId()) + self.cbox_xt = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_xt = FloatControl(self, wx.NewIdRef()) self.float_xt.SetValue(det.xTilt) name = 'y Tilt' - self.cbox_yt = wx.CheckBox(self, wx.NewId(), name) - self.float_yt = FloatControl(self, wx.NewId()) + self.cbox_yt = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_yt = FloatControl(self, wx.NewIdRef()) self.float_yt.SetValue(det.yTilt) name = 'z Tilt' - self.cbox_zt = wx.CheckBox(self, wx.NewId(), name) - self.float_zt = FloatControl(self, wx.NewId()) + self.cbox_zt = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_zt = FloatControl(self, wx.NewIdRef()) self.float_zt.SetValue(det.zTilt) name = 'chi Tilt' - self.cbox_ct = wx.CheckBox(self, wx.NewId(), name) - self.float_ct = FloatControl(self, wx.NewId()) + self.cbox_ct = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_ct = FloatControl(self, wx.NewIdRef()) self.float_ct.SetValue(det.chiTilt) # @@ -174,57 +179,57 @@ def __makeObjects(self): # number (if any at all) will change for each # detector type. 
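The mechanical change running through this file, and through the whole wx layer of this patch, is the wxPython 4 (Phoenix) id migration: `wx.NewId()` is deprecated because it hands out bare ints with no lifetime tracking, while `wx.NewIdRef()` returns a `WindowIDRef` that reserves the id for as long as the reference lives yet still behaves like an int at widget-creation sites, which is what makes the swap drop-in. A minimal sketch, assuming wxPython 4 is installed (the widget and label are illustrative):

```python
import wx

app = wx.App(False)
frame = wx.Frame(None)

# wx.NewIdRef() reserves an id and wraps it in a WindowIDRef; it can
# be passed anywhere a raw window id was accepted before.
run_id = wx.NewIdRef()
button = wx.Button(frame, run_id, 'Run Fit')
assert button.GetId() == int(run_id)
```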
name = 'p0' - self.cbox_d1 = wx.CheckBox(self, wx.NewId(), name) - self.float_d1 = FloatControl(self, wx.NewId()) + self.cbox_d1 = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_d1 = FloatControl(self, wx.NewIdRef()) self.float_d1.SetValue(det.dparms[0]) name = 'p1' - self.cbox_d2 = wx.CheckBox(self, wx.NewId(), name) - self.float_d2 = FloatControl(self, wx.NewId()) + self.cbox_d2 = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_d2 = FloatControl(self, wx.NewIdRef()) self.float_d2.SetValue(det.dparms[1]) name = 'p2' - self.cbox_d3 = wx.CheckBox(self, wx.NewId(), name) - self.float_d3 = FloatControl(self, wx.NewId()) + self.cbox_d3 = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_d3 = FloatControl(self, wx.NewIdRef()) self.float_d3.SetValue(det.dparms[2]) name = 'n0' - self.cbox_d4 = wx.CheckBox(self, wx.NewId(), name) - self.float_d4 = FloatControl(self, wx.NewId()) + self.cbox_d4 = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_d4 = FloatControl(self, wx.NewIdRef()) self.float_d4.SetValue(det.dparms[3]) name = 'n1' - self.cbox_d5 = wx.CheckBox(self, wx.NewId(), name) - self.float_d5 = FloatControl(self, wx.NewId()) + self.cbox_d5 = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_d5 = FloatControl(self, wx.NewIdRef()) self.float_d5.SetValue(det.dparms[4]) name = 'n2' - self.cbox_d6 = wx.CheckBox(self, wx.NewId(), name) - self.float_d6 = FloatControl(self, wx.NewId()) + self.cbox_d6 = wx.CheckBox(self, wx.NewIdRef(), name) + self.float_d6 = FloatControl(self, wx.NewIdRef()) self.float_d6.SetValue(det.dparms[5]) # # Fitting method # self.fitLabelSizer = makeTitleBar(self, 'Fitting Method', color=WP.TITLEBAR_BG_COLOR_PANEL1) - self.fitDir_rb = wx.RadioButton(self, wx.NewId(), 'Direct Fit', + self.fitDir_rb = wx.RadioButton(self, wx.NewIdRef(), 'Direct Fit', style=wx.RB_GROUP) - self.fitBin_rb = wx.RadioButton(self, wx.NewId(), 'Binned Fit') + self.fitBin_rb = wx.RadioButton(self, wx.NewIdRef(), 'Binned Fit') # # III. 
Caking # - self.numEta_lab = wx.StaticText(self, wx.NewId(), + self.numEta_lab = wx.StaticText(self, wx.NewIdRef(), 'Azimuthal bins', style=wx.ALIGN_RIGHT) - self.numRho_lab = wx.StaticText(self, wx.NewId(), + self.numRho_lab = wx.StaticText(self, wx.NewIdRef(), 'Radial bins per ring', style=wx.ALIGN_RIGHT) - self.numEta_spn = wx.SpinCtrl(self, wx.NewId(), min=12, initial=36) - self.numRho_spn = wx.SpinCtrl(self, wx.NewId(), min=10, initial=20) + self.numEta_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=12, initial=36) + self.numRho_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=10, initial=20) # # Fit button with options (at some point) # - self.runFit_but = wx.Button(self, wx.NewId(), 'Run Fit') + self.runFit_but = wx.Button(self, wx.NewIdRef(), 'Run Fit') return @@ -237,6 +242,10 @@ def __makeBindings(self): self.Bind(wx.EVT_SPINCTRL, self.OnNumEta, self.numEta_spn) # detector section + self.Bind(wx.EVT_TEXT_ENTER, self.OnChangeRows, self.nrows_txt) + self.Bind(wx.EVT_TEXT_ENTER, self.OnChangeCols, self.ncols_txt) + self.Bind(wx.EVT_TEXT_ENTER, self.OnChangePixl, self.pixel_txt) + self.Bind(EVT_FLOAT_CTRL, self.OnFloatXC, self.float_xc) self.Bind(EVT_FLOAT_CTRL, self.OnFloatYC, self.float_yc) self.Bind(EVT_FLOAT_CTRL, self.OnFloatD, self.float_D) @@ -287,10 +296,15 @@ def __makeSizers(self): # # Geometry sizer # - nrow = 13; ncol = 2; padx = 5; pady = 5 + nrow = 15; ncol = 2; padx = 5; pady = 5 self.geoSizer = wx.FlexGridSizer(nrow, ncol, padx, pady) self.geoSizer.AddGrowableCol(0, 1) self.geoSizer.AddGrowableCol(1, 1) + # * row/col hack + self.geoSizer.Add(self.nrows_txt, 1, wx.ALIGN_RIGHT) + self.geoSizer.Add(self.ncols_txt, 1, wx.ALIGN_LEFT) + self.geoSizer.Add(self.pixel_txt, 1, wx.ALIGN_RIGHT) + self.geoSizer.Add(self.pixel_txt_s, 1, wx.ALIGN_LEFT) # * x-center self.geoSizer.Add(self.cbox_xc, 1, wx.EXPAND) self.geoSizer.Add(self.float_xc, 1, wx.EXPAND) @@ -484,7 +498,7 @@ def OnRunFit(self, evt): # 'args': (), # 'kwargs': dict() # } - # logwin = logWindow(self, wx.NewId(), action, 'Fitting Log') + # logwin = logWindow(self, wx.NewIdRef(), action, 'Fitting Log') # logwin.ShowModal() # # except Exception as e: @@ -516,6 +530,51 @@ def OnFitBin(self, e): # # Detector Parameters # + def OnChangeRows(self, evt): + """Callback for float_xc choice""" + try: + a = wx.GetApp() + nrows = int(self.nrows_txt.GetValue()) + a.ws.detector.nrows = nrows + a.getCanvas().update() + + except Exception as e: + msg = 'Failed to set nrows: \n%s' % str(e) + wx.MessageBox(msg) + pass + + return + + def OnChangeCols(self, evt): + """Callback for float_xc choice""" + try: + a = wx.GetApp() + ncols = int(self.ncols_txt.GetValue()) + a.ws.detector.ncols = ncols + a.getCanvas().update() + + except Exception as e: + msg = 'Failed to set ncols: \n%s' % str(e) + wx.MessageBox(msg) + pass + + return + + def OnChangePixl(self, evt): + """Callback for float_xc choice""" + try: + a = wx.GetApp() + pixelPitch = float(self.pixel_txt.GetValue()) + a.ws.detector.pixelPitch = pixelPitch + a.getCanvas().update() + + except Exception as e: + msg = 'Failed to set pixel pitch: \n%s' % str(e) + wx.MessageBox(msg) + pass + + return + def OnFloatXC(self, evt): """Callback for float_xc choice""" try: diff --git a/hexrd/wx/fitparampanel.py b/hexrd/wx/fitparampanel.py index e65b7933..d4490b90 100644 --- a/hexrd/wx/fitparampanel.py +++ b/hexrd/wx/fitparampanel.py @@ -87,8 +87,8 @@ def __makeObjects(self): for p in self.fParams: name = p.getProp('name') valu = p.getProp('value') - cbox = wx.CheckBox(self, wx.NewId(), name) - spin = 
wx.SpinCtrl(self, wx.NewId(), str(valu), initial=50, name=name) + cbox = wx.CheckBox(self, wx.NewIdRef(), name) + spin = wx.SpinCtrl(self, wx.NewIdRef(), str(valu), initial=50, name=name) self.rowDict[name] = [cbox, spin] pass @@ -97,12 +97,12 @@ def __makeObjects(self): def __makeTitleBar(self, t): """Add titlebar""" self.titlebar = wx.StaticText(self, -1, t, - style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) + style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) self.titlebar.SetBackgroundColour(WP.TITLEBAR_BG_COLOR) myToolTip = r""" -PANEL FOR managing data for fit parameters -""" - self.titlebar.SetToolTipString(myToolTip) + PANEL FOR managing data for fit parameters + """ + self.titlebar.SetToolTip(myToolTip) return diff --git a/hexrd/wx/floatcontrol.py b/hexrd/wx/floatcontrol.py index 70255eb3..d3829449 100644 --- a/hexrd/wx/floatcontrol.py +++ b/hexrd/wx/floatcontrol.py @@ -98,7 +98,7 @@ def __init__(self, parent, id, **kwargs): The spinner increment is shown in the gray box to the right of the spinner. """ - self.SetToolTipString(myToolTip) + self.SetToolTip(myToolTip) self.SetAutoLayout(True) self.SetSizerAndFit(self.sizer) @@ -109,14 +109,14 @@ def __init__(self, parent, id, **kwargs): # def __makeObjects(self): """Add interactors""" - self.value_txt = wx.TextCtrl(self, wx.NewId(), + self.value_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(self.value), style=wx.RAISED_BORDER| wx.TE_PROCESS_ENTER) - self.delta_txt = wx.TextCtrl(self, wx.NewId(), + self.delta_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(self.delta), style=wx.RAISED_BORDER| wx.TE_PROCESS_ENTER) self.delta_txt.SetBackgroundColour( (230, 230, 230) ) - self.spin_but = wx.SpinButton(self, wx.NewId()) + self.spin_but = wx.SpinButton(self, wx.NewIdRef()) self.spin_but.SetRange(-1,1) return diff --git a/hexrd/wx/gereader.py b/hexrd/wx/gereader.py index a430d0d9..28a19a15 100644 --- a/hexrd/wx/gereader.py +++ b/hexrd/wx/gereader.py @@ -32,12 +32,13 @@ import wx import wx.lib.mixins.listctrl as listMixins -from hexrd.xrd import detector +from hexrd.xrd.image_io import ReadGE from hexrd.xrd.experiment import ImageModes, ReaderInput from hexrd.wx.guiconfig import WindowParameters as WP from hexrd.wx.guiutil import ResetChoice, makeTitleBar from hexrd.wx.canvaspanel import CanvasPanel +from hexrd.wx.readerinfo_dlg import ReaderInfoDialog # # DATA # @@ -54,17 +55,6 @@ } IMAGE_MODE_DICT_SEL = dict(zip(IMG_MODES, range(len(MODE_CHOICES)))) # -# * Dark file choices -# -DARK_CHO_NONE = 'no dark image' -DARK_CHO_FILE = 'dark image file' -DARK_CHO_ARRAY = 'dark frame array' -DARK_CHO_EMPTY = 'empty frames' -DARK_CHOICES = [DARK_CHO_NONE, DARK_CHO_FILE, DARK_CHO_ARRAY, DARK_CHO_EMPTY] -DARK_MODES = [ReaderInput.DARK_MODE_NONE, ReaderInput.DARK_MODE_FILE, ReaderInput.DARK_MODE_ARRAY, ReaderInput.DARK_MODE_EMPTY] -DARK_MODE_DICT = dict(zip(DARK_CHOICES, DARK_MODES)) -DARK_MODE_DICT_INV = dict(zip(DARK_MODES, DARK_CHOICES)) -# # * Aggregation choices # AGG_CHO_NONE = 'SINGLE FRAMES' @@ -75,18 +65,6 @@ AGG_MODE_DICT = dict(zip(AGG_CHOICES, ReaderInput.AGG_MODES)) AGG_MODE_DICT_INV = dict(zip(ReaderInput.AGG_MODES, AGG_CHOICES)) # -# * FLIP choices -# -FLIP_CHO_NONE = 'no flip' -FLIP_CHO_V = 'vertical' -FLIP_CHO_H = 'horizontal' -FLIP_CHO_180 = '180 degrees' -FLIP_CHO_M90 = '-90 degrees' -FLIP_CHO_P90 = '+90 degrees' -FLIP_CHOICES = [FLIP_CHO_NONE, FLIP_CHO_V, FLIP_CHO_H, FLIP_CHO_180, FLIP_CHO_M90, FLIP_CHO_P90] -FLIP_MODE_DICT = dict(zip(FLIP_CHOICES, ReaderInput.FLIP_MODES)) -FLIP_MODE_DICT_INV = dict(zip(ReaderInput.FLIP_MODES, FLIP_CHOICES)) -# # 
Utility vFunctions # def getValStr(r, i): @@ -173,69 +151,58 @@ def __makeObjects(self): # # Reader List # - self.curr_lab = wx.StaticText(self, wx.NewId(), + self.curr_lab = wx.StaticText(self, wx.NewIdRef(), 'Current Reader', style=wx.ALIGN_CENTER) - self.rdrs_cho = wx.Choice(self, wx.NewId(), + self.rdrs_cho = wx.Choice(self, wx.NewIdRef(), choices=[r.name for r in exp.savedReaders]) - #self.save_but = wx.Button(self, wx.NewId(), 'Save Reader') - self.new_but = wx.Button(self, wx.NewId(), 'New Reader') + self.new_but = wx.Button(self, wx.NewIdRef(), 'New Reader') # # Reader Name # - self.name_lab = wx.StaticText(self, wx.NewId(), + self.name_lab = wx.StaticText(self, wx.NewIdRef(), 'READER NAME', style=wx.ALIGN_CENTER) - self.name_txt = wx.TextCtrl(self, wx.NewId(), value=ReaderInput.DFLT_NAME, + self.name_txt = wx.TextCtrl(self, wx.NewIdRef(), value=ReaderInput.DFLT_NAME, style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) # # Mode interactors # - self.mode_lab = wx.StaticText(self, wx.NewId(), 'Image Mode', + self.mode_lab = wx.StaticText(self, wx.NewIdRef(), 'Image Mode', style=wx.ALIGN_RIGHT) - self.mode_cho = wx.Choice(self, wx.NewId(), choices=MODE_CHOICES) - # + self.mode_cho = wx.Choice(self, wx.NewIdRef(), choices=MODE_CHOICES) # - # Image and dark file names + # Aggregation # - self.img_but = wx.Button(self, wx.NewId(), 'Select Image Files') - self.dir_but = wx.Button(self, wx.NewId(), 'Change Image Folder') - - self.drk_lab = wx.StaticText(self, wx.NewId(), 'Dark Mode', + self.agg_lab = wx.StaticText(self, wx.NewIdRef(), 'Frame Aggregation', style=wx.ALIGN_RIGHT) - self.drk_cho = wx.Choice(self, wx.NewId(), choices=DARK_CHOICES) - self.drk_but = wx.Button(self, wx.NewId(), 'Select Dark File') + self.agg_cho = wx.Choice(self, wx.NewIdRef(), choices=AGG_CHOICES) # - # Aggregation # - self.agg_lab = wx.StaticText(self, wx.NewId(), 'Frame Aggregation', - style=wx.ALIGN_RIGHT) - self.agg_cho = wx.Choice(self, wx.NewId(), choices=AGG_CHOICES) + # Image and dark file names + # + self.img_but = wx.Button(self, wx.NewIdRef(), 'Select Imageseries File') + self.dir_but = wx.Button(self, wx.NewIdRef(), 'Change Image Folder') + # # Action buttons # - self.files_lab = wx.StaticText(self, wx.NewId(), 'Image Files', + self.files_lab = wx.StaticText(self, wx.NewIdRef(), 'Image Files', style=wx.ALIGN_RIGHT) - self.read_lab = wx.StaticText(self, wx.NewId(), 'Read', + self.read_lab = wx.StaticText(self, wx.NewIdRef(), 'Read', style=wx.ALIGN_RIGHT) - self.read_but = wx.Button(self, wx.NewId(), 'Load') - self.browse_lab = wx.StaticText(self, wx.NewId(), 'Browse Frames', + self.read_but = wx.Button(self, wx.NewIdRef(), 'Load') + self.browse_lab = wx.StaticText(self, wx.NewIdRef(), 'Browse Frames', style=wx.ALIGN_RIGHT) - self.browse_spn = wx.SpinCtrl(self, wx.NewId(), min=0, initial=0) - self.browse_inf = wx.TextCtrl(self, wx.NewId(), value='', + self.browse_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=0, initial=0) + self.browse_inf = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_READONLY) self.sizer = wx.BoxSizer(wx.VERTICAL) # - # Orientation - # - self.flip_lab = wx.StaticText(self, wx.NewId(), 'Image Orientation', - style=wx.ALIGN_RIGHT) - self.flip_cho = wx.Choice(self, wx.NewId(), choices=FLIP_CHOICES) - # # Subpanels # - self.sp_single = SF_Subpanel(self, wx.NewId()) - self.sp_multi = MF_Subpanel(self, wx.NewId()) - self.sp_info = infoPanel(self, wx.NewId()) + self.sp_single = SF_Subpanel(self, wx.NewIdRef()) + self.sp_multi = MF_Subpanel(self, wx.NewIdRef()) + self.sp_info 
= infoPanel(self, wx.NewIdRef()) return @@ -245,23 +212,17 @@ def __makeBindings(self): self.Bind(wx.EVT_TEXT_ENTER, self.OnNameChange, self.name_txt) - self.Bind(wx.EVT_BUTTON, self.OnDarkBut, self.drk_but) self.Bind(wx.EVT_BUTTON, self.OnImgBut, self.img_but) self.Bind(wx.EVT_BUTTON, self.OnImgDirBut, self.dir_but) self.Bind(wx.EVT_BUTTON, self.OnReadBut, self.read_but) - self.Bind(wx.EVT_CHOICE, self.OnDarkChoice, self.drk_cho) self.Bind(wx.EVT_CHOICE, self.OnAggChoice, self.agg_cho) - self.Bind(wx.EVT_CHOICE, self.OnFlipChoice, self.flip_cho) self.Bind(wx.EVT_SPINCTRL, self.OnBrowseSpin, self.browse_spn) self.Bind(wx.EVT_CHOICE, self.OnReaderChoice, self.rdrs_cho) - #self.Bind(wx.EVT_BUTTON, self.OnReaderSave, self.save_but) self.Bind(wx.EVT_BUTTON, self.OnReaderNew, self.new_but) - return - def __makeSizers(self): """Lay out the interactors""" @@ -291,16 +252,6 @@ def __makeSizers(self): self.fgsizer.Add(wx.Window(self, -1), 0, wx.EXPAND|wx.ALIGN_CENTER) self.fgsizer.Add(self.agg_cho, 0, wx.EXPAND|wx.ALIGN_CENTER) - self.fgsizer.Add(self.flip_lab, 0, wx.ALIGN_RIGHT) - self.fgsizer.Add(wx.Window(self, -1), 0, wx.EXPAND|wx.ALIGN_CENTER) - self.fgsizer.Add(wx.Window(self, -1), 0, wx.EXPAND|wx.ALIGN_CENTER) - self.fgsizer.Add(self.flip_cho, 0, wx.EXPAND|wx.ALIGN_CENTER) - - self.fgsizer.Add(self.drk_lab, 0, wx.ALIGN_RIGHT) - self.fgsizer.Add(wx.Window(self, -1), 0, wx.EXPAND|wx.ALIGN_CENTER) - self.fgsizer.Add(self.drk_cho, 0, wx.EXPAND|wx.ALIGN_CENTER) - self.fgsizer.Add(self.drk_but, 0, wx.ALIGN_RIGHT) - self.fgsizer.Add(self.files_lab, 0, wx.ALIGN_RIGHT) self.fgsizer.AddSpacer(1) self.fgsizer.Add(self.dir_but, 0, wx.ALIGN_RIGHT) @@ -348,10 +299,6 @@ def update(self): self.mode_cho.SetSelection(IMAGE_MODE_DICT_SEL[mode]) # Agg choice self.agg_cho.SetStringSelection(AGG_MODE_DICT_INV[rdr.aggMode]) - # Image Orientation - self.flip_cho.SetStringSelection(FLIP_MODE_DICT_INV[rdr.flipMode]) - # Dark mode - self.drk_cho.SetStringSelection(DARK_MODE_DICT_INV[rdr.darkMode]) # Mode Subpanel self.sizer.Show(self.sp_single, (mode == ImageModes.SINGLE_FRAME)) @@ -375,7 +322,9 @@ def update(self): # add total number of frames available try: d = rdr.imageDir - r = detector.ReadGE((os.path.join(d, n), 0)) + r = ReadGE( + os.path.join(d, n), + fmt=exp.activeReader.imageFmt) nframe = r.getNFrames() lctrl.SetStringItem(index, 2, str(nframe)) except: @@ -472,24 +421,7 @@ def OnBrowseSpin(self, e): app = wx.GetApp() exp = app.ws exp.readImage(self.browse_spn.GetValue()) - app.getCanvas().update(newImage=True) - - return - - def OnDarkChoice(self, e): - """Dark mode choice has been made""" - val = e.GetString() - mode = DARK_MODE_DICT[val] - exp = wx.GetApp().ws - exp.activeReader.darkMode = mode - # - # Enable/disable other interactors - # - self.drk_but.Enable(mode == ReaderInput.DARK_MODE_FILE or mode == ReaderInput.DARK_MODE_ARRAY) - - # Update info window - - self.sp_info.update() + app.getCanvas().update(updateImage=True) return @@ -522,72 +454,23 @@ def OnModeChoice(self, e): return - def OnFlipChoice(self, e): - """Flip mode chosen""" - print 'flip mode: ', e.GetString() - wx.GetApp().ws.activeReader.flipMode = FLIP_MODE_DICT[e.GetString()] - return - - def OnDarkBut(self, e): - """Load dark file names with file dialogue""" - # - # !! Check that "subtract dark" is true - # - dlg = wx.FileDialog(self, 'Select Dark Image') - if dlg.ShowModal() == wx.ID_OK: - dir = str(dlg.GetDirectory()) - fil = str(dlg.GetFilename()) - if (fil): - # - # Set dark file and display name in info box. 
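The dark-image plumbing being deleted here is replaced by the `ReaderInfoDialog` flow: `GetInfo()` returns a plain dict, and the reworked `OnImgBut` below pops the keys that map to dedicated reader fields (`directory`, `file`, `format`), leaving the remainder as the free-form `imageOpts`. A minimal sketch of that split (the `empty-frames` option and the values are illustrative, not taken from the patch):

```python
def split_reader_info(info):
    # Pop the keys with dedicated reader fields; whatever is left in
    # the dict travels on untouched as the reader's imageOpts.
    directory = info.pop('directory')
    filename = info.pop('file')
    fmt = info.pop('format')
    return directory, filename, fmt, info

d, f, fmt, opts = split_reader_info({
    'directory': '/data/scan1',
    'file': 'scan1_00001.ge2',
    'format': 'ge',
    'empty-frames': 1,
})
assert (d, f, fmt) == ('/data/scan1', 'scan1_00001.ge2', 'ge')
assert opts == {'empty-frames': 1}
```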
- # - exp = wx.GetApp().ws - exp.activeReader.darkDir = dir - exp.activeReader.darkName = fil - self.sp_info.update() - pass - pass - dlg.Destroy() - - return - def OnImgBut(self, e): """Load image file names with file dialogue NOTE: converts filenames to str from unicode -""" - dlg = wx.FileDialog(self, 'Select Images', - style=wx.FD_MULTIPLE) + """ + dlg = ReaderInfoDialog(self, -1) if dlg.ShowModal() == wx.ID_OK: - d = str(dlg.GetDirectory()) - fnames = [str(p) for p in dlg.GetFilenames()] - if (fnames): - # - # Set image file list and display name in box. - # - fnames.sort() - exp = wx.GetApp().ws - print d, fnames - exp.activeReader.imageDir = d - exp.activeReader.imageNames = fnames - - pass + d = dlg.GetInfo() + exp = wx.GetApp().ws + exp.activeReader.imageDir = d.pop('directory') + exp.activeReader.imageNames = [d.pop('file')] + exp.activeReader.imageFmt = d.pop('format') + exp.activeReader.imageOpts = d self.update() - pass dlg.Destroy() - return - - def OnDrkSubtract(self, e): - """Subtract dark checkbox - - * No other effects until read image button is pressed -""" - wx.GetApp().ws.drkSubtract = self.drk_box.GetValue() - - return - def OnReadBut(self, e): """Read the frames""" @@ -679,7 +562,7 @@ def __makeListCtrl(self): # LStyle = wx.LC_REPORT|wx.LC_SINGLE_SEL # - listctrl = myListCtrl(self, wx.NewId(), style=LStyle) + listctrl = myListCtrl(self, wx.NewIdRef(), style=LStyle) listctrl.InsertColumn(0, 'Image File') listctrl.InsertColumn(1, 'Empty Frames') listctrl.InsertColumn(2, 'Total Frames') @@ -791,7 +674,7 @@ def __makeListCtrl(self): # LStyle = wx.LC_REPORT|wx.LC_SINGLE_SEL # - listctrl = wx.ListCtrl(self, wx.NewId(), style=LStyle) + listctrl = wx.ListCtrl(self, wx.NewIdRef(), style=LStyle) listctrl.InsertColumn(0, 'Image File') listctrl.InsertColumn(1, 'Omega') listctrl.SetColumnWidth(1, wx.LIST_AUTOSIZE_USEHEADER) @@ -867,14 +750,9 @@ def __makeObjects(self): # File lists for display. # - self.drk_txt_lab = wx.StaticText(self, wx.NewId(), 'Dark File', - style=wx.ALIGN_CENTER) - self.drk_txt = wx.TextCtrl(self, wx.NewId(), value='', - style=wx.RAISED_BORDER) - - self.img_txt_lab = wx.StaticText(self, wx.NewId(), 'Image Directory', + self.img_txt_lab = wx.StaticText(self, wx.NewIdRef(), 'Image Directory', style=wx.ALIGN_CENTER) - self.img_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.img_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER) return @@ -893,8 +771,6 @@ def __makeSizers(self): self.fgsizer.Add(self.img_txt_lab, 0, wx.ALIGN_RIGHT|wx.RIGHT, 5) self.fgsizer.Add(self.img_txt, 1, wx.EXPAND|wx.ALIGN_CENTER) - self.fgsizer.Add(self.drk_txt_lab, 0, wx.ALIGN_RIGHT|wx.RIGHT, 5) - self.fgsizer.Add(self.drk_txt, 1, wx.EXPAND|wx.ALIGN_CENTER) # # Main sizer # @@ -912,7 +788,6 @@ def __makeSizers(self): def update(self): """Update information""" exp = wx.GetApp().ws - self.drk_txt.SetValue(exp.activeReader.darkFile) self.img_txt.SetValue(exp.activeReader.imageDir) return diff --git a/hexrd/wx/guiutil.py b/hexrd/wx/guiutil.py index 4b88623f..e92e71af 100644 --- a/hexrd/wx/guiutil.py +++ b/hexrd/wx/guiutil.py @@ -105,13 +105,13 @@ def makeTitleBar(p, t, **kwargs): We use a workaround by creating a sizer with colored boxes on either side. 
""" - titlebar = wx.StaticText(p, wx.NewId(), t, + titlebar = wx.StaticText(p, wx.NewIdRef(), t, style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) # # Keyword args # tt = 'tooltip' - if tt in kwargs: titlebar.SetToolTipString(kwargs[tt]) + if tt in kwargs: titlebar.SetToolTip(kwargs[tt]) cl = 'color' if cl in kwargs: @@ -142,7 +142,7 @@ def makeTitleBar(p, t, **kwargs): def callJoel(p): """Return message to display on empty pages""" - hpage = wx.html.HtmlWindow(p, wx.NewId()) + hpage = wx.html.HtmlWindow(p, wx.NewIdRef()) msg = r""" diff --git a/hexrd/wx/indexpanel.py b/hexrd/wx/indexpanel.py index bbe69467..b892117a 100644 --- a/hexrd/wx/indexpanel.py +++ b/hexrd/wx/indexpanel.py @@ -86,13 +86,13 @@ def __makeObjects(self): ind_opts = exp.index_opts self.sz_titlebar = makeTitleBar(self, 'Indexing') - self.method_cho = wx.Choice(self, wx.NewId(), + self.method_cho = wx.Choice(self, wx.NewIdRef(), choices=ind_opts.INDEX_CHOICES) self.method_cho.SetSelection(ind_opts.IND_FIBER) - self.run_but = wx.Button(self, wx.NewId(), 'Run Indexer') + self.run_but = wx.Button(self, wx.NewIdRef(), 'Run Indexer') - self.fiber_pan = FiberSearchPanel(self, wx.NewId()) - self.gspot_pan = GrainSpotterPanel(self, wx.NewId()) + self.fiber_pan = FiberSearchPanel(self, wx.NewIdRef()) + self.gspot_pan = GrainSpotterPanel(self, wx.NewIdRef()) return @@ -202,59 +202,59 @@ def __makeObjects(self): # checkboxes - self.friedel_cbox = wx.CheckBox(self, wx.NewId(), 'Friedel Only') + self.friedel_cbox = wx.CheckBox(self, wx.NewIdRef(), 'Friedel Only') self.friedel_cbox.SetValue(iopts.friedelOnly) - self.claims_cbox = wx.CheckBox(self, wx.NewId(), 'Preserve Claiims') + self.claims_cbox = wx.CheckBox(self, wx.NewIdRef(), 'Preserve Claims') self.claims_cbox.SetValue(iopts.preserveClaims) - self.refine_cbox = wx.CheckBox(self, wx.NewId(), 'Do Refinement') + self.refine_cbox = wx.CheckBox(self, wx.NewIdRef(), 'Do Refinement') self.refine_cbox.SetValue(iopts.doRefinement) - self.multi_cbox = wx.CheckBox(self, wx.NewId(), 'Use Multiprocessing') + self.multi_cbox = wx.CheckBox(self, wx.NewIdRef(), 'Use Multiprocessing') self.multi_cbox.SetValue(iopts.doMultiProc) # value boxes - self.etol_lab = wx.StaticText(self, wx.NewId(), 'Eta Tolerance', + self.etol_lab = wx.StaticText(self, wx.NewIdRef(), 'Eta Tolerance', style=wx.ALIGN_RIGHT) - self.etol_txt = wx.TextCtrl(self, wx.NewId(), value=str(iopts.etaTol), + self.etol_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(iopts.etaTol), style=wx.RAISED_BORDER) - self.otol_lab = wx.StaticText(self, wx.NewId(), 'Omega Tolerance', + self.otol_lab = wx.StaticText(self, wx.NewIdRef(), 'Omega Tolerance', style=wx.ALIGN_RIGHT) - self.otol_txt = wx.TextCtrl(self, wx.NewId(), value=str(iopts.omeTol), + self.otol_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(iopts.omeTol), style=wx.RAISED_BORDER) - self.steps_lab = wx.StaticText(self, wx.NewId(), 'Number of Steps', + self.steps_lab = wx.StaticText(self, wx.NewIdRef(), 'Number of Steps', style=wx.ALIGN_RIGHT) - self.steps_spn = wx.SpinCtrl(self, wx.NewId(), + self.steps_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=36, max=36000, initial=iopts.nsteps) label = 'Minimum Completeness' - self.comp_lab = wx.StaticText(self, wx.NewId(), label, + self.comp_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_RIGHT) - self.comp_txt = wx.TextCtrl(self, wx.NewId(), + self.comp_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(iopts.minCompleteness), style=wx.RAISED_BORDER) label = 'Minimum Fraction Claimed' - self.claim_lab = wx.StaticText(self, wx.NewId(), 
label, + self.claim_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_RIGHT) - self.claim_txt = wx.TextCtrl(self, wx.NewId(), + self.claim_txt = wx.TextCtrl(self, wx.NewIdRef(), value=str(iopts.minPctClaimed), style=wx.RAISED_BORDER) label = 'Number of CPUs' - self.ncpus_lab = wx.StaticText(self, wx.NewId(), label, + self.ncpus_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_RIGHT) - self.ncpus_spn = wx.SpinCtrl(self, wx.NewId(), + self.ncpus_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=ncpus_DFLT, initial=ncpus_DFLT) label = 'Quit After This Many' - self.qafter_lab = wx.StaticText(self, wx.NewId(), label, + self.qafter_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_RIGHT) - self.qafter_spn = wx.SpinCtrl(self, wx.NewId(), + self.qafter_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=0, max=100000, initial=0) - self.hkls_but = wx.Button(self, wx.NewId(), 'HKLs') + self.hkls_but = wx.Button(self, wx.NewIdRef(), 'HKLs') return @@ -344,7 +344,7 @@ def OnRunHKLs(self, evt): iopts = exp.index_opts pd = exp.activeMaterial.planeData - hkls_dlg = HklsDlg(self, wx.NewId(), exp.activeMaterial) + hkls_dlg = HklsDlg(self, wx.NewIdRef(), exp.activeMaterial) if hkls_dlg.ShowModal() == wx.ID_OK: # pd.exclusions = hkls_dlg.getExclusions() @@ -394,29 +394,29 @@ def __makeObjects(self): self.tbarSizer = makeTitleBar(self, 'Grain Spotter Options', color=WP.BG_COLOR_PANEL1_TITLEBAR) - self.pfit_cbox = wx.CheckBox(self, wx.NewId(), 'Position Fit') + self.pfit_cbox = wx.CheckBox(self, wx.NewIdRef(), 'Position Fit') label = 'Minimum Completeness' - self.comp_lab = wx.StaticText(self, wx.NewId(), label, + self.comp_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_CENTER) - self.comp_txt = wx.TextCtrl(self, wx.NewId(), value='0.5', + self.comp_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0.5', style=wx.RAISED_BORDER) label = 'Minimum Fraction of G-Vectors' - self.fracG_lab = wx.StaticText(self, wx.NewId(), label, + self.fracG_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_CENTER) - self.fracG_txt = wx.TextCtrl(self, wx.NewId(), value='0.5', + self.fracG_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0.5', style=wx.RAISED_BORDER) label = 'Sigmas' - self.sigmas_lab = wx.StaticText(self, wx.NewId(), label, + self.sigmas_lab = wx.StaticText(self, wx.NewIdRef(), label, style=wx.ALIGN_CENTER) - self.sigmas_txt = wx.TextCtrl(self, wx.NewId(), value='2.0', + self.sigmas_txt = wx.TextCtrl(self, wx.NewIdRef(), value='2.0', style=wx.RAISED_BORDER) - self.trials_lab = wx.StaticText(self, wx.NewId(), 'Number of Trials', + self.trials_lab = wx.StaticText(self, wx.NewIdRef(), 'Number of Trials', style=wx.ALIGN_CENTER) - self.trials_spn = wx.SpinCtrl(self, wx.NewId(), + self.trials_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1000, max=1000000, initial=100000) diff --git a/hexrd/wx/listeditor.py b/hexrd/wx/listeditor.py index 7502e720..cf3e862b 100644 --- a/hexrd/wx/listeditor.py +++ b/hexrd/wx/listeditor.py @@ -83,13 +83,13 @@ def __makeObjects(self): """Add interactors""" #self.__makeTitleBar('List Editor') - self.main_lbx = wx.ListBox(self, wx.NewId(), + self.main_lbx = wx.ListBox(self, wx.NewIdRef(), style = wx.LB_SINGLE, choices = [item.name for item in self.mylist]) - self.up_but = wx.Button(self, wx.NewId(), 'up') - self.down_but = wx.Button(self, wx.NewId(), 'down') - self.del_but = wx.Button(self, wx.NewId(), 'del') - self.copy_but = wx.Button(self, wx.NewId(), 'copy') + self.up_but = wx.Button(self, wx.NewIdRef(), 'up') + self.down_but = 
wx.Button(self, wx.NewIdRef(), 'down') + self.del_but = wx.Button(self, wx.NewIdRef(), 'del') + self.copy_but = wx.Button(self, wx.NewIdRef(), 'copy') return @@ -101,7 +101,7 @@ def __makeTitleBar(self, t): myToolTip = r""" PANEL FOR ... """ - self.titlebar.SetToolTipString(myToolTip) + self.titlebar.SetToolTip(myToolTip) return @@ -237,7 +237,7 @@ def __init__(self, parent, id, mylist, **kwargs): self.titlebar = wx.StaticText(self, -1, 'List Editor', style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) # - self.list_ed = ListEditor(self, wx.NewId(), mylist) + self.list_ed = ListEditor(self, wx.NewIdRef(), mylist) # # Bindings. # diff --git a/hexrd/wx/logwindows.py b/hexrd/wx/logwindows.py index 72434f6a..bd399fa5 100644 --- a/hexrd/wx/logwindows.py +++ b/hexrd/wx/logwindows.py @@ -84,7 +84,7 @@ def __makeObjects(self): self.tbarSizer = makeTitleBar(self, 'Log') # - self.log_pan = logPanel(self, wx.NewId()) + self.log_pan = logPanel(self, wx.NewIdRef()) # return @@ -166,7 +166,7 @@ def __init__(self, parent, id): def __makeObjects(self): """Add interactors""" - self.log_txt = wx.TextCtrl(self, wx.NewId(), value='', size=(500,700), + self.log_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', size=(500,700), style=wx.RAISED_BORDER|wx.TE_MULTILINE|wx.TE_READONLY) # return diff --git a/hexrd/wx/mainapp.py b/hexrd/wx/mainapp.py index 1c33a681..e27e8e9d 100644 --- a/hexrd/wx/mainapp.py +++ b/hexrd/wx/mainapp.py @@ -11,9 +11,9 @@ # # Please also see the file LICENSE. # -# This program is free software; you can redistribute it and/or modify it under the -# terms of the GNU Lesser General Public License (as published by the Free Software -# Foundation) version 2.1 dated February 1999. +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. 
# # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY @@ -28,37 +28,31 @@ # """Main application file """ -import os, sys +import os +import sys import wx +from wx.adv import SplashScreen -from hexrd.wx import guiconfig from hexrd.wx.mainframe import MainFrame -# -# mdef Modules -# -from hexrd.xrd import detector as detectorModule - from hexrd.xrd.experiment import loadExp, ImageModes -# + + # ---------------------------------------------------CLASS: xrdApp # -class xrdApp(wx.PySimpleApp): +class xrdApp(wx.App): """xrdApp""" def __init__(self, *args): """Constructor for xrdApp""" - # - wx.PySimpleApp.__init__(self) - # - # No command args for now, due to mac build issue (64bit, argv emulation) - # + wx.App.__init__(self) + # No command args for now, due to mac build issue + # (64bit, argv emulation) f = '' - #if len(args) == 0: - # f = '' - #else: - # f = args[0] - # pass - + # if len(args) == 0: + # f = '' + # else: + # f = args[0] + # pass self.__makeData(f) self.mframe = None @@ -76,8 +70,8 @@ def __makeData(self, inpFile): # # * Image Information # - self.imgMode = ImageModes.SINGLE_FRAME - self.imgCal = None + self.imgMode = ImageModes.SINGLE_FRAME + self.imgCal = None self.imgSweep = None return @@ -90,6 +84,7 @@ def __getNotebook(self): # # ============================== API # + @property def imgFrame(self): """Image frame according to image mode""" @@ -109,23 +104,24 @@ def updateFromExp(self): return - pass # end class + pass # end class # # -----------------------------------------------END CLASS: xrdApp + def execute(*args): # # Run program stand-alone. # app = xrdApp(*args) - app.mframe = MainFrame(None, wx.NewId()) + app.mframe = MainFrame(None, wx.NewIdRef()) app.SetTopWindow(app.mframe) - #if len(sys.argv) == 1: - # app = xrdApp() - #else: - # app = xrdApp(*sys.argv[1:]) - # pass + # if len(sys.argv) == 1: + # app = xrdApp() + # else: + # app = xrdApp(*sys.argv[1:]) + # pass # # The main window cannot be imported until after the app # is instantiated due to the wx.ColourDatabase() call. @@ -135,10 +131,13 @@ def execute(*args): # splashFile = 'hexrd.png' splashDir = os.path.dirname(__file__) - splashImage = wx.Bitmap(os.path.join(splashDir, splashFile)) + splashImage = wx.Bitmap(os.path.join(splashDir, splashFile), + wx.BITMAP_TYPE_PNG) # - wx.SplashScreen(splashImage, wx.SPLASH_CENTRE_ON_PARENT|wx.SPLASH_TIMEOUT, - 1000, app.mframe) + splash = SplashScreen( + splashImage, + wx.adv.SPLASH_CENTRE_ON_PARENT | wx.adv.SPLASH_TIMEOUT, + 1000, app.mframe) # # Main frame # diff --git a/hexrd/wx/mainframe.py b/hexrd/wx/mainframe.py index 9b059d92..f2fab900 100644 --- a/hexrd/wx/mainframe.py +++ b/hexrd/wx/mainframe.py @@ -96,11 +96,11 @@ def __makeObjects(self): # Canvas panel will update on all pages. Create this before # creating the notebook. 
# - self.canvasPanel = CanvasPanel(self, wx.NewId()) + self.canvasPanel = CanvasPanel(self, wx.NewIdRef()) # # Notebook # - self.nBook = xrdNoteBook(self, wx.NewId()) + self.nBook = xrdNoteBook(self, wx.NewIdRef()) # # A Statusbar in the bottom of the window # @@ -162,19 +162,19 @@ def __makeMaterialMenu(self): # Load, save and edit the material list # # ===== Load List - self.materialMenu.IDload = wx.NewId() + self.materialMenu.IDload = wx.NewIdRef() self.materialMenu.Append(self.materialMenu.IDload, "Load material list", "Load a saved material list") self.Bind(wx.EVT_MENU, self.OnMaterialsLoad, id=self.materialMenu.IDload) # ===== Edit List - self.materialMenu.IDedit = wx.NewId() + self.materialMenu.IDedit = wx.NewIdRef() self.materialMenu.Append(self.materialMenu.IDedit, "Edit material list", "Rearrange/remove list items") self.Bind(wx.EVT_MENU, self.OnMaterialsEdit, id=self.materialMenu.IDedit) # ===== Save List - self.materialMenu.IDsave = wx.NewId() + self.materialMenu.IDsave = wx.NewIdRef() self.materialMenu.Append(self.materialMenu.IDsave, "Save material list", "Save the material list to a file.") @@ -187,19 +187,19 @@ def __makeReaderMenu(self): self.readerMenu = wx.Menu('Readers') # ===== Load List - self.readerMenu.IDloadl = wx.NewId() + self.readerMenu.IDloadl = wx.NewIdRef() self.readerMenu.Append(self.readerMenu.IDloadl, "Load reader list", "Load the reader list to from a file") self.Bind(wx.EVT_MENU, self.OnReadersLoad, id=self.readerMenu.IDloadl) # ===== Edit - self.readerMenu.IDedit = wx.NewId() + self.readerMenu.IDedit = wx.NewIdRef() self.readerMenu.Append(self.readerMenu.IDedit, "Edit reader list", "Rearrange/remove list items") self.Bind(wx.EVT_MENU, self.OnReadersEdit, id=self.readerMenu.IDedit) # ===== Save List - self.readerMenu.IDsave = wx.NewId() + self.readerMenu.IDsave = wx.NewIdRef() self.readerMenu.Append(self.readerMenu.IDsave, "Save reader list", "Save the reader list to a file") @@ -209,7 +209,7 @@ def __makeReaderMenu(self): # # ===== Hydra # - self.readerMenu.IDhydra = wx.NewId() + self.readerMenu.IDhydra = wx.NewIdRef() self.readerMenu.Append(self.readerMenu.IDhydra, "Hydra interface", "Open the hydra interface") @@ -223,13 +223,13 @@ def __makeDetectorMenu(self): self.detectorMenu = wx.Menu('Detector') # ===== Load - self.detectorMenu.IDload = wx.NewId() + self.detectorMenu.IDload = wx.NewIdRef() self.detectorMenu.Append(self.detectorMenu.IDload, "Load detector", "Load a saved detector from a file") self.Bind(wx.EVT_MENU, self.OnDetectorLoad, id=self.detectorMenu.IDload) # ===== Save - self.detectorMenu.IDsave = wx.NewId() + self.detectorMenu.IDsave = wx.NewIdRef() self.detectorMenu.Append(self.detectorMenu.IDsave, "Save detector", "Save the detector to a file") @@ -240,7 +240,7 @@ def __makeDetectorMenu(self): # Polar Rebin # # ===== Save - self.detectorMenu.IDcake = wx.NewId() + self.detectorMenu.IDcake = wx.NewIdRef() self.detectorMenu.Append(self.detectorMenu.IDcake, "Polar Rebinning", "Bring up a window for polar rebinning (caking)") @@ -251,31 +251,31 @@ def __makeDetectorMenu(self): def __makeSpotsMenu(self): self.spotsMenu = wx.Menu('Spots') # - self.spotsMenu.IDloadRaw = wx.NewId() + self.spotsMenu.IDloadRaw = wx.NewIdRef() self.spotsMenu.Append(self.spotsMenu.IDloadRaw, "Load raw spots", "Load the raw spots to a file") self.Bind(wx.EVT_MENU, self.OnSpotsLoadRaw, id=self.spotsMenu.IDloadRaw) # - self.spotsMenu.IDsaveRaw = wx.NewId() + self.spotsMenu.IDsaveRaw = wx.NewIdRef() self.spotsMenu.Append(self.spotsMenu.IDsaveRaw, "Save raw spots", 
"Save the raw spots to a file") self.Bind(wx.EVT_MENU, self.OnSpotsSaveRaw, id=self.spotsMenu.IDsaveRaw) # - ## self.spotsMenu.IDsave = wx.NewId() + ## self.spotsMenu.IDsave = wx.NewIdRef() ## self.spotsMenu.Append(self.spotsMenu.IDsave, ## "Save post-processed spots", ## "Save the post-processed Spots class") ## self.Bind(wx.EVT_MENU, self.OnSpotsSave, id=self.spotsMenu.IDsave) ## # - ## self.spotsMenu.IDexportFLT = wx.NewId() + ## self.spotsMenu.IDexportFLT = wx.NewIdRef() ## self.spotsMenu.Append(self.spotsMenu.IDexportFLT, ## "Export flt", ## "Export a fable flt file") ## self.Bind(wx.EVT_MENU, self.OnSpotsExportFLT, id=self.spotsMenu.IDexportFLT) ## # - ## self.spotsMenu.IDexportGVE = wx.NewId() + ## self.spotsMenu.IDexportGVE = wx.NewIdRef() ## self.spotsMenu.Append(self.spotsMenu.IDexportGVE, ## "Export gve", ## "Export a fable gve file") @@ -285,25 +285,25 @@ def __makeSpotsMenu(self): def __makeIndexerMenu(self): self.indexerMenu = wx.Menu('Indexing') # - ## self.indexerMenu.IDloadRaw = wx.NewId() + ## self.indexerMenu.IDloadRaw = wx.NewIdRef() ## self.indexerMenu.Append(self.indexerMenu.IDloadRMats, ## "Load rMats", ## "Load an array of rotation matrices") ## self.Bind(wx.EVT_MENU, self.OnLoadRMats, id=self.indexerMenu.IDloadRMats) # - self.indexerMenu.IDsaveRMats = wx.NewId() + self.indexerMenu.IDsaveRMats = wx.NewIdRef() self.indexerMenu.Append(self.indexerMenu.IDsaveRMats, "Save rMats array", "Save the indexed rotations matrices to binary (.npy)") self.Bind(wx.EVT_MENU, self.OnSaveRMats, id=self.indexerMenu.IDsaveRMats) - self.indexerMenu.IDexportGrainLog = wx.NewId() + self.indexerMenu.IDexportGrainLog = wx.NewIdRef() self.indexerMenu.Append(self.indexerMenu.IDexportGrainLog, "Export grains log file", "Export the log file for all indexed rotations to ASCII") self.Bind(wx.EVT_MENU, self.OnExportGrainLog, id=self.indexerMenu.IDexportGrainLog) - self.indexerMenu.IDdumpGrainList = wx.NewId() + self.indexerMenu.IDdumpGrainList = wx.NewIdRef() self.indexerMenu.Append(self.indexerMenu.IDdumpGrainList, "Dump grain list", "Export the grainList to a cPickle") @@ -493,7 +493,7 @@ def OnCaking(self, e): wx.MessageBox('No Image is loaded') return - dlg = cakingDialog(self, wx.NewId()) + dlg = cakingDialog(self, wx.NewIdRef()) dlg.ShowModal() app.getCanvas().update() @@ -622,7 +622,7 @@ def OnReadersEdit(self, e): """Edit Reader list""" exp = wx.GetApp().ws - dlg = ListEditDlg(self, wx.NewId(), exp.savedReaders) + dlg = ListEditDlg(self, wx.NewIdRef(), exp.savedReaders) dlg.ShowModal() dlg.Destroy() @@ -677,7 +677,7 @@ def OnReadersLoad(self, e): def OnHydra(self, e): """Raise hydra interface""" - h = HydraControlFrame(self, wx.NewId()) + h = HydraControlFrame(self, wx.NewIdRef()) return # # ========== FILE MENU @@ -813,7 +813,7 @@ def OnMaterialsEdit(self, e): """Edit the materials list""" exp = wx.GetApp().ws - dlg = ListEditDlg(self, wx.NewId(), exp.matList) + dlg = ListEditDlg(self, wx.NewIdRef(), exp.matList) dlg.ShowModal() dlg.Destroy() diff --git a/hexrd/wx/materialspanel.py b/hexrd/wx/materialspanel.py index 3a82fbb4..7475c95d 100644 --- a/hexrd/wx/materialspanel.py +++ b/hexrd/wx/materialspanel.py @@ -83,55 +83,55 @@ def __makeObjects(self): # # Material List # - self.curr_lab = wx.StaticText(self, wx.NewId(), + self.curr_lab = wx.StaticText(self, wx.NewIdRef(), 'Active Material', style=wx.ALIGN_CENTER) - self.mats_cho = wx.Choice(self, wx.NewId(), + self.mats_cho = wx.Choice(self, wx.NewIdRef(), choices=[m.name for m in exp.matList]) - self.new_but = wx.Button(self, wx.NewId(), 
'New Material') + self.new_but = wx.Button(self, wx.NewIdRef(), 'New Material') # # Material Name # - self.name_lab = wx.StaticText(self, wx.NewId(), + self.name_lab = wx.StaticText(self, wx.NewIdRef(), 'MATERIAL NAME', style=wx.ALIGN_CENTER) - self.name_txt = wx.TextCtrl(self, wx.NewId(), value=Material.DFLT_NAME, + self.name_txt = wx.TextCtrl(self, wx.NewIdRef(), value=Material.DFLT_NAME, style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) # # Rings panel # - self.ring_pan = ringPanel(self, wx.NewId()) + self.ring_pan = ringPanel(self, wx.NewIdRef()) # # Categories # # ========== Lattice Params # - self.lp_a_lab = wx.StaticText(self, wx.NewId(), 'a', style=wx.ALIGN_CENTER) - self.lp_a_txt = wx.TextCtrl(self, wx.NewId(), value='0', + self.lp_a_lab = wx.StaticText(self, wx.NewIdRef(), 'a', style=wx.ALIGN_CENTER) + self.lp_a_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_b_lab = wx.StaticText(self, wx.NewId(), 'b', style=wx.ALIGN_CENTER) - self.lp_b_txt = wx.TextCtrl(self, wx.NewId(), value='0', + self.lp_b_lab = wx.StaticText(self, wx.NewIdRef(), 'b', style=wx.ALIGN_CENTER) + self.lp_b_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_c_lab = wx.StaticText(self, wx.NewId(), 'c', style=wx.ALIGN_CENTER) - self.lp_c_txt = wx.TextCtrl(self, wx.NewId(), value='0', + self.lp_c_lab = wx.StaticText(self, wx.NewIdRef(), 'c', style=wx.ALIGN_CENTER) + self.lp_c_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.alpha_lab = wx.StaticText(self, wx.NewId(), 'alpha', style=wx.ALIGN_CENTER) - self.alpha_txt = wx.TextCtrl(self, wx.NewId(), value='90', + self.alpha_lab = wx.StaticText(self, wx.NewIdRef(), 'alpha', style=wx.ALIGN_CENTER) + self.alpha_txt = wx.TextCtrl(self, wx.NewIdRef(), value='90', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.beta_lab = wx.StaticText(self, wx.NewId(), 'beta', style=wx.ALIGN_CENTER) - self.beta_txt = wx.TextCtrl(self, wx.NewId(), value='90', + self.beta_lab = wx.StaticText(self, wx.NewIdRef(), 'beta', style=wx.ALIGN_CENTER) + self.beta_txt = wx.TextCtrl(self, wx.NewIdRef(), value='90', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.gamma_lab = wx.StaticText(self, wx.NewId(), 'gamma', style=wx.ALIGN_CENTER) - self.gamma_txt = wx.TextCtrl(self, wx.NewId(), value='90', + self.gamma_lab = wx.StaticText(self, wx.NewIdRef(), 'gamma', style=wx.ALIGN_CENTER) + self.gamma_txt = wx.TextCtrl(self, wx.NewIdRef(), value='90', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.units_lab = wx.StaticText(self, wx.NewId(), 'UNITS', style=wx.ALIGN_CENTER) - self.dunits_cho = wx.Choice(self, wx.NewId(), choices=['angstroms']) + self.units_lab = wx.StaticText(self, wx.NewIdRef(), 'UNITS', style=wx.ALIGN_CENTER) + self.dunits_cho = wx.Choice(self, wx.NewIdRef(), choices=['angstroms']) self.dunits_cho.SetSelection(0) - self.aunits_cho = wx.Choice(self, wx.NewId(), choices=['degrees']) + self.aunits_cho = wx.Choice(self, wx.NewIdRef(), choices=['degrees']) self.aunits_cho.SetSelection(0) # # Save list of lattice parameter windows. 
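The materialspanel.py hunks here, like most of this patch, apply one mechanical rename: `wx.NewId()` becomes `wx.NewIdRef()`. A minimal sketch of the pattern, assuming wxPython 4 (Phoenix), where `wx.NewId()` is deprecated because the bare integer it returns can later be recycled while a widget still holds it; `wx.NewIdRef()` instead returns a `WindowIDRef` that keeps the ID reserved and coerces to `int` at call sites:

```python
# Sketch of the wx.NewId() -> wx.NewIdRef() rename used throughout this
# patch; assumes wxPython 4 (Phoenix). Not part of the patch itself.
import wx

app = wx.App()
frame = wx.Frame(None, wx.ID_ANY, 'id demo')

# Classic wxPython: a bare int that the toolkit may eventually recycle.
# run_id = wx.NewId()

# Phoenix: a WindowIDRef that stays reserved while referenced and behaves
# like an int, so existing constructor calls need no other change.
run_id = wx.NewIdRef()
run_but = wx.Button(frame, run_id, 'Run')
print(int(run_id))  # coerces to the underlying integer id
```

Because a `WindowIDRef` compares equal to its integer value, call sites such as `self.Bind(wx.EVT_MENU, handler, id=self.materialMenu.IDload)` elsewhere in this patch continue to work unchanged after the rename.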
@@ -150,33 +150,33 @@ def __makeObjects(self): # # ========== Space group info # - self.sg_lab = wx.StaticText(self, wx.NewId(), 'Space Group', + self.sg_lab = wx.StaticText(self, wx.NewIdRef(), 'Space Group', style=wx.ALIGN_CENTER) - self.sg_spn = wx.SpinCtrl(self, wx.NewId(), min=1, max=230, initial=mat.spaceGroup.sgnum) + self.sg_spn = wx.SpinCtrl(self, wx.NewIdRef(), min=1, max=230, initial=mat.spaceGroup.sgnum) - self.hall_lab = wx.StaticText(self, wx.NewId(), 'Hall Symbol', + self.hall_lab = wx.StaticText(self, wx.NewIdRef(), 'Hall Symbol', style=wx.ALIGN_CENTER) - self.hall_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.hall_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_READONLY) - self.herm_lab = wx.StaticText(self, wx.NewId(), 'Hermann-Mauguin', + self.herm_lab = wx.StaticText(self, wx.NewIdRef(), 'Hermann-Mauguin', style=wx.ALIGN_CENTER) - self.herm_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.herm_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_READONLY) - self.laue_lab = wx.StaticText(self, wx.NewId(), 'Laue Group', + self.laue_lab = wx.StaticText(self, wx.NewIdRef(), 'Laue Group', style=wx.ALIGN_CENTER) - self.laue_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.laue_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_READONLY) - self.ltype_lab = wx.StaticText(self, wx.NewId(), 'Lattice Type', + self.ltype_lab = wx.StaticText(self, wx.NewIdRef(), 'Lattice Type', style=wx.ALIGN_CENTER) - self.ltype_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.ltype_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_READONLY) - self.hkls_lab = wx.StaticText(self, wx.NewId(), 'HKLs Max (sum of squares)', + self.hkls_lab = wx.StaticText(self, wx.NewIdRef(), 'HKLs Max (sum of squares)', style=wx.ALIGN_CENTER) - self.hkls_txt = wx.TextCtrl(self, wx.NewId(), value='10', + self.hkls_txt = wx.TextCtrl(self, wx.NewIdRef(), value='10', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) return @@ -189,7 +189,7 @@ def __makeTitleBar(self, t): myToolTip = r""" PANEL FOR ... """ - self.titlebar.SetToolTipString(myToolTip) + self.titlebar.SetToolTip(myToolTip) return diff --git a/hexrd/wx/planedataeditor.py b/hexrd/wx/planedataeditor.py index 5968f3d4..68caa2d3 100644 --- a/hexrd/wx/planedataeditor.py +++ b/hexrd/wx/planedataeditor.py @@ -96,70 +96,70 @@ def __makeObjects(self): # # Text control for name # - self.name_lab = wx.StaticText(self, wx.NewId(), 'Name', style=wx.ALIGN_CENTER) - self.name_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.name_lab = wx.StaticText(self, wx.NewIdRef(), 'Name', style=wx.ALIGN_CENTER) + self.name_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) self.name_txt.ChangeValue(self.mat.name) # # Two-Theta and wavelength selectors, with units. 
# - self.tthmin_lab = wx.StaticText(self, wx.NewId(), 'Two Theta Min', + self.tthmin_lab = wx.StaticText(self, wx.NewIdRef(), 'Two Theta Min', style=wx.ALIGN_CENTER) - self.tthmin_txt = wx.TextCtrl(self, wx.NewId(), value='0.0', + self.tthmin_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0.0', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.tthmin_uni = wx.Choice(self, wx.NewId(), choices=AngleUnits) + self.tthmin_uni = wx.Choice(self, wx.NewIdRef(), choices=AngleUnits) #self.Bind(wx.EVT_CHOICE, self.OnChoice, self.choice) - self.tthmax_lab = wx.StaticText(self, wx.NewId(), 'Two Theta Max', + self.tthmax_lab = wx.StaticText(self, wx.NewIdRef(), 'Two Theta Max', style=wx.ALIGN_CENTER) - self.tthmax_txt = wx.TextCtrl(self, wx.NewId(), value='20.0', + self.tthmax_txt = wx.TextCtrl(self, wx.NewIdRef(), value='20.0', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.tthmax_uni = wx.Choice(self, wx.NewId(), choices=AngleUnits) + self.tthmax_uni = wx.Choice(self, wx.NewIdRef(), choices=AngleUnits) - self.wave_lab = wx.StaticText(self, wx.NewId(), 'Wavelength', + self.wave_lab = wx.StaticText(self, wx.NewIdRef(), 'Wavelength', style=wx.ALIGN_CENTER) - self.wave_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.wave_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) self.wave_txt.ChangeValue(str(self.pData.wavelength)) - self.wave_uni = wx.Choice(self, wx.NewId(), choices=AngleUnits) + self.wave_uni = wx.Choice(self, wx.NewIdRef(), choices=AngleUnits) # # Group selectors # - self.laue_lab = wx.StaticText(self, wx.NewId(), + self.laue_lab = wx.StaticText(self, wx.NewIdRef(), 'Select the Laue group', style=wx.ALIGN_RIGHT) - self.laue_cho = wx.Choice(self, wx.NewId(), choices=['Laue Groups']) + self.laue_cho = wx.Choice(self, wx.NewIdRef(), choices=['Laue Groups']) - self.space_lab = wx.StaticText(self, wx.NewId(), + self.space_lab = wx.StaticText(self, wx.NewIdRef(), 'Select the Space group', style=wx.ALIGN_RIGHT) - self.space_cho = wx.Choice(self, wx.NewId(), choices=['Space Groups']) + self.space_cho = wx.Choice(self, wx.NewIdRef(), choices=['Space Groups']) # # Add HKL list # - self.hkls_clb = wx.CheckListBox(self, wx.NewId(), choices = self.__getHKLs()) + self.hkls_clb = wx.CheckListBox(self, wx.NewIdRef(), choices = self.__getHKLs()) [self.hkls_clb.Check(i, not self.exclude[i]) for i in range(len(self.exclude))] # # Lattice Parameters # - self.lp_a_lab = wx.StaticText(self, wx.NewId(), 'a', style=wx.ALIGN_CENTER) - self.lp_a_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.lp_a_lab = wx.StaticText(self, wx.NewIdRef(), 'a', style=wx.ALIGN_CENTER) + self.lp_a_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_b_lab = wx.StaticText(self, wx.NewId(), 'b', style=wx.ALIGN_CENTER) - self.lp_b_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.lp_b_lab = wx.StaticText(self, wx.NewIdRef(), 'b', style=wx.ALIGN_CENTER) + self.lp_b_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_c_lab = wx.StaticText(self, wx.NewId(), 'c', style=wx.ALIGN_CENTER) - self.lp_c_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.lp_c_lab = wx.StaticText(self, wx.NewIdRef(), 'c', style=wx.ALIGN_CENTER) + self.lp_c_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_alpha_lab = wx.StaticText(self, wx.NewId(), 'alpha', style=wx.ALIGN_CENTER) - self.lp_alpha_txt = wx.TextCtrl(self, wx.NewId(), value='', + 
self.lp_alpha_lab = wx.StaticText(self, wx.NewIdRef(), 'alpha', style=wx.ALIGN_CENTER) + self.lp_alpha_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_beta_lab = wx.StaticText(self, wx.NewId(), 'beta', style=wx.ALIGN_CENTER) - self.lp_beta_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.lp_beta_lab = wx.StaticText(self, wx.NewIdRef(), 'beta', style=wx.ALIGN_CENTER) + self.lp_beta_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) - self.lp_gamma_lab = wx.StaticText(self, wx.NewId(), 'gamma', style=wx.ALIGN_CENTER) - self.lp_gamma_txt = wx.TextCtrl(self, wx.NewId(), value='', + self.lp_gamma_lab = wx.StaticText(self, wx.NewIdRef(), 'gamma', style=wx.ALIGN_CENTER) + self.lp_gamma_txt = wx.TextCtrl(self, wx.NewIdRef(), value='', style=wx.RAISED_BORDER|wx.TE_PROCESS_ENTER) return @@ -172,7 +172,7 @@ def __makeTitleBar(self, t): myToolTip = r""" FRAME FOR editing the list of calibrants """ - self.titlebar.SetToolTipString(myToolTip) + self.titlebar.SetToolTip(myToolTip) return @@ -290,8 +290,8 @@ def __init__(self, parent, id, mat, **kwargs): # self.titlebar = wx.StaticText(self, -1, 'PlaneDataDialog', style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) - self.pdPanel = PlaneDataPanel(self, wx.NewId(), self.mat) - self.quitBut = wx.Button(self, wx.NewId(), 'QUIT') + self.pdPanel = PlaneDataPanel(self, wx.NewIdRef(), self.mat) + self.quitBut = wx.Button(self, wx.NewIdRef(), 'QUIT') # # Bindings. # diff --git a/hexrd/wx/readerinfo_dlg.py b/hexrd/wx/readerinfo_dlg.py new file mode 100644 index 00000000..44ddebe0 --- /dev/null +++ b/hexrd/wx/readerinfo_dlg.py @@ -0,0 +1,194 @@ +"""panel for reader input""" +import wx + +from guiconfig import WindowParameters as WP +from guiutil import makeTitleBar +# +# ---------------------------------------------------CLASS: ReaderInfoPanel +# +class ReaderInfoPanel(wx.Panel): + """ReaderInfoPanel """ + def __init__(self, parent, id, **kwargs): + + wx.Panel.__init__(self, parent, id, **kwargs) + # + # Data + # + self.image_dir = '' + self.image_fname = '' + # + # Window Objects. + # + self.__make_objects() + # + # Bindings. + # + self.__make_bindings() + # + # Sizing. 
+ # + self.__make_sizers() + # + self.SetAutoLayout(True) + self.SetSizerAndFit(self.sizer) + # + return + # + # ============================== Internal Methods + # + def __make_objects(self): + """Add interactors""" + + self.tbarSizer = makeTitleBar(self, 'Reader Info') + self.file_but = wx.Button(self, wx.NewIdRef(), + 'File' + ) + self.file_txt = wx.TextCtrl(self, wx.NewIdRef(), + value="", + style=wx.RAISED_BORDER|wx.TE_READONLY + ) + self.format_lab = wx.StaticText(self, wx.NewIdRef(), + 'Format', style=wx.ALIGN_RIGHT + ) + self.format_cho = wx.Choice(self, wx.NewIdRef(), + choices=['hdf5', 'frame-cache'] + ) + self.pixel_lab = wx.StaticText(self, wx.NewIdRef(), + 'Pixel Pitch', style=wx.ALIGN_RIGHT + ) + self.pixel_txt = wx.TextCtrl(self, wx.NewIdRef(), + value='0.2', + style=wx.RAISED_BORDER + ) + self.option_lab = wx.StaticText(self, wx.NewIdRef(), + 'Option', style=wx.ALIGN_RIGHT + ) + self.value_lab = wx.StaticText(self, wx.NewIdRef(), + 'Value', style=wx.ALIGN_LEFT + ) + self.option_cho = wx.Choice(self, wx.NewIdRef(), + choices=['path', 'pixel pitch'] + ) + self.value_txt = wx.TextCtrl(self, wx.NewIdRef(), + value="/imageseries", + style=wx.RAISED_BORDER + ) + + def __make_bindings(self): + """Bind interactors""" + self.Bind(wx.EVT_BUTTON, self.OnFileBut, self.file_but) + + def __make_sizers(self): + """Lay out the interactors""" + + self.sizer = wx.BoxSizer(wx.VERTICAL) + self.sizer.Add(self.tbarSizer, 0, wx.EXPAND|wx.ALIGN_CENTER) + + nrow = 5; ncol = 2; padx = 5; pady = 5 + self.info_sz = wx.FlexGridSizer(nrow, ncol, padx, pady) + self.info_sz.AddGrowableCol(0, 0) + self.info_sz.AddGrowableCol(1, 1) + self.info_sz.Add(self.file_but, 0, wx.ALIGN_RIGHT) + self.info_sz.Add(self.file_txt, 0, wx.ALIGN_LEFT|wx.EXPAND) + self.info_sz.Add(self.format_lab, 0, wx.ALIGN_RIGHT) + self.info_sz.Add(self.format_cho, 0, wx.ALIGN_LEFT|wx.EXPAND) + self.info_sz.Add(self.pixel_lab, 0, wx.ALIGN_RIGHT) + self.info_sz.Add(self.pixel_txt, 0, wx.ALIGN_LEFT|wx.EXPAND) + self.info_sz.Add(self.option_lab, 0, wx.ALIGN_RIGHT) + self.info_sz.Add(self.value_lab, 0, wx.ALIGN_LEFT) + self.info_sz.Add(self.option_cho, 0, wx.ALIGN_RIGHT) + self.info_sz.Add(self.value_txt, 0, wx.ALIGN_LEFT|wx.EXPAND) + + + self.sizer.Add(self.info_sz, 1, wx.EXPAND) + # + # ============================== API + # + # ========== *** Access Methods + # + + # + # ========== *** Event Callbacks + # + def OnFileBut(self, e): + """Load image file name with file dialogue + + NOTE: converts filenames to str from unicode + """ + dlg = wx.FileDialog(self, 'Select Imageseries File', + style=wx.FD_FILE_MUST_EXIST) + if dlg.ShowModal() == wx.ID_OK: + self.image_dir = str(dlg.GetDirectory()) + self.image_fname = dlg.GetFilename() + self.file_txt.SetValue(self.image_fname) + dlg.Destroy() + + pass # end class +# -----------------------------------------------END CLASS: ReaderInfoPanel +# ---------------------------------------------------CLASS: ReaderInfoDlg +# +class ReaderInfoDialog(wx.Dialog): + """Pop-Up for reader file, format and options """ + def __init__(self, parent, id, **kwargs): + """Constructor""" + # + myStyle = wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE + wx.Dialog.__init__(self, parent, id, style=myStyle) + # + # Data Objects. + # + + # + # Windows. + # + self.tbarSizer = makeTitleBar(self, 'Reader Info', + color=WP.BG_COLOR_TITLEBAR_FRAME) + self.infopanel = ReaderInfoPanel(self, -1) + # + # Bindings. + # + self._makeBindings() + # + # Sizing. 
+ # + self._makeSizers() + # + self.SetAutoLayout(True) + self.SetSizerAndFit(self.sizer) + # + # Events. + # + + # + return + # + # ============================== Internal Methods + # + def _makeBindings(self): + """Bind interactors to functions""" + + def _makeSizers(self): + """Lay out windows""" + self.sizer = wx.BoxSizer(wx.VERTICAL) + self.sizer.Add(self.tbarSizer, 0, wx.EXPAND|wx.ALIGN_CENTER) + self.sizer.Add(self.CreateSeparatedButtonSizer(wx.OK | wx.CANCEL)) + self.sizer.Add(self.infopanel, 1, wx.EXPAND|wx.ALIGN_CENTER) + # + # ============================== API + # + # ========== *** Access Methods + # + def GetInfo(self): + p = self.infopanel + d = dict( + directory = p.image_dir, + file = p.image_fname, + format = p.format_cho.GetStringSelection(), + path = p.value_txt.GetValue(), + pixel_size = p.pixel_txt.GetValue()) + return d + + pass # end class +# +# -----------------------------------------------END CLASS: ReaderInfoDlg +# diff --git a/hexrd/wx/readerpanel.py b/hexrd/wx/readerpanel.py index da46b791..a3598d9d 100644 --- a/hexrd/wx/readerpanel.py +++ b/hexrd/wx/readerpanel.py @@ -82,9 +82,9 @@ def __makeObjects(self): # # Add detector choice # - self.det_cho = wx.Choice(self, wx.NewId(), + self.det_cho = wx.Choice(self, wx.NewIdRef(), choices=DET_CHOICES) - self.rdr_pan = geReaderPanel(self, wx.NewId()) + self.rdr_pan = geReaderPanel(self, wx.NewIdRef()) return diff --git a/hexrd/wx/ringsubpanel.py b/hexrd/wx/ringsubpanel.py index d403d045..674ee4cc 100644 --- a/hexrd/wx/ringsubpanel.py +++ b/hexrd/wx/ringsubpanel.py @@ -94,40 +94,40 @@ def __makeObjects(self): # # b. Wavelength # - self.dfwv_but = wx.Button(self, wx.NewId(), 'Make Default') - self.dfwv_but.SetToolTipString(dfltToolTip) + self.dfwv_but = wx.Button(self, wx.NewIdRef(), 'Make Default') + self.dfwv_but.SetToolTip(dfltToolTip) - self.wave_lab = wx.StaticText(self, wx.NewId(), + self.wave_lab = wx.StaticText(self, wx.NewIdRef(), 'Wavelength:', style=wx.ALIGN_RIGHT) - self.waveAng_txt = wx.TextCtrl(self, wx.NewId(), + self.waveAng_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.waveAng_lab = wx.StaticText(self, wx.NewId(), + self.waveAng_lab = wx.StaticText(self, wx.NewIdRef(), WAVELENGTH_UNIT, style=wx.ALIGN_RIGHT) - self.waveKEV_txt = wx.TextCtrl(self, wx.NewId(), + self.waveKEV_txt = wx.TextCtrl(self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) - self.waveKEV_lab = wx.StaticText(self, wx.NewId(), + self.waveKEV_lab = wx.StaticText(self, wx.NewIdRef(), 'keV', style=wx.ALIGN_RIGHT) # # c. Edit HKLs # - self.hkl_but = wx.Button(self, wx.NewId(), 'Edit HKLs') + self.hkl_but = wx.Button(self, wx.NewIdRef(), 'Edit HKLs') # # d. 
Ring widths # - self.dfwd_but = wx.Button(self, wx.NewId(), 'Make Default') - self.dfwd_but.SetToolTipString(dfltToolTip) + self.dfwd_but = wx.Button(self, wx.NewIdRef(), 'Make Default') + self.dfwd_but.SetToolTip(dfltToolTip) - self.width_lab = wx.StaticText(self, wx.NewId(), + self.width_lab = wx.StaticText(self, wx.NewIdRef(), 'Ring Width:', style=wx.ALIGN_RIGHT) - self.width_cho = wx.Choice(self, wx.NewId(), choices=widChoices) + self.width_cho = wx.Choice(self, wx.NewIdRef(), choices=widChoices) - self.width_txt = wx.TextCtrl(self, wx.NewId(), + self.width_txt = wx.TextCtrl(self, wx.NewIdRef(), value='1.0e-3', style=wx.RAISED_BORDER | wx.TE_PROCESS_ENTER) # @@ -242,7 +242,7 @@ def OnEditHKLs(self, evt): exp = app.ws mat = exp.activeMaterial - dlg = hklsDlg(self, wx.NewId(), mat) + dlg = hklsDlg(self, wx.NewIdRef(), mat) if dlg.ShowModal() == wx.ID_OK: mat.planeData.exclusions = dlg.getExclusions() diff --git a/hexrd/wx/selecthkls.py b/hexrd/wx/selecthkls.py index ab7b77bb..8e013ca1 100644 --- a/hexrd/wx/selecthkls.py +++ b/hexrd/wx/selecthkls.py @@ -87,7 +87,7 @@ def __makeTitleBar(self, t): myToolTip = r""" PANEL FOR ... """ - self.titlebar.SetToolTipString(myToolTip) + self.titlebar.SetToolTip(myToolTip) return @@ -96,7 +96,7 @@ def __makeListCtrl(self): # LStyle = wx.LC_REPORT # - listctrl = wx.ListView(self, wx.NewId(), style=LStyle) + listctrl = wx.ListView(self, wx.NewIdRef(), style=LStyle) listctrl.InsertColumn(0, 'HKL') listctrl.InsertColumn(1, 'd-spacing') listctrl.InsertColumn(2, '2-theta (deg)') @@ -119,12 +119,12 @@ def __makeListCtrl(self): hklData = hkls[i] hkl = hklData['hkl'] hklStr = '(%d, %d, %d)' % (hkl[0], hkl[1], hkl[2]) - index = listctrl.InsertStringItem(sys.maxint, hklStr) + index = listctrl.InsertItem(sys.maxint, hklStr) dspace = '%.6g' % hklData['dSpacings'] tth = hklData['tTheta'] * (180/math.pi) tTheta = '%.6g' % tth - listctrl.SetStringItem(index, 1, dspace) - listctrl.SetStringItem(index, 2, tTheta) + listctrl.SetItem(index, 1, dspace) + listctrl.SetItem(index, 2, tTheta) # # Show exclusions by background color # @@ -211,7 +211,7 @@ def __init__(self, parent, id, mat, **kwargs): # self.titlebar = wx.StaticText(self, -1, 'selectHKLsDialog', style=wx.ALIGN_CENTER|wx.SIMPLE_BORDER) - self.dataPanel = selectHKLsPanel(self, wx.NewId(), mat) + self.dataPanel = selectHKLsPanel(self, wx.NewIdRef(), mat) self.dataPanel.SetMinSize((400,400)) # # Bindings. 
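Besides the ID changes, the ringsubpanel.py and selecthkls.py hunks above pick up two other Phoenix renames: `SetToolTipString` is gone in wxPython 4 (`SetToolTip` now accepts a plain string), and the `wx.ListCtrl` string-item helpers were renamed from `InsertStringItem`/`SetStringItem` to `InsertItem`/`SetItem`. A minimal sketch of the renamed calls, again assuming wxPython 4; note the patch itself passes `sys.maxint`, the Python 2.7 spelling of `sys.maxsize`:

```python
# Sketch of the renamed wx.ListCtrl and tooltip APIs used above;
# assumes wxPython 4 (Phoenix). Not part of the patch itself.
import sys
import wx

app = wx.App()
frame = wx.Frame(None)
lst = wx.ListCtrl(frame, style=wx.LC_REPORT)
lst.InsertColumn(0, 'HKL')
lst.InsertColumn(1, 'd-spacing')

row = lst.InsertItem(sys.maxsize, '(1, 1, 1)')  # was InsertStringItem: appends, returns row index
lst.SetItem(row, 1, '2.338')                    # was SetStringItem: fills a column of that row
lst.SetToolTip('Check the HKLs to include')     # was SetToolTipString
```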
diff --git a/hexrd/wx/spotspanel.py b/hexrd/wx/spotspanel.py index 13643fb0..2860ffca 100644 --- a/hexrd/wx/spotspanel.py +++ b/hexrd/wx/spotspanel.py @@ -85,78 +85,78 @@ def __makeObjects(self): # Booleans - self.disc_box = wx.CheckBox(self, wx.NewId(), 'Discard at bounds') - self.bbox_box = wx.CheckBox(self, wx.NewId(), 'Keep in bounding box') - self.pado_box = wx.CheckBox(self, wx.NewId(), 'Pad Omega') - self.pads_box = wx.CheckBox(self, wx.NewId(), 'Pad Spots') + self.disc_box = wx.CheckBox(self, wx.NewIdRef(), 'Discard at bounds') + self.bbox_box = wx.CheckBox(self, wx.NewIdRef(), 'Keep in bounding box') + self.pado_box = wx.CheckBox(self, wx.NewIdRef(), 'Pad Omega') + self.pads_box = wx.CheckBox(self, wx.NewIdRef(), 'Pad Spots') # Threshold self.thresh_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Threshold', style=wx.ALIGN_CENTER) self.thresh_txt = wx.TextCtrl( - self, wx.NewId(), value='500', + self, wx.NewIdRef(), value='500', style=wx.RAISED_BORDER) # Min PX self.minpx_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Min PX', style=wx.ALIGN_CENTER) self.minpx_txt = wx.TextCtrl( - self, wx.NewId(), value='4', + self, wx.NewIdRef(), value='4', style=wx.RAISED_BORDER) # Spots info self.aread_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Active Reader', style=wx.ALIGN_RIGHT) - self.aread_cho = wx.Choice(self, wx.NewId(), choices=['reader list']) + self.aread_cho = wx.Choice(self, wx.NewIdRef(), choices=['reader list']) self.rdr_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Used Readers', style=wx.ALIGN_RIGHT) - self.rdr_lbx = wx.ListBox(self, wx.NewId(), choices = ['r1', 'r2']) + self.rdr_lbx = wx.ListBox(self, wx.NewIdRef(), choices = ['r1', 'r2']) self.nspot_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Number of Spots', style=wx.ALIGN_RIGHT) self.nspot_txt = wx.TextCtrl( - self, wx.NewId(), value='0', + self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER) # Run button - self.run = wx.Button(self, wx.NewId(), 'Add Spots') - self.clear_but = wx.Button(self, wx.NewId(), 'Clear Spots') + self.run = wx.Button(self, wx.NewIdRef(), 'Add Spots') + self.clear_but = wx.Button(self, wx.NewIdRef(), 'Clear Spots') # Spots for Indexing info self.amat_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Active Material', style=wx.ALIGN_RIGHT) - self.amat_cho = wx.Choice(self, wx.NewId(), choices=['mat list']) + self.amat_cho = wx.Choice(self, wx.NewIdRef(), choices=['mat list']) self.Bind(wx.EVT_CHOICE, self.OnMatChoice, self.aread_cho) self.hkls_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), '', style=wx.ALIGN_RIGHT) - self.hkls_but = wx.Button(self, wx.NewId(), 'HKLs') + self.hkls_but = wx.Button(self, wx.NewIdRef(), 'HKLs') self.nspotind_lab = wx.StaticText( - self, wx.NewId(), + self, wx.NewIdRef(), 'Number of Spots', style=wx.ALIGN_RIGHT) self.nspotind_txt = wx.TextCtrl( - self, wx.NewId(), value='0', + self, wx.NewIdRef(), value='0', style=wx.RAISED_BORDER) # Run button for indexing spots - self.run_ind = wx.Button(self, wx.NewId(), 'Process Spots\nfor Indexing') + self.run_ind = wx.Button(self, wx.NewIdRef(), 'Process Spots\nfor Indexing') return @@ -340,7 +340,7 @@ def OnRun(self, evt): 'args' : (), 'kwargs': dict() } - logwin = logWindow(self, wx.NewId(), action, 'Finding Spots') + logwin = logWindow(self, wx.NewIdRef(), action, 'Finding Spots') logwin.ShowModal() self.updateFromExp() @@ -357,7 +357,7 @@ def OnRunInd(self, evt): def OnRunHKLs(self, evt): """Select 
HKLs to use for indexing""" exp = wx.GetApp().ws - hkls_dlg = HklsDlg(self, wx.NewId(), exp.activeMaterial) + hkls_dlg = HklsDlg(self, wx.NewIdRef(), exp.activeMaterial) if hkls_dlg.ShowModal() == wx.ID_OK: exp.activeMaterial.planeData.exclusions = hkls_dlg.getExclusions() diff --git a/hexrd/wx/xrdnotebook.py b/hexrd/wx/xrdnotebook.py index ead1a11d..9ff729e9 100644 --- a/hexrd/wx/xrdnotebook.py +++ b/hexrd/wx/xrdnotebook.py @@ -70,30 +70,30 @@ def __init__(self, parent, id, **kwargs): self.pageDict = dict() # title = 'Materials' - panel = matPanel(self, wx.NewId()) + panel = matPanel(self, wx.NewIdRef()) self.materialsPanel = panel self.AddPage(panel, title) self.pageDict[title] = panel # title = 'Reader' - self.readerPanel = readerPanel(self, wx.NewId()) + self.readerPanel = readerPanel(self, wx.NewIdRef()) self.AddPage(self.readerPanel, title) self.pageDict[title] = self.readerPanel # title = 'Detector' - self.detectorPanel = detectorPanel(self, wx.NewId()) + self.detectorPanel = detectorPanel(self, wx.NewIdRef()) self.AddPage(self.detectorPanel, title) self.pageDict[title] = self.detectorPanel # title = 'Spots' - self.spotsPanel = spotsPanel(self, wx.NewId()) + self.spotsPanel = spotsPanel(self, wx.NewIdRef()) self.AddPage(self.spotsPanel, title) self.pageDict[title] = self.spotsPanel # # - self.AddPage(indexPanel(self, wx.NewId()), + self.AddPage(indexPanel(self, wx.NewIdRef()), 'Indexing') - self.AddPage(grainPanel(self, wx.NewId()), + self.AddPage(grainPanel(self, wx.NewIdRef()), 'Grains') # # Make sure page is updated on page change. diff --git a/hexrd/xrd/crystallography.py b/hexrd/xrd/crystallography.py index 049b4cc3..8a08fcba 100644 --- a/hexrd/xrd/crystallography.py +++ b/hexrd/xrd/crystallography.py @@ -33,10 +33,7 @@ import csv import os -from IPython import embed - -from scipy import constants as C - +from hexrd import constants from hexrd.matrixutil import sqrt, unitVector, columnNorm, sum from hexrd.xrd.rotations import rotMatOfExpMap, mapAngle from hexrd.xrd import symmetry @@ -90,21 +87,17 @@ def processWavelength(arg): if arg.isLength(): retval = arg.getVal(dUnit) elif arg.isEnergy(): - try: - speed = C.c - planck = C.h - except: - raise NotImplementedError, 'scipy does not have constants' - # speed = ... - # planck = ... - e = arg.getVal('J') - retval = valunits.valWUnit('wavelength', 'length', planck*speed/e, 'm').getVal(dUnit) + e = arg.getVal('keV') + retval = valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' + ).getVal(dUnit) else: - raise RuntimeError, 'do not know what to do with '+str(arg) + raise RuntimeError('do not know what to do with '+str(arg)) else: - keV2J = 1.e3*C.e - e = keV2J * arg - retval = valunits.valWUnit('wavelength', 'length', C.h*C.c/e, 'm').getVal(dUnit) + # !!! 
assuming arg is in keV + retval = valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' + ).getVal(dUnit) return retval diff --git a/hexrd/xrd/detector.py b/hexrd/xrd/detector.py index c80bd9c8..2ab37b1f 100644 --- a/hexrd/xrd/detector.py +++ b/hexrd/xrd/detector.py @@ -47,23 +47,22 @@ from matplotlib import mlab from matplotlib.widgets import Slider, Button, RadioButtons -from hexrd import xrd -from hexrd.xrd.xrdbase import getGaussNDParams, dataToFrame -from hexrd.xrd.crystallography import processWavelength -from hexrd.xrd import distortion -from hexrd.xrd import transforms_CAPI as xfcapi -from hexrd.xrd.rotations import mapAngle -from hexrd.xrd.rotations import rotMatOfExpMap -from hexrd.xrd.rotations import rotMatOfExpMap, arccosSafe -from hexrd.quadrature import q1db -from hexrd.quadrature import q2db -from hexrd.matrixutil import unitVector -from hexrd import valunits -from hexrd.xrd import transforms_CAPI as xfcapi +from .image_io import ReadGeneric, ReadGE, ReaderDeprecationWarning, Framer2DRC +from .xrdbase import getGaussNDParams, dataToFrame +from .crystallography import processWavelength +from .rotations import mapAngle +from .rotations import rotMatOfExpMap +from .rotations import rotMatOfExpMap, arccosSafe +from ..quadrature import q1db +from ..quadrature import q2db +from ..matrixutil import unitVector +from .. import valunits +from . import distortion +from . import transforms_CAPI as xfcapi havePlotWrap = True try: - from hexrd import plotwrap + from .. import plotwrap except: havePlotWrap = False @@ -80,9 +79,14 @@ ####### # GE, Perkin -NROWS = 2048 -NCOLS = 2048 -PIXEL = 0.2 +#NROWS = 2048 +#NCOLS = 2048 +#PIXEL = 0.2 + +# dexela, horizontal +NROWS = 3888 +NCOLS = 3072 +PIXEL = 0.0748 # MAR345 #NROWS = 3450 @@ -201,1182 +205,6 @@ def getCMap(spec): raise RuntimeError, 'unknown: '+str(spec) return cmap - -class Framer2DRC(object): - """ - Base class for readers. 
- - You can make an instance of this class and use it for most of the - things a reader would do, other than actually reading frames - """ - def __init__(self, - ncols, nrows, - dtypeDefault='int16', dtypeRead='uint16', dtypeFloat='float64'): - self.__ncols = ncols - self.__nrows = nrows - self.__frame_dtype_dflt = dtypeDefault - self.__frame_dtype_read = dtypeRead - self.__frame_dtype_float = dtypeFloat - - self.__nbytes_frame = num.nbytes[dtypeRead]*nrows*ncols - - return - - def get_ncols(self): - return self.__ncols - ncols = property(get_ncols, None, None) - - def get_nbytesFrame(self): - return self.__nbytes_frame - nbytesFrame = property(get_nbytesFrame, None, None) - - def get_nrows(self): - return self.__nrows - nrows = property(get_nrows, None, None) - - def get_dtypeDefault(self): - return self.__frame_dtype_dflt - dtypeDefault = property(get_dtypeDefault, None, None) - def get_dtypeRead(self): - return self.__frame_dtype_read - dtypeRead = property(get_dtypeRead, None, None) - def get_dtypeFloat(self): - return self.__frame_dtype_float - dtypeFloat = property(get_dtypeFloat, None, None) - - def getOmegaMinMax(self): - raise NotImplementedError - def getDeltaOmega(self): - 'needed in findSpotsOmegaStack' - raise NotImplementedError - def getNFrames(self): - """ - number of total frames with real data, not number remaining - needed in findSpotsOmegaStack - """ - raise NotImplementedError - def read(self, nskip=0, nframes=1, sumImg=False): - 'needed in findSpotsOmegaStack' - raise NotImplementedError - def getDark(self): - 'needed in findSpotsOmegaStack' - raise NotImplementedError - def getFrameOmega(self, iFrame=None): - 'needed in findSpotsOmegaStack' - raise NotImplementedError - - - @classmethod - def maxVal(cls, dtypeRead): - """ - maximum value that can be stored in the image pixel data type; - redefine as desired - """ - maxInt = num.iinfo(dtypeRead).max - return maxInt - - def getEmptyMask(self): - """ - convenience method for getting an emtpy mask or bin frame - """ - # this used to be a class method - mask = num.zeros([self.nrows, self.ncols], dtype=bool) - return mask - - def getSize(self): - retval = (self.nrows, self.ncols) - return retval - - def frame(self, nframes=None, dtype=None, buffer=None, mask=None): - if buffer is not None and dtype is None: - if hasattr(buffer,'dtype'): - dtype = buffer.dtype - if dtype is None: - dtype = self.__frame_dtype_dflt - if nframes is None: - shape = (self.nrows, self.ncols) - else: - assert mask is None,\ - 'not coded: multiframe with mask' - shape = (nframes, self.nrows, self.ncols) - if buffer is None: - retval = num.zeros(shape, dtype=dtype) - else: - retval = num.array(buffer, dtype=dtype).reshape(shape) - if mask is not None: - retval = num.ma.masked_array(retval, mask, hard_mask=True, copy=False) - return retval - - @classmethod - def display(cls, - thisframe, - roi = None, - pw = None, - **kwargs - ): - # ... interpolation method that looks like max() so that do not miss peak pixels? 
- - if roi is not None: - dROI = thisframe[ roi[0][0]:roi[0][1], roi[1][0]:roi[1][1] ] - else: - dROI = thisframe - vmin, vmax, cmap = cls.getDisplayArgs(dROI, kwargs) - - if havePlotWrap: - if pw is None: - p = plotwrap.PlotWrap(**kwargs) - kwargs = {} - else: - p = pw - p(dROI, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs) - # 'turn off format_coord because have not made this one report correctly' - # p.a.format_coord = lambda x,y: '' - # elif havePylab: - # assert pw is None, 'do not specify pw without plotwrap' - # retval = pylab.imshow(dROI, vmin=vmin, vmax=vmax, cmap=cm.bone) - else: - raise RuntimeError, 'no plotting pacakge available' - - retval = p - return retval - - @classmethod - def getDisplayArgs(cls, dROI, kwargs): - range = kwargs.pop('range',None) - cmap = kwargs.pop('cmap',None) - dtypeRead = kwargs.pop('dtypeRead','uint16') - - roiMin = dROI.min() - roiMax = dROI.max() - # - centered = getCentered(roiMin, roiMax) - if dROI.dtype == 'bool' and range is None: - centered = False - vmin = 0 - vmax = 1 - elif dROI.dtype == 'float64' and \ - centered and \ - range is None: - range = 2.0*num.max(num.abs(dROI)) - thr = 0.0 - vmin = thr-range/2 - vmax = thr+range/2 - else: - centered = False - vmin, vmax = cls.getVMM(dROI, range=range, dtypeRead=dtypeRead) - # - if cmap is None: - cmap = getCMap(centered) - - return vmin, vmax, cmap - - @classmethod - def getVMM(cls, dROI, range=None, dtypeRead='uint16'): - if range is None: - range = 200. - if hasattr(range,'__len__'): - assert len(range) == 2, 'wrong length for value range' - vmin = range[0] - vmax = range[1] - else: - thr = dROI.mean() - vmin = max(0, thr-range/2) # max(dROI.min(), thr-range/2) - vmax = min(cls.maxVal(dtypeRead), thr+range/2) - return vmin, vmax - -def omeRangeToFrameRange(omeA, omeB, omegaStart, omegaDelta, nFrames, checkWrap=True, slicePad=1): - """ - assumes omegas are evenly spaced - omegaDelta may be negative - """ - retval = None - - wrapsAround = abs( ( nFrames * abs(omegaDelta) ) - ( 2.0 * num.pi ) ) < 1.0e-6 - iFA = int((omeA - omegaStart) / omegaDelta) - iFB = int((omeB - omegaStart) / omegaDelta) - - if checkWrap and wrapsAround: - iFAW = iFA % nFrames - shift = iFAW - iFA - iFBW = iFB + shift - if iFBW < 0: - retval = [ (iFBW+nFrames, nFrames-1 + slicePad), (0, iFAW + slicePad) ] - # print '...*** making split range ...*** %g %g %g %g ' % (iFA, iFB, iFAW, iFBW) +str(retval) - elif iFBW >= nFrames: - retval = [ (iFA, nFrames-1 + slicePad), (0, iFBW-nFrames + slicePad) ] - # print '...*** making split range ...*** %g %g %g %g ' % (iFA, iFB, iFAW, iFBW) +str(retval) - else: - iFA = iFAW - iFB = iFBW - retval = None - - if retval is None: - rawFrameRange = num.sort(num.hstack( (iFA, iFB) )) - retval = ( - num.hstack( (rawFrameRange, 0) )[0], - num.hstack( (nFrames-1, rawFrameRange ) )[-1] + slicePad, - ) - return retval -# -def frameInRange(iFrame, frameRange): - """ - for use with output from omeRangeToFrameRange; - trust that slicePad=1 was used in omeRangeToFrameRange - """ - retval = False - if hasattr(frameRange[0],'index'): - for frameRangeThis in frameRange: - if iFrame >= frameRangeThis[0] and iFrame < frameRangeThis[1]: - retval = True - # print '...*** found in range for split range ...***' - break - else: - if iFrame >= frameRange[0] and iFrame < frameRange[1]: - retval = True - return retval - -def getNFramesFromBytes(fileBytes, nbytesHeader, nbytesFrame): - assert (fileBytes - nbytesHeader) % nbytesFrame == 0,\ - 'file size not correct' - nFrames = int((fileBytes - nbytesHeader) / 
nbytesFrame) - if nFrames*nbytesFrame + nbytesHeader != fileBytes: - raise RuntimeError, 'file size not correctly calculated' - return nFrames - -class FrameWriter(Framer2DRC): - def __init__(self, *args, **kwargs): - self.filename = kwargs.pop('filename') - self.__nbytes_header = kwargs.pop('nbytesHeader', 0) - self.__nempty = kwargs.pop('nempty', 0) - - Framer2DRC.__init__(self, *args, **kwargs) - - self.nFrame = 0 - self.img = open(self.filename, mode='wb') - - # skip header for now - self.img.seek(self.__nbytes_header, 0) - if self.__nempty > 0: - self.img.seek(self.nbytesFrame*self.__nempty, 1) - - return - def write(self, data, doAllChecks=True): - - # if nskip > 0: - # self.img.seek(self.__nbytes_frame*nskip, 1) - - assert len(data.shape) == 2, 'data is not 2D' - assert data.shape[0] == self.nrows, 'number of rows is wrong' - assert data.shape[1] == self.ncols, 'number of rows is wrong' - - intType = False - - if num.result_type(self.dtypeRead).kind == 'u': - intType = True - if data.dtype.kind == 'u': - 'all set' - else: - if num.any(data < 0): - raise RuntimeError, 'trying to write negative data to unsigned type' - data = data.astype(self.dtypeRead) - elif num.result_type(self.dtypeRead).kind == 'i': - intType = True - data = data.astype(self.dtypeRead) - else: - data = data.astype(self.dtypeRead) - - if doAllChecks and intType: - dataMax = data.max() - readMax = num.iinfo(self.dtypeRead).max - if dataMax > readMax : - raise RuntimeError, 'max of %g greater than supported value of %g' % (dataMax, readMax) - - data.tofile(self.img) - - return - def __call__(self, *args, **kwargs): - return self.write(*args, **kwargs) - def close(self): - self.img.close() - return - -class ReadGeneric(Framer2DRC): - ''' - may eventually want ReadGE to inherit from this, or pull common things - off to a base class - ''' - def __init__(self, filename, ncols, nrows, *args, **kwargs): - self.filename = filename - self.__nbytes_header = kwargs.pop('nbytes_header', 0) - self.__nempty = kwargs.pop('nempty', 0) - doFlip = kwargs.pop('doFlip', False) - self.subtractDark = kwargs.pop('subtractDark', False) - - 'keep things for makeNew convenience' - self.__args = args - self.__kwargs = kwargs - - if doFlip is not False: - raise NotImplementedError, 'doFlip not False' - if self.subtractDark is not False: - raise NotImplementedError, 'subtractDark not False' - - Framer2DRC.__init__(self, ncols, nrows, **kwargs) - - self.dark = None - self.dead = None - self.mask = None - - self.__wrapsAround = False # default - - self.omegaStart = None - self.omegaDelta = None - self.omegas = None - # - if len(args) == 0: - pass - elif len(args) == 2: - self.omegaStart = omegaStart = args[0] - self.omegaDelta = omegaDelta = args[1] - else: - raise RuntimeError, 'do not know what to do with args: '+str(args) - self.omegas = None - if self.omegaStart is not None: - if hasattr(omegaStart, 'getVal'): - omegaStart = omegaStart.getVal('radians') - if hasattr(omegaDelta, 'getVal'): - omegaDelta = omegaDelta.getVal('radians') - nFramesTot = self.getNFrames() - self.omegas = \ - num.arange(omegaStart, omegaStart+omegaDelta*(nFramesTot-0.5), omegaDelta) + \ - 0.5 * omegaDelta # put omegas at mid-points of omega range for frame - omegaEnd = omegaStart+omegaDelta*(nFramesTot) - self.omegaMin = min(omegaStart, omegaEnd) - self.omegaMax = max(omegaStart, omegaEnd) - self.omegaDelta = omegaDelta - self.omegaStart = omegaStart - self.__wrapsAround = abs( ( nFramesTot * abs(omegaDelta) ) / ( 2.0 * num.pi ) - 1.0 ) < 1.0e-6 - - if len(kwargs) > 
0: - raise RuntimeError, 'unparsed kwargs : %s' + str(kwargs.keys()) - - self.iFrame = -1 # counter for last global frame that was read - - self.img = None - if self.filename is not None: - if self.filename.split('.') == 'bz2': - self.img = bz2.BZ2File(self.filename, mode='rb') - else: - self.img = open(self.filename, mode='rb') - # skip header for now - self.img.seek(self.__nbytes_header, 0) - if self.__nempty > 0: - self.img.seek(self.nbytesFrame*self.__nempty, 1) - - return - - def makeNew(self): - """return a clean instance for the same data files - useful if want to start reading from the beginning""" - newSelf = self.__class__(self.filename, self.ncols, self.nrows, *self.__args, **self.__kwargs) - return newSelf - def get_wrapsAround(self): - return self.__wrapsAround - wrapsAround = property(get_wrapsAround, None, None) - - def getFrameUseMask(self): - return False - def __flip(self, thisframe): - return thisframe - - ''' - def read(self, nskip=0, nframes=1, sumImg=False): - - if not nframes == 1: - raise NotImplementedError, 'nframes != 1' - if not sumImg == False: - raise NotImplementedError, 'sumImg != False' - - data = self.__readNext(nskip=nskip) - - self.iFrame += nskip + 1 - - return data - ''' - def __call__(self, *args, **kwargs): - return self.read(*args, **kwargs) - def read(self, nskip=0, nframes=1, sumImg=False): - """ - sumImg can be set to True or to something like numpy.maximum - """ - - if self.img is None: - raise RuntimeError, 'no image file open' - - 'get iFrame ready for how it is used here' - self.iFrame = num.atleast_1d(self.iFrame)[-1] - iFrameList = [] - multiframe = nframes > 1 - - nFramesInv = 1.0 / nframes - doDarkSub = self.subtractDark # and self.dark is not None - - if doDarkSub: - assert self.dark is not None, 'self.dark is None' - - # assign storage array - if sumImg: - sumImgCallable = hasattr(sumImg,'__call__') - imgOut = self.frame(dtype=self.dtypeFloat, mask=self.dead) - elif multiframe: - imgOut = self.frame(nframes=nframes, dtype=self.dtypeDflt, mask=self.dead) - - - # now read data frames - for i in range(nframes): - - #data = self.__readNext(nskip=nskip) - #thisframe = data.reshape(self.__nrows, self.__ncols) - data = self.__readNext(nskip=nskip) # .reshape(self.__nrows, self.__ncols) - self.iFrame += nskip + 1 - nskip=0 # all done skipping once have the first frame! 
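Aside: the read() logic in this hunk (both the removed ReadGeneric.read and the parallel ReadGE.read further down) treats sumImg as either plain True, meaning average the frames, or a reducing callable such as numpy.maximum. A minimal standalone sketch of that convention; all names here are illustrative and not part of the hexrd API:

```python
import numpy as np

def aggregate_frames(frames, sumImg=True):
    # sumImg=True -> running average; a callable (e.g. np.maximum)
    # is applied pairwise as a reduction, mirroring the branch
    # structure of the readers' read() methods
    out = np.zeros_like(frames[0], dtype=float)
    n_inv = 1.0 / len(frames)
    for f in frames:
        if callable(sumImg):
            out = sumImg(out, f)
        else:
            out = out + f * n_inv
    return out

frames = [np.ones((4, 4)) * i for i in (1, 2, 3)]
assert np.allclose(aggregate_frames(frames), 2.0)                # average
assert aggregate_frames(frames, sumImg=np.maximum).max() == 3.0  # max projection
```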
- iFrameList.append(self.iFrame) - # dark subtraction - if doDarkSub: - 'used to have self.dtypeFloat here, but self.dtypeDflt does the trick' - thisframe = self.frame(buffer=data, - dtype=self.dtypeDflt, mask=self.dead) - self.dark - else: - thisframe = self.frame(buffer=data, - mask=self.dead) - - # flipping - thisframe = self.__flip(thisframe) - - # masking (True get zeroed) - if self.mask is not None: - if self.getFrameUseMask(): - thisframe[self.mask] = 0 - - # assign output - if sumImg: - if sumImgCallable: - imgOut = sumImg(imgOut, thisframe) - else: - imgOut = imgOut + thisframe * nFramesInv - elif multiframe: - imgOut[i, :, :] = thisframe[:, :] - 'end of loop over nframes' - - if sumImg: - # imgOut = imgOut / nframes # now taken care of above - pass - elif not multiframe: - imgOut = thisframe - - if multiframe: - 'make iFrame a list so that omega or whatever can be averaged appropriately' - self.iFrame = iFrameList - return imgOut - - def getNFrames(self, lessEmpty=True): - fileBytes = os.stat(self.filename).st_size - nFrames = getNFramesFromBytes(fileBytes, self.__nbytes_header, self.nbytesFrame) - if lessEmpty: - nFrames -= self.__nempty - return nFrames - - def getOmegaMinMax(self): - assert self.omegas is not None,\ - """instance does not have omega information""" - return self.omegaMin, self.omegaMax - def getDeltaOmega(self, nframes=1): - assert self.omegas is not None,\ - """instance does not have omega information""" - return self.omegaDelta * nframes - def getDark(self): - 'no dark yet supported' - return 0 - def frameToOmega(self, frame): - scalar = num.isscalar(frame) - frames = num.asarray(frame) - if frames.dtype == int: - retval = self.omegas[frames] - else: - retval = (frames + 0.5) * self.omegaDelta + self.omegaStart - if scalar: - retval = num.asscalar(retval) - return retval - def getFrameOmega(self, iFrame=None): - """if iFrame is none, use internal counter""" - assert self.omegas is not None,\ - """instance does not have omega information""" - if iFrame is None: - iFrame = self.iFrame - if hasattr(iFrame, '__len__'): - 'take care of case nframes>1 in last call to read' - retval = num.mean(self.omegas[iFrame]) - else: - retval = self.omegas[iFrame] - return retval - - def __readNext(self, nskip=0): - if self.img is None: - raise RuntimeError, 'no image file open' - - if nskip > 0: - self.img.seek(self.nbytesFrame*nskip, 1) - data = num.fromfile(self.img, - dtype=self.dtypeRead, - count=self.nrows*self.ncols) - return data - - def getNFrames(self, lessEmpty=True): - fileBytes = os.stat(self.filename).st_size - nFrames = getNFramesFromBytes(fileBytes, self.__nbytes_header, self.nbytesFrame) - if lessEmpty: - nFrames -= self.__nempty - return nFrames - - def getOmegaMinMax(self): - assert self.omegas is not None,\ - """instance does not have omega information""" - return self.omegaMin, self.omegaMax - def getDeltaOmega(self, nframes=1): - assert self.omegas is not None,\ - """instance does not have omega information""" - return self.omegaDelta * nframes - def getDark(self): - 'no dark yet supported' - return 0 - def getFrameOmega(self, iFrame=None): - """if iFrame is none, use internal counter""" - assert self.omegas is not None,\ - """instance does not have omega information""" - if iFrame is None: - iFrame = self.iFrame - if hasattr(iFrame, '__len__'): - 'take care of case nframes>1 in last call to read' - retval = num.mean(self.omegas[iFrame]) - else: - retval = self.omegas[iFrame] - return retval - - def getWriter(self, filename): - # if not self.doFlip is 
False: - # raise NotImplementedError, 'doFlip true not coded' - new = FrameWriter(self.ncols, self.nrows, - filename=filename, - dtypeDefault=self.dtypeDefault, - dtypeRead=self.dtypeRead, - dtypeFloat=self.dtypeFloat, - nbytesHeader=self.__nbytes_header) - return new - -class ReadGE(Framer2DRC): - """ - Read in raw GE files; this is the class version of the foregoing functions - - NOTES - - *) The flip axis ('v'ertical) was verified on 06 March 2009 by - JVB and UL. This should be rechecked if the configuration of the GE - changes or you are unsure. - - *) BE CAREFUL! nframes should be < 10 or so, or you will run out of - memory in the namespace on a typical machine. - - *) The header is currently ignored - - *) If a dark is specified, this overrides the use of empty frames as - background; dark can be a file name or frame - - *) In multiframe images where background subtraction is requested but no - dark is specified, attempts to use the - empty frame(s). An error is returned if there are not any specified. - If there are multiple empty frames, the average is used. - - - NOTES: - - It is likely that some of the methods here should be moved up to a base class - """ - __nbytes_header = 8192 - __idim = min(NROWS, NCOLS) - __nrows = NROWS - __ncols = NCOLS - __frame_dtype_dflt = 'int16' # good for doing subtractions - __frame_dtype_read = 'uint16' - __frame_dtype_float = 'float64' - __nbytes_frame = num.nbytes[num.uint16]*__nrows*__ncols # = 2*__nrows*__ncols - __debug = False - __location = ' ReadGE' - __readArgs = { - 'dtype' : __frame_dtype_read, - 'count' : __nrows*__ncols - } - __castArgs = { - 'dtype' : __frame_dtype_dflt - } - __inParmDict = { - 'omegaStart':None, - 'omegaDelta':None, - 'subtractDark':False, - 'mask':None, - 'useMask':None, - 'dark':None, - 'dead':None, - 'nDarkFrames':1, - 'doFlip':True, - 'flipArg':'v', - } - # 'readHeader':False - def __init__(self, - fileInfo, - *args, - **kwargs): - """ - meant for reading a series of frames from an omega sweep, with - fixed delta-omega for each frame - - omegaStart and omegaDelta can follow fileInfo or be specified - in whatever order by keyword - - fileInfo: string, (string, nempty), or list of (string, - nempty) for multiple files - - for multiple files and no dark, dark is formed only from empty - frames in the first file - """ - - # parse kwargs first - self.__kwPassed = {} - for parm, val in self.__inParmDict.iteritems(): - self.__kwPassed[parm] = kwargs.has_key(parm) - if kwargs.has_key(parm): - val = kwargs.pop(parm) - self.__setattr__(parm, val) - if len(kwargs) > 0: - raise RuntimeError, 'unparsed keyword arguments: '+str(kwargs.keys()) - - Framer2DRC.__init__(self, - self.__ncols, self.__nrows, - dtypeDefault = self.__frame_dtype_dflt, - dtypeRead = self.__frame_dtype_read, - dtypeFloat = self.__frame_dtype_float, - ) - - # omega information - if len(args) == 0: - pass - elif len(args) == 2: - self.omegaStart = args[0] - self.omegaDelta = args[1] - else: - raise RuntimeError, 'do not know what to do with args : '+str(args) - - # initialization - self.omegas = None - self.img = None - self.th = None - self.fileInfo = None - self.fileInfoR = None - self.nFramesRemain = None # remaining in current file - self.iFrame = -1 # counter for last global frame that was read - self.__wrapsAround = False # default - - if self.dark is not None: - if not self.__kwPassed['subtractDark']: - 'subtractDark was not explicitly passed, set it True' - self.subtractDark = True - if isinstance(self.dark, str): - darkFile = self.dark - 
self.dark = ReadGE.readDark(darkFile, nframes=self.nDarkFrames) - self.__log('got dark from %d frames in file %s' % (self.nDarkFrames, darkFile)) - elif isinstance(self.dark, num.ndarray): - assert self.dark.size == self.__nrows * self.__ncols, \ - 'self.dark wrong size' - self.dark.shape = (self.__nrows, self.__ncols) - if self.dark.dtype.name == self.__frame_dtype_read: - 'protect against unsigned-badness when subtracting' - self.dark = self.dark.astype(self.__frame_dtype_dflt) - self.__log('got dark from ndarray input') - else: - raise RuntimeError, 'do not know what to do with dark of type : '+str(type(self.dark)) - - if fileInfo is not None: - self.__setupRead(fileInfo, self.subtractDark, self.mask, self.omegaStart, self.omegaDelta) - - return - - @classmethod - def display(cls, - thisframe, - roi = None, - pw = None, - **kwargs - ): - 'this is a bit ugly in that it sidesteps the dtypeRead property' - retval = Framer2DRC.display(thisframe, roi=roi, pw=pw, dtypeRead=cls.__frame_dtype_read) - return retval - - @classmethod - def readRaw(cls, fname, mode='raw', headerlen=0): - ''' - read a raw binary file; - if specified, headerlen is in bytes; - does not do any flipping - ''' - print cls - if hasattr(cls, 'doFlip'): - print 'has doFlip' - img = open(fname, mode='rb') - if headerlen > 0: - img.seek(headerlen, 0) - if mode == 'raw' or mode == 'avg': - dtype = cls.__frame_dtype_read - elif mode == 'sum': - dtype = 'float32' - else: - raise RuntimeError, 'unknown mode : '+str(mode) - thisframe = num.fromfile(img, dtype=dtype, count=cls.__nrows*cls.__ncols).reshape(cls.__nrows, cls.__ncols) - return thisframe - def rawRead(self, *args, **kwargs): - ''' - wrapper around readRaw that does the same flipping as the reader instance from which it is called - ''' - thisframe = self.__flip(self.readRaw(*args, **kwargs)) - return thisframe - @classmethod - def readDark(cls, darkFile, nframes=1): - 'dark subtraction is done before flipping, so do not flip when reading either' - darkReader = ReadGE(darkFile, doFlip=False) - dark = darkReader.read(nframes=nframes, sumImg=True).astype(cls.__frame_dtype_dflt) - darkReader.close() - return dark - def makeNew(self): - """return a clean instance for the same data files - useful if want to start reading from the beginning""" - inParmDict = {} - inParmDict.update(self.__inParmDict) - for key in self.__inParmDict.keys(): - inParmDict[key] = eval("self."+key) - newSelf = self.__class__(self.fileInfo, **inParmDict) - return newSelf - def getRawReader(self, doFlip=False): - new = self.__class__(self.fileInfo, doFlip=doFlip) - return new - - def get_nbytes_header(self): - return self.__nbytes_header - nbytesHeader = property(get_nbytes_header, None, None) - - def getWriter(self, filename): - if not self.doFlip is False: - raise NotImplementedError, 'doFlip true not coded' - new = FrameWriter(self.ncols, self.nrows, - filename=filename, - dtypeDefault=self.dtypeDefault, - dtypeRead=self.dtypeRead, - dtypeFloat=self.dtypeFloat, - nbytesHeader=self.nbytesHeader) - return new - - def __setupRead(self, fileInfo, subtractDark, mask, omegaStart, omegaDelta): - - self.fileInfo = fileInfo - self.fileListR = self.__convertFileInfo(self.fileInfo) - self.fileListR.reverse() # so that pop reads in order - - self.subtractDark = subtractDark - self.mask = mask - - if self.dead is not None: - self.deadFlipped = self.__flip(self.dead) - - assert (omegaStart is None) == (omegaDelta is None),\ - 'must provide either both or neither of omega start and delta' - if omegaStart is not 
None: - if hasattr(omegaStart, 'getVal'): - omegaStart = omegaStart.getVal('radians') - if hasattr(omegaDelta, 'getVal'): - omegaDelta = omegaDelta.getVal('radians') - nFramesTot = self.getNFrames() - self.omegas = \ - num.arange(omegaStart, omegaStart+omegaDelta*(nFramesTot-0.5), omegaDelta) + \ - 0.5 * omegaDelta # put omegas at mid-points of omega range for frame - omegaEnd = omegaStart+omegaDelta*(nFramesTot) - self.omegaMin = min(omegaStart, omegaEnd) - self.omegaMax = max(omegaStart, omegaEnd) - self.omegaDelta = omegaDelta - self.omegaStart = omegaStart - self.__wrapsAround = abs( ( nFramesTot * abs(omegaDelta) ) / ( 2.0 * num.pi ) - 1.0 ) < 1.0e-6 - - self.__nextFile() - - return - - def get_wrapsAround(self): - return self.__wrapsAround - wrapsAround = property(get_wrapsAround, None, None) - - def getNFrames(self): - """number of total frames with real data, not number remaining""" - nFramesTot = self.getNFramesFromFileInfo(self.fileInfo) - return nFramesTot - def getDeltaOmega(self, nframes=1): - assert self.omegas is not None,\ - """instance does not have omega information""" - return self.omegaDelta * nframes - def getOmegaMinMax(self): - assert self.omegas is not None,\ - """instance does not have omega information""" - return self.omegaMin, self.omegaMax - def frameToOmega(self, frame): - scalar = num.isscalar(frame) - frames = num.asarray(frame) - if frames.dtype == int: - retval = self.omegas[frames] - else: - retval = (frames + 0.5) * self.omegaDelta + self.omegaStart - if scalar: - retval = num.asscalar(retval) - return retval - def getFrameOmega(self, iFrame=None): - """if iFrame is none, use internal counter""" - assert self.omegas is not None,\ - """instance does not have omega information""" - if iFrame is None: - iFrame = self.iFrame - if hasattr(iFrame, '__len__'): - 'take care of case nframes>1 in last call to read' - retval = num.mean(self.omegas[iFrame]) - else: - retval = self.omegas[iFrame] - return retval - def omegaToFrame(self, omega, float=False): - assert self.omegas is not None,\ - 'instance does not have omega information' - if self.__wrapsAround: - 'need to map omegas into range in case omega spans the branch cut' - omega = self.omegaMin + omega % (2.0*num.pi) - if float: - assert omega >= self.omegaMin and omega <= self.omegaMax,\ - 'omega %g is outside of the range [%g,%g] for the reader' % (omega, self.omegaMin, self.omegaMax) - retval = (omega - self.omegaStart)/self.omegaDelta - 0.5*self.omegaDelta - else: - # temp = num.where(self.omegas == omega)[0] - temp = num.where( num.abs(self.omegas - omega) < 0.1*abs(self.omegaDelta) )[0] - assert len(temp) == 1, 'omega not found, or found more than once' - retval = temp[0] - return retval - def getFrameUseMask(self): - """this is an optional toggle to turn the mask on/off""" - assert isinstance(self.iFrame, int), \ - 'self.iFrame needs to be an int for calls to getFrameUseMask' - if self.useMask is None: - retval = True - else: - assert len(self.useMask) == self.getNFrames(),\ - "len(useMask) must be %d; yours is %d" % (self.getNFrames(), len(self.useMask)) - retval = self.useMask[self.iFrame] - return retval - @classmethod - def __getNFrames(cls, fileBytes): - retval = getNFramesFromBytes(fileBytes, cls.__nbytes_header, cls.__nbytes_frame) - return retval - def __nextFile(self): - - # close in case already have a file going - self.close() - - fname, nempty = self.fileListR.pop() - - # open file - fileBytes = os.stat(fname).st_size - self.img = open(fname, mode='rb') - - # skip header for now - 
self.img.seek(self.__nbytes_header, 0) - - # figure out number of frames - self.nFramesRemain = self.__getNFrames(fileBytes) - - if nempty > 0: # 1 or more empty frames - if self.dark is None: - scale = 1.0 / nempty - self.dark = self.frame(dtype=self.__frame_dtype_float) - for i in range(nempty): - self.dark = self.dark + num.fromfile( - self.img, **self.__readArgs - ).reshape(self.__nrows, self.__ncols) * scale - self.dark.astype(self.__frame_dtype_dflt) - self.__log('got dark from %d empty frames in file %s' % (nempty, fname)) - else: - self.img.seek(self.nbytesFrame*nempty, 1) - self.nFramesRemain -= nempty - - if self.subtractDark and self.dark is None: - raise RuntimeError, "Requested dark field subtraction, but no file or empty frames specified!" - - return - @staticmethod - def __convertFileInfo(fileInfo): - if isinstance(fileInfo,str): - fileList = [(fileInfo, 0)] - elif hasattr(fileInfo,'__len__'): - assert len(fileInfo) > 0, 'length zero' - if hasattr(fileInfo[0],'__iter__'): # checking __len__ bad because has len attribute - fileList = copy.copy(fileInfo) - else: - assert len(fileInfo) == 2, 'bad file info' - fileList = [fileInfo] - else: - raise RuntimeError, 'do not know what to do with fileInfo '+str(fileInfo) - # fileList.reverse() - return fileList - def readBBox(self, bbox, raw=True, doFlip=None): - """ - with raw=True, read more or less raw data, with bbox = [(iLo,iHi),(jLo,jHi),(fLo,fHi)] - - careful: if raw is True, must set doFlip if want frames - potentially flipped; can set it to a reader instance to pull - the doFlip value from that instance - """ - - if raw: - if hasattr(doFlip,'doFlip'): - 'probably a ReadGe instance, pull doFlip from it' - doFlip = doFlip.doFlip - doFlip = doFlip or False # set to False if is None - reader = self.getRawReader(doFlip=doFlip) - else: - assert doFlip is None, 'do not specify doFlip if raw is True' - reader = self.makeNew() - - nskip = bbox[2][0] - bBox = num.array(bbox) - sl_i = slice(*bBox[0]) - sl_j = slice(*bBox[1]) - 'plenty of performance optimization might be possible here' - if raw: - retval = num.empty( tuple(bBox[:,1] - bBox[:,0]), dtype=self.__frame_dtype_read ) - else: - retval = num.empty( tuple(bBox[:,1] - bBox[:,0]), dtype=self.__frame_dtype_dflt ) - for iFrame in range(retval.shape[2]): - thisframe = reader.read(nskip=nskip) - nskip = 0 - retval[:,:,iFrame] = copy.deepcopy(thisframe[sl_i, sl_j]) - if not raw and self.dead is not None: - 'careful: have already flipped, so need deadFlipped instead of dead here' - mask = num.tile(self.deadFlipped[sl_i, sl_j].T, (retval.shape[2],1,1)).T - retval = num.ma.masked_array(retval, mask, hard_mask=True, copy=False) - return retval - def __flip(self, thisframe): - if self.doFlip: - if self.flipArg == 'v': - thisframe = thisframe[:, ::-1] - elif self.flipArg == 'h': - thisframe = thisframe[::-1, :] - elif self.flipArg == 'vh' or self.flipArg == 'hv': - thisframe = thisframe[::-1, ::-1] - elif self.flipArg == 'cw90': - thisframe = thisframe.T[:, ::-1] - elif self.flipArg == 'ccw90': - thisframe = thisframe.T[::-1, :] - else: - raise RuntimeError, "unrecognized flip token." 
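Side note on the flipArg tokens handled just above: each maps to a cheap numpy view, and the two 90-degree cases agree with np.rot90. A quick self-contained check, illustrative only and not part of the patch:

```python
import numpy as np

a = np.arange(6).reshape(2, 3)
flips = {
    'v':     lambda f: f[:, ::-1],     # mirror left-right (vertical axis)
    'h':     lambda f: f[::-1, :],     # mirror up-down (horizontal axis)
    'vh':    lambda f: f[::-1, ::-1],  # 180-degree rotation ('hv' is the same)
    'cw90':  lambda f: f.T[:, ::-1],   # rotate 90 degrees clockwise
    'ccw90': lambda f: f.T[::-1, :],   # rotate 90 degrees counter-clockwise
}
assert np.array_equal(flips['cw90'](a), np.rot90(a, k=-1))
assert np.array_equal(flips['ccw90'](a), np.rot90(a))
```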
- return thisframe - def getDark(self): - if self.dark is None: - retval = 0 - else: - retval = self.dark - return retval - def read(self, nskip=0, nframes=1, sumImg=False, mask=None): - """ - sumImg can be set to True or to something like numpy.maximum - """ - - 'get iFrame ready for how it is used here' - self.iFrame = num.atleast_1d(self.iFrame)[-1] - iFrameList = [] - multiframe = nframes > 1 - - nFramesInv = 1.0 / nframes - doDarkSub = self.subtractDark # and self.dark is not None - - if doDarkSub: - assert self.dark is not None, 'self.dark is None' - - # assign storage array - if sumImg: - sumImgCallable = hasattr(sumImg,'__call__') - imgOut = self.frame(dtype=self.__frame_dtype_float, mask=self.dead) - elif multiframe: - imgOut = self.frame(nframes=nframes, dtype=self.__frame_dtype_dflt, mask=self.dead) - - - # now read data frames - for i in range(nframes): - - #data = self.__readNext(nskip=nskip) - #thisframe = data.reshape(self.__nrows, self.__ncols) - data = self.__readNext(nskip=nskip) # .reshape(self.__nrows, self.__ncols) - self.iFrame += nskip + 1 - nskip=0 # all done skipping once have the first frame! - iFrameList.append(self.iFrame) - # dark subtraction - if doDarkSub: - 'used to have self.__frame_dtype_float here, but self.__frame_dtype_dflt does the trick' - thisframe = self.frame(buffer=data, - dtype=self.__frame_dtype_dflt, mask=self.dead) - self.dark - else: - thisframe = self.frame(buffer=data, - mask=self.dead) - - # flipping - thisframe = self.__flip(thisframe) - - # masking (True get zeroed) - if self.mask is not None: - if self.getFrameUseMask(): - thisframe[self.mask] = 0 - elif self.mask is None and mask is not None: - thisframe[mask] = 0 - - # assign output - if sumImg: - if sumImgCallable: - imgOut = sumImg(imgOut, thisframe) - else: - imgOut = imgOut + thisframe * nFramesInv - elif multiframe: - imgOut[i, :, :] = thisframe[:, :] - 'end of loop over nframes' - - if sumImg: - # imgOut = imgOut / nframes # now taken care of above - pass - elif not multiframe: - imgOut = thisframe - - if multiframe: - 'make iFrame a list so that omega or whatever can be averaged appropriately' - self.iFrame = iFrameList - return imgOut - def __log(self, message): - if self.__debug: - print self.__location+' : '+message - return - def __readNext(self, nskip=0): - - if self.img is None: - raise RuntimeError, 'no image file set' - - nHave = 0 - - nskipThis = nskip - while self.nFramesRemain+nHave - nskipThis < 1: - 'not enough frames left in this file' - nskipThis = nskipThis - self.nFramesRemain - self.nFramesRemain = 0 # = self.nFramesRemain - self.nFramesRemain - self.__nextFile() - if nskipThis > 0: - # advance counter past empty frames - self.img.seek(self.nbytesFrame*nskipThis, 1) - self.nFramesRemain -= nskipThis - - # grab current frame - data = num.fromfile(self.img, **self.__readArgs) - data = num.array(data, **self.__castArgs) - self.nFramesRemain -= 1 - - return data - def __call__(self, *args, **kwargs): - return self.read(*args, **kwargs) - def close(self): - # if already have a file going, close it out - if self.img is not None: - self.img.close() - return - """ - getReadDtype function replaced by dtypeRead property - """ - @classmethod - def maxVal(cls, dummy): - 'maximum value that can be stored in the image pixel data type' - # dtype = reader._ReadGE__frame_dtype - # maxInt = num.iinfo(cls.__frame_dtype_read).max # bigger than it really is - maxInt = 2 ** 14 - return maxInt - @classmethod - def getNFramesFromFileInfo(cls, fileInfo, lessEmpty=True): - fileList = 
cls.__convertFileInfo(fileInfo) - nFramesTot = 0 - for fname, nempty in fileList: - fileBytes = os.stat(fname).st_size - nFrames = cls.__getNFrames(fileBytes) - if lessEmpty: - nFrames -= nempty - nFramesTot += nFrames - return nFramesTot - - def indicesToMask(self, indices): - """ - Indices can be a list of indices, as from makeIndicesTThRanges - """ - mask = self.getEmptyMask() - if hasattr(indices,'__len__'): - for indThese in indices: - mask[indThese] = True - else: - mask[indices] = True - return mask - -class ReadMar165(Framer2DRC): - """ - placeholder; not yet really implemented - - """ - __frame_dtype_read = 'uint16' - __frame_dtype_dflt = 'int16' # good for doing subtractions - def __init__(self, mode): - if not isinstance(mode, int) or not [1,2,4,8].count(mode): - raise RuntimeError, 'unknown mode : '+str(mode) - - self.__mode = mode - self.__idim = mar165IDim(mode) - return - def __call__(self, filename): - if not haveImageModule: - msg = "PIL Image module is required for this operation, "\ - "but not loaded\n" - raise NameError(msg) - - i = Image.open(filename, mode='r') - a = num.array(i, dtype=self.__frame_dtype_read) - frame = num.array(a, dtype=self.__frame_dtype_dflt) - return frame - - -class ReadMar165NB1(ReadMar165): - def __init__(self, *args, **kwargs): - ReadMar165.__init__(self, 1, *args, **kwargs) - return -class ReadMar165NB2(ReadMar165): - def __init__(self, *args, **kwargs): - ReadMar165.__init__(self, 2, *args, **kwargs) - return -class ReadMar165NB3(ReadMar165): - def __init__(self, *args, **kwargs): - ReadMar165.__init__(self, 3, *args, **kwargs) - return -class ReadMar165NB4(ReadMar165): - def __init__(self, *args, **kwargs): - ReadMar165.__init__(self, 4, *args, **kwargs) - return - class LineStyles: """ do not want to just cycle through default plot line colors, as end up with black lines @@ -3229,13 +2057,13 @@ def __init__(self, return def set_ncols(self, ncols): - raise RuntimeError, 'set of ncols not allowed' + self.__ncols = ncols def get_ncols(self): return self.__ncols ncols = property(get_ncols, set_ncols, None) def set_pixelPitch(self, pixelPitch): - raise RuntimeError, 'set of pixelPitch not allowed' + self.__pixelPitch = pixelPitch def get_pixelPitch(self): return self.__pixelPitch pixelPitch = property(get_pixelPitch, set_pixelPitch, None) @@ -3247,7 +2075,7 @@ def get_pixelPitchUnit(self): pixelPitchUnit = property(get_pixelPitchUnit, set_pixelPitchUnit, None) def set_nrows(self, nrows): - raise RuntimeError, 'set of nrows not allowed' + self.__nrows = nrows def get_nrows(self): return self.__nrows nrows = property(get_nrows, set_nrows, None) @@ -3536,7 +2364,7 @@ def pixelIndicesOfCartesianCoords(self, x, y, ROI=None): def get_tVec_d(self): """ calculate tVec_d from old [xc, yc, D] parameter spec - + as loaded the params have 2 columns: [val, bool] """ det_origin = 0.5*self.extent # (X, Y) (cols, rows) in this case @@ -3550,7 +2378,7 @@ def get_tVec_d(self): def set_tVec_d(self): raise RuntimeError, 'calculated property; cannot set' tVec_d = property(get_tVec_d, set_tVec_d, None) - + def makeNew(self, *args, **kwargs): kwargs.setdefault('getDParamDflt', self.getDParamDflt) kwargs.setdefault('setDParamZero', self.setDParamZero) @@ -3565,8 +2393,8 @@ def makeNew(self, *args, **kwargs): """ REAL GEOMETRIC STUFF BELOW -- PROCEED AT OWN RISK """ - - + + def angToXYO(self, tth, eta_l, *args, **kwargs): """ opposite of xyoToAng @@ -3619,7 +2447,7 @@ def angToXYO(self, tth, eta_l, *args, **kwargs): tVec_c = zvec3 else: tVec_c = 
num.atleast_1d(self.pVec).flatten().reshape(3, 1) - + if self.chiTilt is None: chi = 0. else: @@ -3655,7 +2483,7 @@ def angToXYO(self, tth, eta_l, *args, **kwargs): xy_det = num.empty(xy_cen.shape) xy_det[:, 0] = xy_cen[:, 0] + 0.5*self.extent[0] - self.xc xy_det[:, 1] = xy_cen[:, 1] + 0.5*self.extent[1] - self.yc - P4_d = num.vstack([xy_det.T, nzeros]) + P4_d = num.vstack([xy_det.T, nzeros]) # obselete # # make center-based cartesian coord's # obselete # # - SHOULD BE IN PIXEL PITCH UNITS (MM) @@ -3664,7 +2492,7 @@ def angToXYO(self, tth, eta_l, *args, **kwargs): # obselete # if self.pVec is None: # obselete # XC = nzeros # obselete # YC = nzeros - # obselete # + # obselete # # obselete # D = num.tile(self.workDist, numPts) # obselete # else: # obselete # assert len(ome) == numPts, 'with precession, omega argument consistent with ' \ @@ -3674,25 +2502,25 @@ def angToXYO(self, tth, eta_l, *args, **kwargs): # obselete # # - ome is taken as a CCW (+) rotation of the SAMPLE FRAME about Y # obselete # # - when the BASIS is transformed by R, vector comp's must transform by R' # obselete # R_s2l = rotMatOfExpMap( num.tile(ome, (3, 1)) * num.tile(self.Yl, (1, numPts)) ) - # obselete # + # obselete # # obselete # # array of rotated precession vector components # obselete # if not hasattr(self.pVec, '__len__'): # obselete # raise RuntimeError, 'pVec must be array-like' - # obselete # + # obselete # # obselete # self.pVec = num.asarray(self.pVec) - # obselete # + # obselete # # obselete # grainCM_l = num.dot(R_s2l, self.pVec.reshape(3, 1)) # obselete # if grainCM_l.ndim == 3: # obselete # grainCM_l = grainCM_l.squeeze().T - # obselete # + # obselete # # obselete # XC = grainCM_l[0, :] # obselete # YC = grainCM_l[1, :] - # obselete # + # obselete # # obselete # D = self.workDist + grainCM_l[2, :] # now array of D's - # obselete # + # obselete # # obselete # # make radii # obselete # rho_l = D * num.tan(tth) - # obselete # + # obselete # # obselete # # # obselete # # ------- ASSIGN POINT COORD'S AND FORM ROTATION # obselete # # @@ -3700,19 +2528,19 @@ def angToXYO(self, tth, eta_l, *args, **kwargs): # obselete # # - the common frame for arithmatic is the scattering frame # obselete # P1 = num.vstack([XC, YC, D]) # obselete # P2 = num.zeros((3, numPts)) - # obselete # + # obselete # # obselete # # tilt calculations moved into setTilt - # obselete # + # obselete # # obselete # # Convert to cartesian coord's in lab frame # obselete # P3 = num.vstack( [ rho_l * num.cos(eta_l) + XC, rho_l * num.sin(eta_l) + YC, nzeros ] ) - # obselete # + # obselete # # obselete # # # obselete # # ------- SOLVE FOR RAY-PLANE INTERSECTION # obselete # # # obselete # u = num.tile( num.dot(self.N.T, (P2 - P1)) / num.dot(self.N.T, P3 - P1), (3, 1) ) - # obselete # + # obselete # # obselete # P4_l = P1 + u * (P3 - P1) - # obselete # + # obselete # # obselete # P4_d = num.dot(self.ROT_l2d.T, P4_l) if applyRadialDistortion: @@ -3843,7 +2671,7 @@ def xyoToAng(self, x0, y0, *args, **kwargs): if applyRadialDistortion: X_d, Y_d = self.radialDistortion(x0, y0, invert=False) xy_det = num.vstack([X_d.flatten(), Y_d.flatten()]).T - + if len(ome) == 0: tth_eta, gVec_l = xfcapi.detectorXYToGvec(xy_det, self.ROT_l2d, I3, @@ -3869,14 +2697,14 @@ def xyoToAng(self, x0, y0, *args, **kwargs): # obselete # # where [0, 0] is the lower left corner of detector # obselete # if inputPixelUnits: # obselete # x0, y0 = self.cartesianCoordsOfPixelIndices(x0, y0) - # obselete # + # obselete # # obselete # if self.pVec is None: # obselete # X_d = x0 - 
self.xc # is 1-d! # obselete # Y_d = y0 - self.yc # is 1-d! - # obselete # + # obselete # # obselete # XC = nzeros # obselete # YC = nzeros - # obselete # + # obselete # # obselete # D = num.tile(self.workDist, numPts) # obselete # else: # obselete # assert len(ome) == numPts, 'with precession, omega argument consistent with ' \ @@ -3886,32 +2714,32 @@ def xyoToAng(self, x0, y0, *args, **kwargs): # obselete # # - ome is taken as a CCW (+) rotation of the SAMPLE FRAME about Y # obselete # # - when the BASIS is transformed by R, vector comp's must transform by R' # obselete # R_s2l = rotMatOfExpMap( num.tile(ome, (3, 1)) * num.tile(self.Yl, (1, numPts)) ) - # obselete # + # obselete # # obselete # if not hasattr(self.pVec, '__len__'): # obselete # raise RuntimeError, 'pVec must be array-like' - # obselete # + # obselete # # obselete # self.pVec = num.asarray(self.pVec) - # obselete # + # obselete # # obselete # # array of rotated precession vector components # obselete # grainCM_l = num.dot(R_s2l, self.pVec.reshape(3, 1)) # obselete # if grainCM_l.ndim == 3: # obselete # grainCM_l = grainCM_l.squeeze().T - # obselete # + # obselete # # obselete # # precession-corrected polar detector coord's # obselete # # X_d = x0 - (self.xc + grainCM_l[0, :]) # is 1-d! # obselete # # Y_d = y0 - (self.yc + grainCM_l[1, :]) # is 1-d! # obselete # X_d = x0 - self.xc # is 1-d! # obselete # Y_d = y0 - self.yc # is 1-d! - # obselete # + # obselete # # obselete # XC = grainCM_l[0, :] # obselete # YC = grainCM_l[1, :] - # obselete # + # obselete # # obselete # D = self.workDist + grainCM_l[2, :] # now array of D's - # obselete # + # obselete # # obselete # if applyRadialDistortion: # obselete # # apply distortion # obselete # X_d, Y_d = self.radialDistortion(X_d, Y_d, invert=False) - # obselete # + # obselete # # obselete # # # obselete # # ------- ASSIGN POINT COORD'S AND FORM ROTATION # obselete # # @@ -3920,38 +2748,38 @@ def xyoToAng(self, x0, y0, *args, **kwargs): # obselete # P1 = num.vstack([XC, YC, D]) # obselete # # P2 = num.vstack([XC, YC, nzeros]) # obselete # P2 = num.zeros((3, numPts)) - # obselete # + # obselete # # obselete # # tilt calculations moved into setTilt - # obselete # + # obselete # # obselete # # full 3-d components in tilted the detector frame # obselete # P4_d = num.vstack( (X_d, Y_d, nzeros) ) - # obselete # + # obselete # # obselete # # rotate components into the lab frame # obselete # P4_l = num.dot(self.ROT_l2d, P4_d) - # obselete # + # obselete # # obselete # # apply translation to get equations of diffracted rays in lab frame # obselete # rays = P4_l - P1 - # obselete # + # obselete # # obselete # # solve for P3 coord's in lab frame # obselete # u = num.tile( num.dot(self.N.T, (P2 - P1)) / num.dot(self.N.T, rays), (3, 1) ) - # obselete # - # obselete # + # obselete # + # obselete # # obselete # P3 = P1 + u * rays - # obselete # + # obselete # # obselete # # X-Y components of P3 in lab frame # obselete # X_l = P3[0, :] - XC # obselete # Y_l = P3[1, :] - YC - # obselete # + # obselete # # obselete # # polar coords in lab frame # obselete # rho_l = num.sqrt(X_l*X_l + Y_l*Y_l) # obselete # eta_l = num.arctan2(Y_l, X_l) - # obselete # + # obselete # # obselete # # get two-theta from dot products with lab-frame beam direction # obselete # dotProds = num.dot(-self.Zl.T, unitVector(rays)).squeeze() - # obselete # + # obselete # # obselete # # two-theta # obselete # measTTH = arccosSafe(dotProds) - # obselete # + # obselete # # obselete # # transform data # obselete # tmpData = 
num.vstack( [measTTH, eta_l] ) @@ -4040,9 +2868,6 @@ def makeTThRanges(self, planeData, cullDupl=False): def makeIndicesTThRanges(self, planeData, cullDupl=False): """ return a list of indices for sets of overlaping two-theta ranges; - to plot, can do something like: - mask = self.reader.getEmptyMask() - mask[indices] = True With cullDupl set true, eliminate HKLs with duplicate 2-thetas """ @@ -4902,7 +3727,7 @@ def __init__(self, *args, **kwargs): self.mode = mode Detector2DRC.__init__(self, - nrows, ncols, pixelPitch, + ncols, nrows, pixelPitch, self.__vfu, self.__vdk, reader, *args, **kwargs) @@ -4932,9 +3757,9 @@ class DetectorGeomGE(Detector2DRC): __vdk = 1800 # made up # 200 x 200 micron pixels __pixelPitch = PIXEL # in mm - __idim = ReadGE._ReadGE__idim - __nrows = ReadGE._ReadGE__nrows - __ncols = ReadGE._ReadGE__ncols + __idim = max(NROWS, NCOLS) + __nrows = NROWS + __ncols = NCOLS __dParamDflt = [ 0.0, 0.0, 0.0, 2.0, 2.0, 2.0] __dParamZero = [ 0.0, 0.0, 0.0, 2.0, 2.0, 2.0] __dParamScalings = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] @@ -4946,7 +3771,15 @@ def __init__(self, *args, **kwargs): if reader is None: readerKWArgs = kwargs.pop('readerKWArgs', {}) reader = ReadGE(None, **readerKWArgs) - + else: + self.__nrows = self.__idim = reader.nrows + self.__ncols = reader.ncols + self.__pixelPitch = reader.pixelPitch + # self.__pixelPitch = kwargs.pop('pixelPitch', 0.2) + # self.__nrows = kwargs.pop('nrows', 2048) + # self.__ncols = kwargs.pop('ncols', 2048) + # self.__idim = max(self.__nrows, self.__ncols) + print self.__pixelPitch Detector2DRC.__init__(self, self.__ncols, self.__nrows, self.__pixelPitch, self.__vfu, self.__vdk, @@ -4988,9 +3821,7 @@ class DetectorGeomFrelon(Detector2DRC): # 50 X 50 micron pixels __pixelPitch = 0.05 # in mm - __idim = ReadGE._ReadGE__idim - __nrows = ReadGE._ReadGE__nrows - __ncols = ReadGE._ReadGE__ncols + __ncols = __nrows = __idim = 2048 __dParamDflt = [ 0.0, 0.0, 0.0, 2.0, 2.0, 2.0] __dParamZero = [ 0.0, 0.0, 0.0, 2.0, 2.0, 2.0] __dParamScalings = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] @@ -5387,18 +4218,6 @@ def fitProcedureA(self, planeData, framesQuad, iRefQuad=0, self.setQuadOffsets(iRefQuad) return -def getOmegaMMReaderList(readerList, overall=False): - """ - get omega min/max information from a list of readers - """ - retval = [] - for reader in num.atleast_1d(readerList): - omegaMin, omegaMax = reader.getOmegaMinMax() - retval.append((omegaMin,omegaMax)) - if overall: - retval = (min(zip(*retval)[0]), max(zip(*retval)[1])) - return retval - # ============================== Utility functions for instantiating detectors # def detectorList(): @@ -5442,17 +4261,6 @@ def newDetector(detectorType, *args, **kwargs): return d -def newGenericReader(ncols, nrows, *args, **kwargs): - ''' - currently just returns a Framer2DRC - ''' - - # retval = Framer2DRC(ncols, nrows, **kwargs) - filename = kwargs.pop('filename', None) - retval = ReadGeneric(filename, ncols, nrows, *args, **kwargs) - - return retval - def newGenericDetector(ncols, nrows, pixelPitch, *args, **kwargs): """ If reader is passed as None, then a generic reader is created diff --git a/hexrd/xrd/distortion.py b/hexrd/xrd/distortion.py index 4e79d5fd..47f0766b 100644 --- a/hexrd/xrd/distortion.py +++ b/hexrd/xrd/distortion.py @@ -100,17 +100,22 @@ def _ge_41rt_inverse_distortion(out, in_, rhoMax, params): xi, yi = in_[:, 0], in_[:,1] ri = np.sqrt(xi*xi + yi*yi) - if ri < sqrt_epsf: - ri_inv = 0.0 - else: - ri_inv = 1.0/ri + # !!! 
adding fix for TypeError when processing a list of coords
+    zfix = []
+    if np.any(ri < sqrt_epsf):
+        zfix = ri < sqrt_epsf
+        ri[zfix] = 1.0
+    ri_inv = 1.0/ri
+    ri_inv[zfix] = 0.
+
     sinni = yi*ri_inv
     cosni = xi*ri_inv
     ro = ri
     cos2ni = cosni*cosni - sinni*sinni
     sin2ni = 2*sinni*cosni
     cos4ni = cos2ni*cos2ni - sin2ni*sin2ni
-
+
+    # FIXME: looks like we have a problem here; should iterate over single coord pairs?
     for i in range(maxiter):  # newton solver iteration
         ratio = ri*rxi
         fx = (p0*ratio**p3*cos2ni +
@@ -136,11 +141,12 @@ def _ge_41rt_distortion(out, in_, rhoMax, params):
     rxi = 1.0/rhoMax

     xi, yi = in_[:, 0], in_[:,1]
+
+    # !!! included fix on ValueError for array-like in_
     ri = np.sqrt(xi*xi + yi*yi)
-    if ri < sqrt_epsf:
-        ri_inv = 0.0
-    else:
-        ri_inv = 1.0/ri
+    ri[ri < sqrt_epsf] = np.inf
+    ri_inv = 1.0/ri
+
     sinni = yi*ri_inv
     cosni = xi*ri_inv
     cos2ni = cosni*cosni - sinni*sinni
diff --git a/hexrd/xrd/experiment.py b/hexrd/xrd/experiment.py
index 0c588cfd..7b165606 100644
--- a/hexrd/xrd/experiment.py
+++ b/hexrd/xrd/experiment.py
@@ -1,5 +1,5 @@
 #! /usr/bin/env python
-# ============================================================
+# =============================================================================
 # Copyright (c) 2012, Lawrence Livermore National Security, LLC.
 # Produced at the Lawrence Livermore National Laboratory.
 # Written by Joel Bernier and others.
@@ -11,9 +11,9 @@
 #
 # Please also see the file LICENSE.
 #
-# This program is free software; you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License (as published by the Free Software
-# Foundation) version 2.1 dated February 1999.
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License (as published by the Free
+# Software Foundation) version 2.1 dated February 1999.
 #
 # This program is distributed in the hope that it will be useful, but
 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
@@ -24,51 +24,52 @@
 # License along with this program (see file LICENSE); if not, write to
 # the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 # Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
-# ============================================================
+# =============================================================================
 #
-######################################################################
-## TOP-LEVEL MODULES AND SOME GLOBALS
-##
-"""Module for wrapping the main functionality of the xrd package.
+# =============================================================================
+# Module for wrapping the main functionality of the xrd package.
+#
+# The Experiment class is the primary interface. Other classes are helpers.
+# =============================================================================

-The Experiment class is the primary interface. Other classes
-are helpers.
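Both distortion.py hunks above replace a scalar if/else with a vectorized guard so that array-valued inputs no longer raise on the comparison. A minimal sketch of the same masking idiom; safe_inverse_radius is a hypothetical helper name, not a hexrd function:

```python
import numpy as np

sqrt_epsf = np.sqrt(np.finfo(float).eps)

def safe_inverse_radius(xy):
    # points at (or numerically near) the origin get ri_inv = 0
    # instead of dividing by zero; ri gets a placeholder value of 1.0
    # there, matching the inverse-distortion fix above
    xi, yi = xy[:, 0], xy[:, 1]
    ri = np.sqrt(xi*xi + yi*yi)
    zfix = ri < sqrt_epsf          # boolean mask of degenerate points
    ri = np.where(zfix, 1.0, ri)   # placeholder radius avoids 0/0
    ri_inv = 1.0 / ri
    ri_inv[zfix] = 0.0
    return ri, ri_inv

xy = np.array([[0.0, 0.0], [3.0, 4.0]])
ri, ri_inv = safe_inverse_radius(xy)
assert ri_inv[0] == 0.0 and abs(ri_inv[1] - 0.2) < 1e-12
```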
-""" -import sys, os, copy import cPickle -import numpy +import os +import sys -numpy.seterr(invalid='ignore') +import numpy -from scipy.linalg import inv -from scipy.linalg.matfuncs import logm -from scipy import optimize +from scipy import optimize from hexrd import matrixutil from hexrd import valunits from hexrd import data from hexrd.xrd import detector -from hexrd.xrd import grain as G +from hexrd.xrd import grain as G from hexrd.xrd import indexer -from hexrd.xrd import rotations as ROT +from hexrd.xrd import rotations as ROT from hexrd.xrd import spotfinder as SPT -from hexrd.xrd import xrdutil -from hexrd.xrd.hydra import Hydra +from hexrd.xrd.hydra import Hydra from hexrd.xrd.material import Material, loadMaterialList from math import pi + +numpy.seterr(invalid='ignore') r2d = 180. / pi d2r = pi / 180. +OMEGA_PERIOD = (-pi, pi) +coarse_ang_tol = valunits.valWUnit('coarse tol', 'angle', 1.0, 'degrees') +fine_ang_tol = valunits.valWUnit('fine tol', 'angle', 0.5, 'degrees') + +# ============================================================================= +# Defaults (will eventually make to a config file) +# ============================================================================= -# -# Defaults (will eventually make to a config file) -# HERE = os.path.dirname(__file__) -toMatFile = os.path.join(HERE, '..', 'data', 'materials.cfg') -DFLT_MATFILE = os.path.normpath(toMatFile) # check whether it exists -matfileOK = os.access(DFLT_MATFILE, os.F_OK) +toMatFile = os.path.join(HERE, '..', 'data', 'materials.cfg') +DFLT_MATFILE = os.path.normpath(toMatFile) # check whether it exists +matfileOK = os.access(DFLT_MATFILE, os.F_OK) if not matfileOK: # use relative path DFLT_MATFILE = os.path.join('data', 'materials.cfg') pass @@ -76,37 +77,40 @@ if not matfileOK: # set to null DFLT_MATFILE = '' pass -# -# __all__ = ['Experiment', 'FitModes', 'ImageModes', 'ReaderInput', 'CalibrationInput', 'PolarRebinOpts', 'saveExp', 'loadExp'] -# + + # ---------------------------------------------------CLASS: FitModes # class FitModes(object): """Indicators for single-frame or multiframe data files""" # - DIRECT = 0 + DIRECT = 0 MULTIRING = 1 # DEFAULT = MULTIRING # pass # end class # -# -----------------------------------------------END CLASS:FitModes +# -----------------------------------------------END CLASS: FitModes + + # ---------------------------------------------------CLASS: ImageModes # class ImageModes(object): """Indicators for single-frame or multiframe data files""" # SINGLE_FRAME = 0 - MULTI_FRAME = 1 + MULTI_FRAME = 1 # pass # end class # # -----------------------------------------------END CLASS: ImageModes + + # ---------------------------------------------------CLASS: Experiment # class Experiment(object): @@ -125,7 +129,7 @@ def __init__(self, cfgFile=data.materials, matFile=data.all_materials): # # Reader inputs and info # - self.__active_rdr = ReaderInput() + self.__active_rdr = ReaderInput() self.__savedReaders = [self.__active_rdr] # self.__active_img = None @@ -139,7 +143,7 @@ def __init__(self, cfgFile=data.materials, matFile=data.all_materials): # # Detector and calibration information. # - self._detInfo = DetectorInfo() + self._detInfo = DetectorInfo() self._calInput = CalibrationInput(self.matList[0]) # # Spots information. 
@@ -253,37 +257,40 @@ def refine_grains(self, minCompl, nSubIter=3, doFit=False, - etaTol=valunits.valWUnit('etaTol', 'angle', 1.0, 'degrees'), - omeTol=valunits.valWUnit('etaTol', 'angle', 1.0, 'degrees'), + etaTol=coarse_ang_tol, + omeTol=coarse_ang_tol, fineDspTol=5.0e-3, - fineEtaTol=valunits.valWUnit('etaTol', 'angle', 0.5, 'degrees'), - fineOmeTol=valunits.valWUnit('etaTol', 'angle', 0.5, 'degrees')): + fineEtaTol=fine_ang_tol, + fineOmeTol=fine_ang_tol): """ refine a grain list """ # refine grains formally using a multi-pass refinement - nGrains = self.rMats.shape[0] + nGrains = self.rMats.shape[0] grainList = [] for iG in range(nGrains): - #indexer.progress_bar(float(iG) / nGrains) + # indexer.progress_bar(float(iG) / nGrains) grain = G.Grain(self.spots_for_indexing, - rMat=self.rMats[iG, :, :], - etaTol=etaTol, - omeTol=omeTol, - claimingSpots=False) + rMat=self.rMats[iG, :, :], + etaTol=etaTol, + omeTol=omeTol, + claimingSpots=False) if grain.completeness > minCompl: for i in range(nSubIter): grain.fit() - s1, s2, s3 = grain.findMatches(etaTol=etaTol, omeTol=omeTol, strainMag=fineDspTol, - updateSelf=True, claimingSpots=False, doFit=doFit, - testClaims=True) + s1, s2, s3 = grain.findMatches( + etaTol=etaTol, omeTol=omeTol, strainMag=fineDspTol, + updateSelf=True, claimingSpots=False, doFit=doFit, + testClaims=True) if grain.completeness > minCompl: grainList.append(grain) pass pass pass self.grainList = grainList - self._fitRMats = numpy.array([self.grainList[i].rMat for i in range(len(grainList))]) + self._fitRMats = numpy.array( + [self.grainList[i].rMat for i in range(len(grainList))] + ) return def saveRMats(self, f): @@ -324,8 +331,9 @@ def export_grainList(self, f, omeTol = self.index_opts.omeTol * d2r if sort: - loop_idx = numpy.argsort([self.grainList[i].completeness - for i in range(len(self.grainList))])[::-1] + loop_idx = numpy.argsort( + [grain.completeness for grain in self.grainList] + )[::-1] else: loop_idx = range(len(self.grainList)) pass @@ -336,10 +344,11 @@ def export_grainList(self, f, # grain = self.grainList[iG] print >> fid, '#####################\n# grain %d\n#' % (iG) - s1, s2, s3 = grain.findMatches(etaTol=etaTol, omeTol=omeTol, strainMag=dspTol, - updateSelf=True, claimingSpots=True, doFit=doFit, filename=fid) - print >> fid, '#\n# final completeness for grain %d: %g%%\n' % (iG, grain.completeness*100) + \ - '#####################\n' + s1, s2, s3 = grain.findMatches( + etaTol=etaTol, omeTol=omeTol, strainMag=dspTol, + updateSelf=True, claimingSpots=True, doFit=doFit, filename=fid) + print >> fid, '#\n# final completeness for grain %d: %g%%\n'\ + % (iG, grain.completeness*100) + '#####################\n' pass fid.close() @@ -351,7 +360,7 @@ def simulateGrain(self, vMat=numpy.r_[1., 1., 1., 0., 0., 0.], planeData=None, detector=None, - omegaRanges=[(-pi, pi),], + omegaRanges=[OMEGA_PERIOD], output=None): """ Simulate a grain with choice of active material @@ -368,8 +377,8 @@ def simulateGrain(self, elif isinstance(output, str): fid = open(output, 'w') else: - raise RuntimeError, "output must be a file object or string" - sg.findMatches(filename=output) + raise RuntimeError("output must be a file object or string") + sg.findMatches(filename=fid) return sg def _run_grainspotter(self): @@ -395,7 +404,7 @@ def _run_fiber_search(self): nCPUs=iopts.nCPUs, quitAfter=iopts.quitAfter, outputGrainList=True) - iopts._fitRMats = retval[0] # HUH?! + iopts._fitRMats = retval[0] # WTF!!! 
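The export_grainList hunk above tightens the sort key into a direct iteration over grains; the ordering logic itself is unchanged. A self-contained check of that descending-completeness argsort, where Grain is a minimal stand-in for hexrd.xrd.grain.Grain:

```python
import numpy as np

class Grain(object):                 # stand-in with just the attribute we need
    def __init__(self, completeness):
        self.completeness = completeness

grainList = [Grain(c) for c in (0.2, 0.9, 0.5)]

# descending sort by completeness, as in export_grainList above
loop_idx = np.argsort([g.completeness for g in grainList])[::-1]
assert [grainList[i].completeness for i in loop_idx] == [0.9, 0.5, 0.2]
```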
self.rMats = retval[0] self.grainList = retval[1] return @@ -410,6 +419,7 @@ def run_indexer(self): self._run_grainspotter() return + # # ==================== Spots # @@ -467,7 +477,10 @@ def spots_for_indexing(self): @property def raw_spots(self): - """(get-only) spots from image before culling and association with rings""" + """ + (get-only) spots from image before culling + and association with rings + """ if not hasattr(self, '_spots'): self._spots = [] return self._spots @@ -539,7 +552,7 @@ def newDetector(self, gp, dp): *dp* - initial distortion parameters """ - self._detInfo = DetectorInfo(gParms=gp, dParms=dp) + self._detInfo = DetectorInfo(gParms=gp, dParms=dp) return @@ -581,13 +594,14 @@ def loadDetector(self, fname): det_class_str = lines[i] f.seek(0) if det_class_str is None: - raise RuntimeError, "detector class label not recongined in file!" + raise RuntimeError("detector class label not recongized in file!") else: plist_rflags = numpy.loadtxt(f) plist = plist_rflags[:, 0] rflag = numpy.array(plist_rflags[:, 1], dtype=bool) - exec_str = "DC = detector." + det_class_str.split('.')[-1].split("'")[0] + exec_str = "DC = detector." + \ + det_class_str.split('.')[-1].split("'")[0] exec(exec_str) gp = plist[:6].tolist() @@ -595,19 +609,17 @@ def loadDetector(self, fname): dp = None else: dp = plist[6:].tolist() - self._detInfo = DetectorInfo(gParms=gp, dParms=dp) + self._detInfo = DetectorInfo(gParms=gp, dParms=dp) self.detector.setupRefinement(rflag) self._detInfo.refineFlags = rflag f.close() return - # # ==================== Calibration Input # # property: calInput - @property def calInput(self): """(get only) Calibration input instance""" @@ -616,7 +628,6 @@ def calInput(self): # ==================== Hydra # # property: hydra - @property def hydra(self): """(read only) hydra image class""" @@ -668,8 +679,7 @@ def _set_matList(self, v): return - matList = property(_get_matList, _set_matList, None, - "List of materials") + matList = property(_get_matList, _set_matList, None, "List of materials") @property def matNames(self): @@ -686,7 +696,7 @@ def newMaterial(self): self._active_mat = Material() # find name not already in list - n = self._active_mat.name + n = self._active_mat.name self._active_mat.name = newName(n, self.matNames) # self._matList.append(self.activeMaterial) @@ -774,9 +784,9 @@ def newReader(self): Changes name if necessary. 
""" - self.__active_rdr = ReaderInput() + self.__active_rdr = ReaderInput() # find name not already in list - n = self.__active_rdr.name + n = self.__active_rdr.name nl = [r.name for r in self.__savedReaders] self.__active_rdr.name = newName(n, nl) # @@ -787,7 +797,7 @@ def newReader(self): def getSavedReader(self, which): """Get a specified reader""" if isinstance(which, int): - return self.__savedReaders[v] + return self.__savedReaders[which] else: # which is a string for r in self.__savedReaders: @@ -812,6 +822,7 @@ def savedReaders(self): def readerNames(self): """Return list of saved readers""" return [r.name for r in self.__savedReaders] + # # ==================== Image Info # @@ -826,24 +837,15 @@ def numFramesTotal(self): return self.__numFrame @property - def activeImage(self): # to be removed (use active_img instead) + def activeImage(self): # to be removed (use active_img instead) """Active image""" return self.active_img - # - # ==================== Calibration - # - # property: calInput - @property - def calInput(self): - """(read only) Calibration input data""" - return self._calInput # # ========== Public Methods # def readerListAddCurrent(self): """Add current list to list of saved readers""" - return def readImage(self, frameNum=1): @@ -858,7 +860,7 @@ def readImage(self, frameNum=1): # Now read the current frame # aggMode = self.activeReader.aggModeOp - nrFrame = self.activeReader.getNumberOfFrames() # number of reader frames + nrFrame = self.activeReader.getNumberOfFrames() if aggMode: rdFrames = nrFrame self.__numFrame = 1 @@ -878,22 +880,21 @@ def readImage(self, frameNum=1): % (frameNum, nrFrame) raise ValueError(msg) - #if (frameNum == self.__curFrame): return + # if (frameNum == self.__curFrame): return # NOTE: instantiate new reader even when requested frame is current # frame because reader properties may have changed - if haveReader and (frameNum > self.__curFrame): nskip = frameNum - self.__curFrame - 1 - self.__active_img = self.__active_reader.read(nframes= rdFrames, - nskip = nskip, - sumImg = aggMode) + self.__active_img = self.__active_reader.read(nframes=rdFrames, + nskip=nskip, + sumImg=aggMode) else: # instantiate new reader self.__active_reader = self.activeReader.makeReader() nskip = frameNum - 1 - self.__active_img = self.__active_reader.read(nframes= rdFrames, - nskip = nskip, - sumImg = aggMode) + self.__active_img = self.__active_reader.read(nframes=rdFrames, + nskip=nskip, + sumImg=aggMode) pass @@ -904,7 +905,8 @@ def readImage(self, frameNum=1): return def calibrate(self, log=None): - """Calibrate the detector + """ + Calibrate the detector Currently, uses polar rebin only. 
""" @@ -924,22 +926,24 @@ def calibrate(self, log=None): log.write('done') return + # # ==================== Polar Rebinning (Caking) # def polarRebin(self, opts): - """Rebin the image according to certain parameters + """ + Rebin the image according to certain parameters opts -- an instance of PolarRebinOpts """ - - img_info = det.polarRebin(self.activeImage, opts.kwArgs) - + img_info = self.detector.polarRebin(self.activeImage, opts.kwArgs) return img_info # pass # end class # # -----------------------------------------------END CLASS: Experiment + + # ---------------------------------------------------CLASS: geReaderInput # class ReaderInput(object): @@ -991,6 +995,9 @@ def __init__(self, name='reader', desc='no description'): self.imageDir = '' self.imageNames = [] self.imageNameD = dict() + self.imageFmt = None + self.imageOpts = {} + self.pixelPitch = 0.2 # Dark file self.darkMode = ReaderInput.DARK_MODE_NONE self.darkDir = '' @@ -1005,7 +1012,6 @@ def __init__(self, name='reader', desc='no description'): def _check(self): """Check that input is ok for making a reader instance """ - # * Empty frames = 0 for single frame mode return # # ============================== API @@ -1091,63 +1097,12 @@ def getNumberOfFrames(self): return n def makeReader(self): - """Return a reader instance based on self -""" + """Return a reader instance based on self""" # check validity of input self._check() - # - # Set up image names in right format - # - fullPath = lambda fn: os.path.join(self.imageDir, fn) - numEmpty = lambda fn: self.imageNameD[fn][0] - imgInfo = [(fullPath(f), numEmpty(f)) for f in self.imageNames] - ref_reader = self.RC(imgInfo) - # - # Check for omega info - # - nfile = len(imgInfo) - dinfo = [self.imageNameD[f] for f in self.imageNames] - omin = dinfo[0][1] - if omin is not None: - odel = dinfo[nfile - 1][3] - print "omega min and delta: ", omin, odel - omargs = (valunits.valWUnit('omin', 'angle', float(omin), 'degrees'), - valunits.valWUnit('odel', 'angle', float(odel), 'degrees')) - else: - omargs = () - pass - print 'omargs: ', omargs - # - # Dark file - # - subDark = not (self.darkMode == ReaderInput.DARK_MODE_NONE) - if (self.darkMode == ReaderInput.DARK_MODE_FILE): - drkFile = os.path.join(self.darkDir, self.darkName) - elif (self.darkMode == ReaderInput.DARK_MODE_ARRAY): - drkFileName = os.path.join(self.darkDir, self.darkName) - drkFile = ref_reader.frame( - buffer=numpy.fromfile(drkFileName, - dtype=ref_reader.dtypeRead - ) - ) - else: - drkFile = None - pass - # - # Flip options - # - doFlip = not (self.flipMode == ReaderInput.FLIP_NONE) - flipArg = ReaderInput.FLIP_DICT[self.flipMode] - # - # Make the reader - # - print 'reader: \n', imgInfo, subDark, drkFile, doFlip, flipArg - r = self.RC(imgInfo, *omargs, - subtractDark = subDark, - dark = drkFile, - doFlip = doFlip, - flipArg = flipArg) + imagePath = os.path.join(self.imageDir, self.imageNames[0]) + r = self.RC(imagePath, fmt=self.imageFmt, pixelPitch=self.pixelPitch, **self.imageOpts) return r # diff --git a/hexrd/xrd/fitting.py b/hexrd/xrd/fitting.py index 8c2aafb7..baca376d 100644 --- a/hexrd/xrd/fitting.py +++ b/hexrd/xrd/fitting.py @@ -11,9 +11,9 @@ # # Please also see the file LICENSE. # -# This program is free software; you can redistribute it and/or modify it under the -# terms of the GNU Lesser General Public License (as published by the Free Software -# Foundation) version 2.1 dated February 1999. 
+# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY @@ -28,36 +28,37 @@ import numpy as np -# try: -# from scipy.optimize import basinhopping -# except: -# from scipy.optimize import leastsq from scipy import optimize -return_value_flag = None from hexrd import matrixutil as mutil -from hexrd.xrd import transforms as xf +from hexrd.xrd import transforms as xf from hexrd.xrd import transforms_CAPI as xfcapi -from hexrd.xrd import distortion as dFuncs +from hexrd.xrd import distortion as dFuncs + -epsf = np.finfo(float).eps # ~2.2e-16 -sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 +return_value_flag = None +epsf = np.finfo(float).eps # ~2.2e-16 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 -# ###################################################################### -# Module Data +# ============================================================================= +# ############## MODULE PARAMETERS +# ============================================================================= + +# FIXME: pull these things from hexrd.constants instead d2r = np.pi/180. r2d = 180./np.pi -bVec_ref = xf.bVec_ref -eta_ref = xf.eta_ref -vInv_ref = np.r_[1., 1., 1., 0., 0., 0.] +bVec_ref = xf.bVec_ref +eta_ref = xf.eta_ref +vInv_ref = np.r_[1., 1., 1., 0., 0., 0.] # for distortion -dFunc_ref = dFuncs.GE_41RT +# FIXME: distortion implementation must change +dFunc_ref = dFuncs.GE_41RT dParams_ref = [0., 0., 0., 2., 2., 2] -dFlag_ref = np.array([0, 0, 0, 0, 0, 0], dtype=bool) -dScl_ref = np.array([1, 1, 1, 1, 1, 1], dtype=float) +dFlag_ref = np.array([0, 0, 0, 0, 0, 0], dtype=bool) +dScl_ref = np.array([1, 1, 1, 1, 1, 1], dtype=float) # for sx detector cal pFlag_ref = np.array( @@ -84,21 +85,21 @@ ) # for grain parameters -gFlag_ref = np.ones(12, dtype=bool) -gScl_ref = np.ones(12, dtype=bool) +gFlag_ref = np.ones(12, dtype=bool) +gScl_ref = np.ones(12, dtype=bool) + + +# ============================================================================= +# ############## UTILITY FUNCTIONS +# ============================================================================= -""" -###################################################################### -############## UTILITY FUNCTIONS ############## -###################################################################### -""" def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, vInv=vInv_ref, beamVec=bVec_ref, etaVec=eta_ref, omePeriod=None): """ - For a given list of (x, y, ome) points, outputs the index into the results from - oscillAnglesOfHKLs, including the calculated omega values. + For a given list of (x, y, ome) points, outputs the index into the results + from oscillAnglesOfHKLs, including the calculated omega values. 
""" # get omegas for rMat_s calculation if omePeriod is not None: @@ -106,47 +107,55 @@ def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, else: meas_omes = xyo_det[:, 2] - oangs0, oangs1 = xfcapi.oscillAnglesOfHKLs(hkls_idx.T, chi, rMat_c, bMat, wavelength, - vInv=vInv, - beamVec=beamVec, - etaVec=etaVec) + oangs0, oangs1 = xfcapi.oscillAnglesOfHKLs( + hkls_idx.T, chi, rMat_c, bMat, wavelength, + vInv=vInv, + beamVec=beamVec, + etaVec=etaVec) if np.any(np.isnan(oangs0)): + # debugging + # TODO: remove this + import pdb + pdb.set_trace() nanIdx = np.where(np.isnan(oangs0[:, 0]))[0] errorString = "Infeasible parameters for hkls:\n" for i in range(len(nanIdx)): errorString += "%d %d %d\n" % tuple(hkls_idx[:, nanIdx[i]]) - raise RuntimeError, errorString + raise RuntimeError(errorString) else: # CAPI version gives vstacked angles... must be (2, nhkls) calc_omes = np.vstack([oangs0[:, 2], oangs1[:, 2]]) if omePeriod is not None: - calc_omes = np.vstack([xf.mapAngle(oangs0[:, 2], omePeriod), - xf.mapAngle(oangs1[:, 2], omePeriod)]) + calc_omes = np.vstack([xf.mapAngle(oangs0[:, 2], omePeriod), + xf.mapAngle(oangs1[:, 2], omePeriod)]) # do angular difference - diff_omes = xf.angularDifference(np.tile(meas_omes, (2, 1)), calc_omes) + diff_omes = xf.angularDifference(np.tile(meas_omes, (2, 1)), calc_omes) match_omes = np.argsort(diff_omes, axis=0) == 0 - calc_omes = calc_omes.T.flatten()[match_omes.T.flatten()] + calc_omes = calc_omes.T.flatten()[match_omes.T.flatten()] return match_omes, calc_omes + def geomParamsToInput(wavelength, tiltAngles, chi, expMap_c, tVec_d, tVec_s, tVec_c, dParams): """ + helper routing to format data into parameter list for + calibrateDetectorFromSX """ p = np.zeros(17) - p[0] = wavelength - p[1] = tiltAngles[0] - p[2] = tiltAngles[1] - p[3] = tiltAngles[2] - p[4] = tVec_d[0] - p[5] = tVec_d[1] - p[6] = tVec_d[2] - p[7] = chi - p[8] = tVec_s[0] - p[9] = tVec_s[1] + p[0] = wavelength + p[1] = tiltAngles[0] + p[2] = tiltAngles[1] + p[3] = tiltAngles[2] + p[4] = tVec_d[0] + p[5] = tVec_d[1] + p[6] = tVec_d[2] + p[7] = chi + p[8] = tVec_s[0] + p[9] = tVec_s[1] p[10] = tVec_s[2] p[11] = expMap_c[0] p[12] = expMap_c[1] @@ -157,71 +166,75 @@ def geomParamsToInput(wavelength, return np.hstack([p, dParams]) + def inputToGeomParams(p): """ + helper routing for packing parameter list from calibrateDetectorFromSX + into a dictionary """ retval = {} retval['wavelength'] = p[0] retval['tiltAngles'] = (p[1], p[2], p[3]) - retval['tVec_d'] = np.c_[p[4], p[5], p[6]].T - retval['chi'] = p[7] - retval['tVec_s'] = np.c_[p[8], p[9], p[10]].T - retval['expMap_c'] = np.c_[p[11], p[12], p[13]].T - retval['tVec_c'] = np.c_[p[14], p[15], p[16]].T - retval['dParams'] = p[17:] + retval['tVec_d'] = np.c_[p[4], p[5], p[6]].T + retval['chi'] = p[7] + retval['tVec_s'] = np.c_[p[8], p[9], p[10]].T + retval['expMap_c'] = np.c_[p[11], p[12], p[13]].T + retval['tVec_c'] = np.c_[p[14], p[15], p[16]].T + retval['dParams'] = p[17:] return retval -""" -###################################################################### -############## CALIBRATION ############## -###################################################################### -""" +# ============================================================================= +# ############## CALIBRATION FUNCTIONS +# ============================================================================= + def calibrateDetectorFromSX( - xyo_det, hkls_idx, bMat, wavelength, - tiltAngles, chi, expMap_c, - tVec_d, tVec_s, tVec_c, - vInv=vInv_ref, - 
beamVec=bVec_ref, etaVec=eta_ref, - distortion=(dFunc_ref, dParams_ref, dFlag_ref, dScl_ref), - pFlag=pFlag_ref, pScl=pScl_ref, - omePeriod=None, - factor=0.1, - xtol=sqrt_epsf, ftol=sqrt_epsf, - ): + xyo_det, hkls_idx, bMat, wavelength, + tiltAngles, chi, expMap_c, + tVec_d, tVec_s, tVec_c, + vInv=vInv_ref, + beamVec=bVec_ref, etaVec=eta_ref, + distortion=(dFunc_ref, dParams_ref, dFlag_ref, dScl_ref), + pFlag=pFlag_ref, pScl=pScl_ref, + omePeriod=None, + factor=0.1, + xtol=sqrt_epsf, ftol=sqrt_epsf): """ """ if omePeriod is not None: xyo_det[:, 2] = xf.mapAngle(xyo_det[:, 2], omePeriod) - dFunc = distortion[0] + # FIXME: this format for distortion needs to go away ASAP + dFunc = distortion[0] dParams = distortion[1] - dFlag = distortion[2] - dScl = distortion[3] - - # p = np.zeros(17) - # - # p[0] = wavelength - # p[1] = tiltAngles[0] - # p[2] = tiltAngles[1] - # p[3] = tiltAngles[2] - # p[4] = tVec_d[0] - # p[5] = tVec_d[1] - # p[6] = tVec_d[2] - # p[7] = chi - # p[8] = tVec_s[0] - # p[9] = tVec_s[1] - # p[10] = tVec_s[2] - # p[11] = expMap_c[0] - # p[12] = expMap_c[1] - # p[13] = expMap_c[2] - # p[14] = tVec_c[0] - # p[15] = tVec_c[1] - # p[16] = tVec_c[2] - # - # pFull = np.hstack([p, dParams]) + dFlag = distortion[2] + dScl = distortion[3] + + """ + p = np.zeros(17) + + p[0] = wavelength + p[1] = tiltAngles[0] + p[2] = tiltAngles[1] + p[3] = tiltAngles[2] + p[4] = tVec_d[0] + p[5] = tVec_d[1] + p[6] = tVec_d[2] + p[7] = chi + p[8] = tVec_s[0] + p[9] = tVec_s[1] + p[10] = tVec_s[2] + p[11] = expMap_c[0] + p[12] = expMap_c[1] + p[13] = expMap_c[2] + p[14] = tVec_c[0] + p[15] = tVec_c[1] + p[16] = tVec_c[2] + + pFull = np.hstack([p, dParams]) + """ pFull = geomParamsToInput( wavelength, @@ -230,11 +243,12 @@ def calibrateDetectorFromSX( dParams ) - refineFlag = np.hstack([pFlag, dFlag]) - scl = np.hstack([pScl, dScl]) - pFit = pFull[refineFlag] - fitArgs = (pFull, pFlag, dFunc, dFlag, xyo_det, hkls_idx, - bMat, vInv, beamVec, etaVec, omePeriod) + # TODO: check scaling + refineFlag = np.array(np.hstack([pFlag, dFlag]), dtype=bool) + scl = np.hstack([pScl, dScl]) + pFit = pFull[refineFlag] + fitArgs = (pFull, pFlag, dFunc, dFlag, xyo_det, hkls_idx, + bMat, vInv, beamVec, etaVec, omePeriod) results = optimize.leastsq(objFuncSX, pFit, args=fitArgs, diag=1./scl[refineFlag].flatten(), @@ -246,21 +260,26 @@ def calibrateDetectorFromSX( retval[refineFlag] = pFit_opt return retval + def objFuncSX(pFit, pFull, pFlag, dFunc, dFlag, xyo_det, hkls_idx, bMat, vInv, bVec, eVec, omePeriod, simOnly=False, return_value_flag=return_value_flag): """ """ - npts = len(xyo_det) + npts = len(xyo_det) - refineFlag = np.hstack([pFlag, dFlag]) + refineFlag = np.array(np.hstack([pFlag, dFlag]), dtype=bool) + # print(refineFlag)  # debugging output; TODO: remove # pFull[refineFlag] = pFit/scl[refineFlag] pFull[refineFlag] = pFit - dParams = pFull[-len(dFlag):] - xy_unwarped = dFunc(xyo_det[:, :2], dParams) + if dFunc is not None: + dParams = pFull[-len(dFlag):] + xys = dFunc(xyo_det[:, :2], dParams) + else: + xys = xyo_det[:, :2] # detector quantities wavelength = pFull[0] @@ -269,21 +288,29 @@ def objFuncSX(pFit, pFull, pFlag, dFunc, dFlag, tVec_d = pFull[4:7].reshape(3, 1) # sample quantities - chi = pFull[7] + chi = pFull[7] tVec_s = pFull[8:11].reshape(3, 1) # crystal quantities rMat_c = xf.makeRotMatOfExpMap(pFull[11:14]) tVec_c = pFull[14:17].reshape(3, 1) + # stretch tensor comp matrix from MV notation in SAMPLE frame + vMat_s = mutil.vecMVToSymm(vInv) + + # g-vectors: + # 1. calculate full g-vector components in CRYSTAL frame from B + # 2.
rotate into SAMPLE frame and apply stretch + # 3. rotate back into CRYSTAL frame and normalize to unit magnitude + # IDEA: make a function for this sequence of operations with option for + # choosing output frame (i.e. CRYSTAL vs SAMPLE vs LAB) gVec_c = np.dot(bMat, hkls_idx) - vMat_s = mutil.vecMVToSymm(vInv) # stretch tensor comp matrix from MV notation in SAMPLE frame - gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) # reciprocal lattice vectors in SAMPLE frame - gHat_s = mutil.unitVector(gVec_s) # unit reciprocal lattice vectors in SAMPLE frame - gHat_c = np.dot(rMat_c.T, gHat_s) # unit reciprocal lattice vectors in CRYSTAL frame + gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) + gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s)) - match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, - vInv=vInv, beamVec=bVec, etaVec=eVec, omePeriod=omePeriod) + match_omes, calc_omes = matchOmegas( + xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, + vInv=vInv, beamVec=bVec, etaVec=eVec, omePeriod=omePeriod) calc_xy = np.zeros((npts, 2)) for i in range(npts): @@ -294,64 +321,58 @@ def objFuncSX(pFit, pFull, pFlag, dFunc, dFlag, beamVec=bVec).flatten() pass if np.any(np.isnan(calc_xy)): - print "infeasible pFull: may want to scale back finite difference step size" + raise RuntimeError( + "infeasible pFull: may want to scale " + + "back finite difference step size") # return values if simOnly: + # return simulated values retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)]) else: - diff_vecs_xy = calc_xy - xy_unwarped[:, :2] - diff_ome = xf.angularDifference( calc_omes, xyo_det[:, 2] ) + # return residual vector + # IDEA: try angles instead of xys? + diff_vecs_xy = calc_xy - xys[:, :2] + diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2]) retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1) ]).flatten() if return_value_flag == 1: - retval = sum( abs(retval) ) + # return scalar sum of absolute residuals + retval = sum(abs(retval)) elif return_value_flag == 2: + # return DOF-normalized chisq + # TODO: check this calculation + denom = npts - len(pFit) - 1. + if denom != 0: + nu_fac = 1. / denom + else: + nu_fac = 1. - nu_fac = 1 / (npts - len(pFit) - 1.)
- retval = nu_fac * sum(retval**2 / abs(np.hstack([calc_xy, calc_omes.reshape(npts, 1)]).flatten())) + retval = nu_fac * sum(retval**2) return retval -""" -###################################################################### -############## GRAIN FITTING ############## -###################################################################### -""" - -def fitGrain(xyo_det, hkls_idx, bMat, wavelength, - detectorParams, - expMap_c, tVec_c, vInv, - beamVec=bVec_ref, etaVec=eta_ref, - distortion=(dFunc_ref, dParams_ref), +# ============================================================================= +# ############## GRAIN FITTING FUNCTIONS +# ============================================================================= + + +def fitGrain(gFull, instrument, reflections_dict, + bMat, wavelength, gFlag=gFlag_ref, gScl=gScl_ref, omePeriod=None, factor=0.1, xtol=sqrt_epsf, ftol=sqrt_epsf): """ """ + # FIXME: will currently fail if omePeriod is specified if omePeriod is not None: - xyo_det[:, 2] = xf.mapAngle(xyo_det[:, 2], omePeriod) - - dFunc = distortion[0] - dParams = distortion[1] - - gFull = np.hstack([expMap_c.flatten(), - tVec_c.flatten(), - vInv.flatten()]) - - gFit = gFull[gFlag] + # xyo_det[:, 2] = xf.mapAngle(xyo_det[:, 2], omePeriod) + raise RuntimeError("omePeriod handling not implemented here") - fitArgs = (gFull, gFlag, - detectorParams, - xyo_det, hkls_idx, bMat, wavelength, - beamVec, etaVec, - dFunc, dParams, - omePeriod) + gFit = gFull[gFlag] + fitArgs = (gFull, gFlag, instrument, reflections_dict, + bMat, wavelength, omePeriod) results = optimize.leastsq(objFuncFitGrain, gFit, args=fitArgs, diag=1./gScl[gFlag].flatten(), factor=0.1, xtol=xtol, ftol=ftol) @@ -362,13 +383,14 @@ def fitGrain(xyo_det, hkls_idx, bMat, wavelength, retval[gFlag] = gFit_opt return retval + def objFuncFitGrain(gFit, gFull, gFlag, - detectorParams, - xyo_det, hkls_idx, bMat, wavelength, - bVec, eVec, - dFunc, dParams, + instrument, + reflections_dict, + bMat, wavelength, omePeriod, - simOnly=False, return_value_flag=return_value_flag): + simOnly=False, + return_value_flag=return_value_flag): """ gFull[0] = expMap_c[0] gFull[1] = expMap_c[1] @@ -383,72 +405,152 @@ def objFuncFitGrain(gFit, gFull, gFlag, gFull[10] = vInv_MV[4] gFull[11] = vInv_MV[5] - detectorParams[0] = tiltAngles[0] - detectorParams[1] = tiltAngles[1] - detectorParams[2] = tiltAngles[2] - detectorParams[3] = tVec_d[0] - detectorParams[4] = tVec_d[1] - detectorParams[5] = tVec_d[2] - detectorParams[6] = chi - detectorParams[7] = tVec_s[0] - detectorParams[8] = tVec_s[1] - detectorParams[9] = tVec_s[2] + OLD CALL + objFuncFitGrain(gFit, gFull, gFlag, + detectorParams, + xyo_det, hkls_idx, bMat, wavelength, + bVec, eVec, + dFunc, dParams, + omePeriod, + simOnly=False, return_value_flag=return_value_flag) """ - npts = len(xyo_det) - - gFull[gFlag] = gFit - xy_unwarped = dFunc(xyo_det[:, :2], dParams) + bVec = instrument.beam_vector + eVec = instrument.eta_vector - rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3]) - tVec_d = detectorParams[3:6].reshape(3, 1) - chi = detectorParams[6] - tVec_s = detectorParams[7:10].reshape(3, 1) + # fill out parameters + gFull[gFlag] = gFit + # map parameters to functional arrays rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3]) tVec_c = gFull[3:6].reshape(3, 1) vInv_s = gFull[6:] - vMat_s = mutil.vecMVToSymm(vInv_s) # NOTE: Inverse of V from F = V * R - - gVec_c = np.dot(bMat, hkls_idx) # gVecs with magnitudes in CRYSTAL frame - gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) # stretched gVecs in SAMPLE frame - gHat_c =
mutil.unitVector( - np.dot(rMat_c.T, gVec_s)) # unit reciprocal lattice vectors in CRYSTAL frame + vMat_s = mutil.vecMVToSymm(vInv_s) # NOTE: Inverse of V from F = V * R + + # loop over instrument panels + # CAVEAT: keeping track of key ordering in the "detectors" attribute of + # instrument here because I am not sure if instantiating them using + # dict.fromkeys() preserves the same order if using iteration... + # + calc_omes_dict = dict.fromkeys(instrument.detectors, []) + calc_xy_dict = dict.fromkeys(instrument.detectors) + meas_xyo_all = [] + det_keys_ordered = [] + for det_key, panel in instrument.detectors.iteritems(): + det_keys_ordered.append(det_key) + + # extract transformation quantities + rMat_d = instrument.detectors[det_key].rmat + tVec_d = instrument.detectors[det_key].tvec + chi = instrument.chi + tVec_s = instrument.tvec + + results = reflections_dict[det_key] + if len(results) == 0: + continue + + """ + extract data from results list + fields: + refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy + """ + + # WARNING: hkls and derived vectors below must be columnwise; + # strictly necessary??? change affected APIs instead? + # + hkls = np.atleast_2d( + np.vstack([x[2] for x in results]) + ).T + + meas_xyo = np.atleast_2d( + np.vstack([np.r_[x[7], x[6][-1]] for x in results]) + ) - match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, - vInv=vInv_s, beamVec=bVec, etaVec=eVec, - omePeriod=omePeriod) + # FIXME: distortion handling must change to class-based + if panel.distortion is not None: + meas_omes = meas_xyo[:, 2] + xy_unwarped = panel.distortion[0]( + meas_xyo[:, :2], panel.distortion[1]) + meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T + pass + + # append to meas_xyo_all + meas_xyo_all.append(meas_xyo) + + # G-vectors: + # 1. calculate full g-vector components in CRYSTAL frame from B + # 2. rotate into SAMPLE frame and apply stretch + # 3. rotate back into CRYSTAL frame and normalize to unit magnitude + # IDEA: make a function for this sequence of operations with option for + # choosing output frame (i.e.
CRYSTAL vs SAMPLE vs LAB) + gVec_c = np.dot(bMat, hkls) + gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) + gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s)) + + # !!!: check that this operates on UNWARPED xy + match_omes, calc_omes = matchOmegas( + meas_xyo, hkls, chi, rMat_c, bMat, wavelength, + vInv=vInv_s, beamVec=bVec, etaVec=eVec, + omePeriod=omePeriod) + + # append to omes dict + calc_omes_dict[det_key] = calc_omes + + # TODO: try Numba implementations + rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes) + calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T, + rMat_d, rMat_s, rMat_c, + tVec_d, tVec_s, tVec_c, + beamVec=bVec) + + # append to xy dict + calc_xy_dict[det_key] = calc_xy + pass - rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes) - calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec) + # stack results to concatenated arrays + calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered]) + tmp = [] + for k in det_keys_ordered: + if calc_xy_dict[k] is not None: + tmp.append(calc_xy_dict[k]) + calc_xy_all = np.vstack(tmp) + meas_xyo_all = np.vstack(meas_xyo_all) + npts = len(meas_xyo_all) if np.any(np.isnan(calc_xy)): - print "infeasible pFull" + raise RuntimeError( + "infeasible pFull: may want to scale " + + "back finite difference step size") # return values if simOnly: - retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)]) + # return simulated values + if return_value_flag in [None, 1]: + retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)]) + else: + rd = dict.fromkeys(det_keys_ordered) + for det_key in det_keys_ordered: + rd[det_key] = {'calc_xy': calc_xy_dict[det_key], + 'calc_omes': calc_omes_dict[det_key]} + retval = rd else: - diff_vecs_xy = calc_xy - xy_unwarped[:, :2] - diff_ome = xf.angularDifference( calc_omes, xyo_det[:, 2] ) + # return residual vector + # IDEA: try angles instead of xys? + diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2] + diff_ome = xf.angularDifference(calc_omes_all, meas_xyo_all[:, 2]) retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1) ]).flatten() if return_value_flag == 1: - retval = sum( abs(retval) ) + # return scalar sum of absolute residuals + retval = sum(abs(retval)) elif return_value_flag == 2: - denom = npts - len(gFit) - 1. + # return DOF-normalized chisq + # TODO: check this calculation + denom = 3*npts - len(gFit) - 1. if denom != 0: nu_fac = 1. / denom else: nu_fac = 1.
- retval = nu_fac * sum(retval**2 / abs(np.hstack([calc_xy, calc_omes.reshape(npts, 1)]).flatten())) + retval = nu_fac * sum(retval**2) return retval - -# def accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old): -# """ -# """ -# return not np.any(np.isnan(f_new)) diff --git a/hexrd/xrd/image_io.py b/hexrd/xrd/image_io.py new file mode 100644 index 00000000..f68b255d --- /dev/null +++ b/hexrd/xrd/image_io.py @@ -0,0 +1,425 @@ +"""Image reading (mostly) and writing + +Classes +------- +Framer2DRC: base class for reader/writers +ReadGeneric: generic reader with omega information +ReadGE: general reader for omega scans + +Functions +--------- +newGenericReader - returns a reader instance + +""" +import copy +import os +import time +import logging +import warnings + +import numpy as np + +from hexrd import imageseries + +import detector + +#logging.basicConfig(level=logging.WARNING) +warnings.filterwarnings('always', '', DeprecationWarning) + +class ReaderDeprecationWarning(DeprecationWarning): + """Warnings on use of old reader features""" + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) + + + +class _OmegaImageSeries(object): + """Facade for frame_series class, replacing other readers, primarily ReadGE""" + OMEGA_TAG = 'omega' + + def __init__(self, ims, fmt='hdf5', **kwargs): + """Initialize frame reader + + *ims* is either an imageseries instance or a filename + *fmt* is the format to be passed to imageseries.open() + *kwargs* is the option list to be passed to imageseries.open() + + NOTES: + * The shape returned from imageseries is cast to int from numpy.uint64 + to allow for addition of indices with regular ints + """ + if isinstance(ims, imageseries.imageseriesabc.ImageSeriesABC): + self._imseries = ims + else: + self._imseries = imageseries.open(ims, fmt, **kwargs) + self._nframes = len(self._imseries) + self._shape = self._imseries.shape + self._meta = self._imseries.metadata + + if self.OMEGA_TAG not in self._meta: + #raise ImageIOError('No omega data found in data file') + pass + + def __getitem__(self, k): + return self._imseries[k] + + @property + def nframes(self): + """(get-only) number of frames""" + return self._nframes + + @property + def nrows(self): + """(get-only) number of rows""" + return self._shape[0] + + @property + def ncols(self): + """(get-only) number of columns""" + return self._shape[1] + + @property + def omega(self): + """ (get-only) array of omega begin/end per frame""" + if self.OMEGA_TAG in self._meta: + return self._meta[self.OMEGA_TAG] + else: + return np.zeros((self.nframes,2)) + + + +class Framer2DRC(object): + """Base class for readers.
+ """ + def __init__(self, + ncols, nrows, pixelPitch=0.2, + dtypeDefault='int16', + dtypeRead='uint16', + dtypeFloat='float64'): + self._nrows = nrows + self._ncols = ncols + self._pixelPitch = pixelPitch + self.__frame_dtype_dflt = dtypeDefault + self.__frame_dtype_read = dtypeRead + self.__frame_dtype_float = dtypeFloat + + self.__nbytes_frame = np.nbytes[dtypeRead]*nrows*ncols + + return + + def get_nrows(self): + return self._nrows + nrows = property(get_nrows, None, None) + + def get_ncols(self): + return self._ncols + ncols = property(get_ncols, None, None) + + def get_pixelPitch(self): + return self._pixelPitch + pixelPitch = property(get_pixelPitch, None, None) + + def get_nbytesFrame(self): + return self.__nbytes_frame + nbytesFrame = property(get_nbytesFrame, None, None) + + def get_dtypeDefault(self): + return self.__frame_dtype_dflt + dtypeDefault = property(get_dtypeDefault, None, None) + + def get_dtypeRead(self): + return self.__frame_dtype_read + dtypeRead = property(get_dtypeRead, None, None) + + def get_dtypeFloat(self): + return self.__frame_dtype_float + dtypeFloat = property(get_dtypeFloat, None, None) + + def getEmptyMask(self): + """convenience method for getting an empty mask""" + # this used to be a class method + return np.zeros([self.nrows, self.ncols], dtype=bool) + +class OmegaFramer(object): + """Omega information associated with frame numbers""" + def __init__(self, omegas): + """Initialize omega ranges + + *omegas* is nframes x 2 + + Could check for monotonicity. + """ + self._omegas = omegas + self._ombeg = omegas[0, 0] # scan start (begin of first frame) + self._omend = omegas[-1, 1] # scan end (end of last frame) + self._omean = omegas.mean(axis=1) # per-frame center omegas + self._odels = omegas[:, 1] - omegas[:, 0] + self._delta = self._odels[0] + self._orange = np.hstack((omegas[:, 0], omegas[-1, 1])) + + return + + def getDeltaOmega(self, nframes=1): + """change in omega over n-frames, assuming constant delta""" + return nframes*(self._delta) + + def getOmegaMinMax(self): + return self._ombeg, self._omend + + def frameToOmega(self, frame): + """can frame be nonintegral? round to int ... """ + return self._omean[frame] + + def omegaToFrame(self, omega): + return np.searchsorted(self._orange, omega) - 1 + + + def omegaToFrameRange(self, omega): + # note: old code assumed single delta omega + return omeToFrameRange(omega, self._omean, self._delta) + + +class ReadGeneric(Framer2DRC, OmegaFramer): + """Generic reader with omega information +""" + def __init__(self, filename, ncols, nrows, *args, **kwargs): + + Framer2DRC.__init__(self, ncols, nrows, **kwargs) + return + + def read(self, nskip=0, nframes=1, sumImg=False): + """ + sumImg can be set to True or to something like numpy.maximum + """ + raise RuntimeError("Generic reader not available for reading") + + def getNFrames(self): + return 0 + + + def getWriter(self, filename): + return None + +class ReadGE(Framer2DRC,OmegaFramer): + """General reader for omega scans + + Originally, this was for reading GE format images, but this is now + a general reader accessing the _OmegaImageSeries facade class. The main + functionality is to read a sequence of images with associated omega ranges. + + ORIGINAL DOCS + ============= + + *) In multiframe images where background subtraction is requested but no + dark is specified, attempts to use the + empty frame(s). An error is returned if there are not any specified. + If there are multiple empty frames, the average is used.
+ + """ + def __init__(self, file_info, *args, **kwargs): + """Initialize the reader + + *file_info* is now just the filename or an existing omegaimageseries + *kwargs* is a dictionary + keys include: 'fmt' which provides imageseries format + other keys depend on the format + + Of original kwargs, only using "mask" + """ + self._fname = file_info + self._kwargs = kwargs + self._format = kwargs.pop('fmt', None) + self._nrows = detector.NROWS + self._ncols = detector.NCOLS + self._pixelPitch = detector.PIXEL + pp_key = 'pixelPitch' + if kwargs.has_key(pp_key): + self._pixelPitch = kwargs[pp_key] + try: + self._omis = _OmegaImageSeries(file_info, fmt=self._format, **kwargs) + Framer2DRC.__init__(self, self._omis.nrows, self._omis.ncols) + # note: Omegas are expected in radians, but input in degrees + OmegaFramer.__init__(self, (np.pi/180.)*self._omis.omega) + except (TypeError, IOError): + logging.info('ReadGE initialization failed') + if file_info is not None: raise + self._omis = None + except ImageIOError: + self._omis = None + pass + self.mask = None + + + # counter for last global frame that was read + self.iFrame = -1 + + return + + + def __call__(self, *args, **kwargs): + return self.read(*args, **kwargs) + + @classmethod + def makeNew(cls): + """return another copy of this reader""" + raise NotImplementedError('this method to be removed') + return None + + def getWriter(self, filename): + return None + + def getNFrames(self): + """number of total frames with real data, not number remaining""" + return self._omis.nframes + + def getFrameOmega(self, iFrame=None): + """if iFrame is none, use internal counter""" + if iFrame is None: + iFrame = self.iFrame + if hasattr(iFrame, '__len__'): + # in case last read was multiframe + oms = [self.frameToOmega(frm) for frm in iFrame] + retval = np.mean(np.asarray(oms)) + else: + retval = self.frameToOmega(iFrame) + return retval + + + def readBBox(self, bbox, raw=True, doFlip=None): + """ + with raw=True, read more or less raw data, with bbox = [(iLo,iHi),(jLo,jHi),(fLo,fHi)] + + """ + # implement in OmegaFrameReader + nskip = bbox[2][0] + bBox = np.array(bbox) + sl_i = slice(*bBox[0]) + sl_j = slice(*bBox[1]) + 'plenty of performance optimization might be possible here' + # use the dtype properties here; the double-underscore attributes are + # name-mangled in Framer2DRC and not reachable from this subclass + if raw: + retval = np.empty( tuple(bBox[:,1] - bBox[:,0]), dtype=self.dtypeRead ) + else: + retval = np.empty( tuple(bBox[:,1] - bBox[:,0]), dtype=self.dtypeDefault ) + for iFrame in range(retval.shape[2]): + thisframe = self.read(nskip=nskip) + nskip = 0 + retval[:,:,iFrame] = copy.deepcopy(thisframe[sl_i, sl_j]) + return retval + + def getDark(self): + return 0 + + def indicesToMask(self, indices): + """Create mask from list of indices + + Indices can be a list of indices, as from makeIndicesTThRanges + """ + mask = self.getEmptyMask() + if hasattr(indices,'__len__'): + for indThese in indices: + mask[indThese] = True + else: + mask[indices] = True + return mask + + def read(self, nskip=0, nframes=1, sumImg=False): + """Read one or more frames, possibly operating on them + + This returns a single frame if nframes is 1, multiple + frames if nframes > 1 with sumImg off, or a single frame + resulting from some operation on the multiple frames if + sumImg is true or a function.
+ + *sumImg* can be set to True or to a function of two frames like numpy.maximum + *nskip* applies only to the first frame + """ + self.iFrame = np.atleast_1d(self.iFrame)[-1] + nskip + + multiframe = nframes > 1 + sumimg_callable = hasattr(sumImg, '__call__') + + if not multiframe: + self.iFrame += 1 + img = self._omis[self.iFrame] + if self.mask is not None: + img[self.mask] = 0 + return img + + # multiframe case; use an ndarray of frame indices + self.iFrame = self.iFrame + 1 + np.arange(nframes) + + if not sumImg: + # return multiple frames + imgs = self._omis[self.iFrame] + for i in range(nframes): + if self.mask is not None: + imgs[i, self.mask] = 0 + return imgs + + # Now, operate on frames consecutively + op = sumImg if sumimg_callable else np.add + + ifrm = self.iFrame[0] + + img = self._omis[ifrm] + for i in range(1, nframes): + ifrm += 1 + img = op(img, self._omis[ifrm]) + if not sumimg_callable: + img = img * (1.0/nframes) + + if self.mask is not None: + img[self.mask] = 0 + + # reset iframe to single value of last frame read + self.iFrame = self.iFrame[-1] + if self.iFrame + 1 == self.getNFrames(): + self.iFrame = -1 + + return img + + def close(self): + return + + @classmethod + def display(cls, + thisframe, + roi=None, + pw=None, + **kwargs + ): + warnings.warn('display method on readers no longer implemented', + ReaderDeprecationWarning) + +# +# Module functions +# +def omeToFrameRange(omega, omegas, omegaDelta): + """ + check omega range for the frames instead + of omega center; + result can be a pair of frames if the specified omega is + exactly on the border + """ + retval = np.where(np.abs(omegas - omega) <= omegaDelta*0.5)[0] + return retval + +def newGenericReader(ncols, nrows, *args, **kwargs): + """ Currently just returns a ReadGeneric + """ + + # retval = Framer2DRC(ncols, nrows, **kwargs) + filename = kwargs.pop('filename', None) + retval = ReadGeneric(filename, ncols, nrows, *args, **kwargs) + + return retval + +class ImageIOError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) diff --git a/hexrd/xrd/indexer.py b/hexrd/xrd/indexer.py index a7214666..d865ed20 100644 --- a/hexrd/xrd/indexer.py +++ b/hexrd/xrd/indexer.py @@ -1,12 +1,12 @@ #! /usr/bin/env python -# ============================================================ +# ============================================================================= # Copyright (c) 2012, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # Written by Joel Bernier and others. # LLNL-CODE-529294. # All rights reserved. # -# This file is part of HExrd. For details on dowloading the source, +# This file is part of HEXRD. For details on downloading the source, # see the file COPYING. # # Please also see the file LICENSE. @@ -24,44 +24,45 @@ # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit .
-# ============================================================ +# ============================================================================= +from __future__ import print_function + import sys import os import copy import ctypes -import tempfile -import glob import logging import time -import pdb import numpy as num -#num.seterr(invalid='ignore') +# num.seterr(invalid='ignore') import hexrd.matrixutil as mUtil - -from hexrd.xrd.grain import Grain, makeMeasuredScatteringVectors +from hexrd import constants as const +from hexrd import USE_NUMBA +from hexrd.xrd.grain import Grain, makeMeasuredScatteringVectors from hexrd.xrd.rotations import \ discreteFiber, mapAngle, \ quatOfRotMat, quatProductMatrix, \ rotMatOfExpMap, rotMatOfQuat -from hexrd.xrd.symmetry import toFundamentalRegion -from hexrd.xrd import xrdbase - -from hexrd.xrd import transforms as xf +from hexrd.xrd.symmetry import toFundamentalRegion +from hexrd.xrd import transforms as xf from hexrd.xrd import transforms_CAPI as xfcapi -from hexrd import USE_NUMBA +from hexrd.xrd import xrdbase +# FIXME: numba implementation of paintGridThis is broken if USE_NUMBA: import numba if xrdbase.haveMultiProc: - multiprocessing = xrdbase.multiprocessing # formerly import + multiprocessing = xrdbase.multiprocessing # formerly import +# ============================================================================= +# MODULE PARAMETERS +# ============================================================================= logger = logging.getLogger(__name__) - # module vars piby2 = num.pi * 0.5 r2d = 180. / num.pi @@ -73,17 +74,25 @@ fableSampCOB = num.dot(rotMatOfExpMap(piby2*Zl), rotMatOfExpMap(piby2*Yl)) + +# ============================================================================= +# CLASSES +# ============================================================================= + + class GrainSpotter: """ Interface to grain spotter, which must be in the user's path """ __execName = 'grainspotter' + def __init__(self): self.__tempFNameList = [] if (os.system('which '+self.__execName) != 0): - print >> sys.stderr, "need %s to be in the path" % (self.__execName) - raise RuntimeError, "unrecoverable error" + print("need %s to be in the path" % (self.__execName), + file=sys.stderr) + raise RuntimeError("unrecoverable error") return @@ -97,7 +106,7 @@ def __call__(self, spotsArray, **kwargs): location = self.__class__.__name__ tic = time.time() - phaseID = None + phaseID = None gVecFName = 'tmp' kwarglen = len(kwargs) @@ -110,13 +119,13 @@ def __call__(self, spotsArray, **kwargs): gVecFName = kwargs[argkeys[i]] planeData = spotsArray.getPlaneData(phaseID=phaseID) - U0 = planeData.latVecOps['U0'] - symTag = planeData.getLaueGroup() + U0 = planeData.latVecOps['U0'] + symTag = planeData.getLaueGroup() writeGVE(spotsArray, gVecFName, **kwargs) toc = time.time() - print 'in %s, setup took %g' % (location, toc-tic) + print('in %s, setup took %g' % (location, toc - tic)) tic = time.time() # tempFNameStdout = tempfile.mktemp() @@ -128,7 +137,7 @@ def __call__(self, spotsArray, **kwargs): grainSpotterCmd = '%s %s' % (self.__execName, gVecFName+'.ini') os.system(grainSpotterCmd) toc = time.time() - print 'in %s, execution took %g' % (location, toc-tic) + print('in %s, execution took %g' % (location, toc - tic)) tic = time.time() # add output files to cleanup list @@ -139,13 +148,13 @@ def __call__(self, spotsArray, **kwargs): gffData = num.loadtxt(gffFile) if gffData.ndim == 1: gffData = gffData.reshape(1, len(gffData)) - gffData_U = gffData[:,6:6+9] + gffData_U = 
gffData[:, 6:6+9] # process for output retval = convertUToRotMat(gffData_U, U0, symTag=symTag) toc = time.time() - print 'in %s, post-processing took %g' % (location, toc-tic) + print('in %s, post-processing took %g' % (location, toc - tic)) tic = time.time() return retval @@ -153,11 +162,13 @@ def __call__(self, spotsArray, **kwargs): def __del__(self): self.cleanup() return + def cleanup(self): for fname in self.__tempFNameList: os.remove(fname) return + def convertUToRotMat(Urows, U0, symTag='Oh', display=False): """ Takes GrainSpotter gff ouput in rows @@ -176,7 +187,7 @@ def convertUToRotMat(Urows, U0, symTag='Oh', display=False): "input must have 9 columns; received %d" % (testDim) ) - qin = quatOfRotMat(Urows.reshape(numU, 3, 3)) + qin = quatOfRotMat(Urows.reshape(numU, 3, 3)) # what the hell is happening here?: qout = num.dot( quatProductMatrix(quatOfRotMat(fableSampCOB), mult='left'), @@ -192,13 +203,14 @@ def convertUToRotMat(Urows, U0, symTag='Oh', display=False): else: qout = toFundamentalRegion(qout, crysSym=symTag, sampSym=None) if display: - print "quaternions in (Fable convention):" - print qin.T - print "quaternions out (hexrd convention, symmetrically reduced)" - print qout.T + print("quaternions in (Fable convention):") + print(qin.T) + print("quaternions out (hexrd convention, symmetrically reduced)") + print(qout.T) Uout = rotMatOfQuat(qout) return Uout + def convertRotMatToFableU(rMats, U0=num.eye(3), symTag='Oh', display=False): """ Makes GrainSpotter gff ouput @@ -210,9 +222,7 @@ def convertRotMatToFableU(rMats, U0=num.eye(3), symTag='Oh', display=False): Urows comes from grainspotter's gff output U0 comes from xrd.crystallography.latticeVectors.U0 """ - numU = num.shape(num.atleast_3d(rMats))[0] - - qin = quatOfRotMat(num.atleast_3d(rMats)) + qin = quatOfRotMat(num.atleast_3d(rMats)) # what the hell is this?: qout = num.dot( quatProductMatrix(quatOfRotMat(fableSampCOB.T), mult='left'), @@ -227,31 +237,37 @@ def convertRotMatToFableU(rMats, U0=num.eye(3), symTag='Oh', display=False): else: qout = toFundamentalRegion(qout, crysSym=symTag, sampSym=None) if display: - print "quaternions in (hexrd convention):" - print qin.T - print "quaternions out (Fable convention, symmetrically reduced)" - print qout.T + print("quaternions in (hexrd convention):") + print(qin.T) + print("quaternions out (Fable convention, symmetrically reduced)") + print(qout.T) Uout = rotMatOfQuat(qout) return Uout -###################################################################### -""" -things for doing fiberSearch with multiprocessing; -multiprocessing has a hard time pickling a function defined in the local scope -of another function, so stuck putting the function out here; -""" +# ============================================================================= +# FIBERSEARCH +# +# things for doing fiberSearch with multiprocessing; +# multiprocessing has a hard time pickling a function defined in the local +# scope of another function, so stuck putting the function out here. 
+# +# !!!: deprecated +# ============================================================================= + debugMultiproc = 0 if xrdbase.haveMultiProc: foundFlagShared = multiprocessing.Value(ctypes.c_bool) foundFlagShared.value = False -multiProcMode_MP = None -spotsArray_MP = None -candidate_MP = None -dspTol_MP = None +multiProcMode_MP = None +spotsArray_MP = None +candidate_MP = None +dspTol_MP = None minCompleteness_MP = None -doRefinement_MP = None -nStdDev_MP = None +doRefinement_MP = None +nStdDev_MP = None + + def testThisQ(thisQ): """ NOTES: @@ -270,19 +286,17 @@ def testThisQ(thisQ): global doRefinement_MP global nStdDev_MP # assign locals - multiProcMode = multiProcMode_MP - spotsArray = spotsArray_MP - candidate = candidate_MP - dspTol = dspTol_MP + multiProcMode = multiProcMode_MP + candidate = candidate_MP + dspTol = dspTol_MP minCompleteness = minCompleteness_MP - doRefinement = doRefinement_MP - nStdDev = nStdDev_MP - nSigmas = 2 # ... make this a settable option? + doRefinement = doRefinement_MP + nStdDev = nStdDev_MP if multiProcMode: global foundFlagShared foundGrainData = None - #print "testing %d of %d"% (iR+1, numTrials) + # print("testing %d of %d"% (iR + 1, numTrials)) thisRMat = rotMatOfQuat(thisQ) ppfx = '' @@ -295,34 +309,36 @@ def testThisQ(thisQ): but skip evaluations after an acceptable grain has been found """ if debugMultiproc > 1: - print ppfx+'skipping on '+str(thisQ) + print(ppfx + 'skipping on ' + str(thisQ)) return foundGrainData else: if debugMultiproc > 1: - print ppfx+'working on '+str(thisQ) + print(ppfx + 'working on ' + str(thisQ)) candidate.findMatches(rMat=thisRMat, strainMag=dspTol, claimingSpots=False, testClaims=True, updateSelf=True) if debugMultiproc > 1: - print ppfx+' for '+str(thisQ)+' got completeness : '\ - +str(candidate.completeness) + print(ppfx + ' for ' + str(thisQ) + ' got completeness : ' + + str(candidate.completeness)) if candidate.completeness >= minCompleteness: - ## attempt to filter out 'junk' spots here by performing full - ## refinement before claiming + ''' + Attempt to filter out 'junk' spots here by performing full + refinement before claiming + ''' fineEtaTol = candidate.etaTol fineOmeTol = candidate.omeTol if doRefinement: if multiProcMode and foundFlagShared.value: 'some other process beat this one to it' return foundGrainData - print ppfx+"testing candidate q = [%1.2e, %1.2e, %1.2e, %1.2e]"\ - %tuple(thisQ) + print(ppfx + "testing candidate q = [%1.2e, %1.2e, %1.2e, %1.2e]" + % tuple(thisQ)) # not needed # candidate.fitPrecession(display=False) - ## first fit + # first fit candidate.fit(display=False) - ## auto-tolerace based on statistics of current matches + # auto-tolerace based on statistics of current matches validRefls = candidate.grainSpots['iRefl'] > 0 fineEtaTol = nStdDev * num.std( candidate.grainSpots['diffAngles'][validRefls, 1] @@ -330,7 +346,7 @@ def testThisQ(thisQ): fineOmeTol = nStdDev * num.std( candidate.grainSpots['diffAngles'][validRefls, 2] ) - ## next fits with finer tolerances + # next fits with finer tolerances for iLoop in range(3): candidate.findMatches(etaTol=fineEtaTol, omeTol=fineOmeTol, @@ -340,7 +356,7 @@ def testThisQ(thisQ): # not needed # candidate.fitPrecession(display=False) candidate.fit(display=False) if candidate.completeness < minCompleteness: - print ppfx+"candidate failed" + print(ppfx + "candidate failed") return foundGrainData if multiProcMode and foundFlagShared.value: 'some other process beat this one to it' @@ -353,7 +369,7 @@ def testThisQ(thisQ): # not needed? 
# testClaims=True, # not needed? # updateSelf=True) else: - ## at least do precession correction + # at least do precession correction candidate.fitPrecession(display=False) candidate.findMatches(rMat=thisRMat, strainMag=dspTol, @@ -363,7 +379,7 @@ def testThisQ(thisQ): fineEtaTol = candidate.etaTol fineOmeTol = candidate.omeTol if candidate.completeness < minCompleteness: - print ppfx+"candidate failed" + print(ppfx + "candidate failed") return foundGrainData if multiProcMode and foundFlagShared.value: 'some other process beat this one to it' @@ -380,8 +396,11 @@ def testThisQ(thisQ): # foundGrain.strip() cInfo = quatOfRotMat(candidate.rMat).flatten().tolist() cInfo.append(candidate.completeness) - print ppfx+"Grain found at q = [%1.2e, %1.2e, %1.2e, %1.2e] "\ - "with completeness %g" % tuple(cInfo) + print( + ppfx + + ("Grain found at q = [%1.2e, %1.2e, %1.2e, %1.2e] " + + "with completeness %g") % tuple(cInfo) + ) foundGrainData = candidate.getGrainData() 'tolerances not actually set in candidate, so set them manually' foundGrainData['omeTol'] = fineOmeTol @@ -451,13 +470,13 @@ def fiberSearch(spotsArray, hklList, global minCompleteness_MP global doRefinement_MP global nStdDev_MP - multiProcMode_MP = multiProcMode - spotsArray_MP = spotsArray - candidate_MP = candidate - dspTol_MP = dspTol + multiProcMode_MP = multiProcMode + spotsArray_MP = spotsArray + candidate_MP = candidate + dspTol_MP = dspTol minCompleteness_MP = minCompleteness - doRefinement_MP = doRefinement - nStdDev_MP = nStdDev + doRefinement_MP = doRefinement + nStdDev_MP = nStdDev """ set up for shared memory multiprocessing """ @@ -485,13 +504,13 @@ def fiberSearch(spotsArray, hklList, tic = time.time() for iHKL in range(n_hkls_to_search): - print "\n#####################\nProcessing hkl %d of %d\n" \ - % (iHKL+1, nHKLs) + print("\n#####################\nProcessing hkl %d of %d\n" + % (iHKL+1, nHKLs)) thisHKLID = planeData.getHKLID(hklList[iHKL]) - thisRingSpots0 = spotsArray.getHKLSpots(thisHKLID) - thisRingSpots0W = num.where(thisRingSpots0)[0] + thisRingSpots0 = spotsArray.getHKLSpots(thisHKLID) + thisRingSpots0W = num.where(thisRingSpots0)[0] unclaimedOfThese = -spotsArray.checkClaims(indices=thisRingSpots0W) - thisRingSpots = copy.deepcopy(thisRingSpots0) + thisRingSpots = copy.deepcopy(thisRingSpots0) thisRingSpots[thisRingSpots0W] = unclaimedOfThese if friedelOnly: # first, find Friedel Pairs @@ -503,9 +522,9 @@ def fiberSearch(spotsArray, hklList, ) # make some stuff for counters maxSpots = 0.5*( - sum(thisRingSpots) \ + sum(thisRingSpots) - sum(spotsArray.friedelPair[thisRingSpots] == -1) - ) + ) else: spotsIteratorI = spotsArray.getIterHKL( hklList[iHKL], unclaimedOnly=True, friedelOnly=False @@ -519,28 +538,28 @@ def fiberSearch(spotsArray, hklList, """ for iRefl, stuff in enumerate(spotsIteratorI): unclaimedOfThese = -spotsArray.checkClaims(indices=thisRingSpots0W) - thisRingSpots = copy.deepcopy(thisRingSpots0) + thisRingSpots = copy.deepcopy(thisRingSpots0) thisRingSpots[thisRingSpots0W] = unclaimedOfThese if friedelOnly: iSpot, jSpot, angs_I, angs_J = stuff - Gplus = makeMeasuredScatteringVectors(*angs_I) + Gplus = makeMeasuredScatteringVectors(*angs_I) Gminus = makeMeasuredScatteringVectors(*angs_J) Gvec = 0.5*(Gplus - Gminus) maxSpots = 0.5*( - sum(thisRingSpots) \ + sum(thisRingSpots) - sum(spotsArray.friedelPair[thisRingSpots] == -1) - ) + ) else: iSpot, angs_I = stuff - Gvec = makeMeasuredScatteringVectors(*angs_I) + Gvec = makeMeasuredScatteringVectors(*angs_I) maxSpots = sum(thisRingSpots) - print
"\nProcessing reflection %d (spot %d), %d remain "\ - "unclaimed\n" % (iRefl+1, iSpot, maxSpots) + print("\nProcessing reflection %d (spot %d), %d remain " + + "unclaimed\n" % (iRefl + 1, iSpot, maxSpots)) if multiProcMode and debugMultiproc > 1: marks = spotsArray._Spots__marks[:] - print 'marks : '+str(marks) + print('marks : ' + str(marks)) # make the fiber; qfib = discreteFiber(hklList[iHKL], Gvec, B=bMat, @@ -566,10 +585,12 @@ def fiberSearch(spotsArray, hklList, if multiProcMode: foundFlagShared.value = False qfibList = map(num.array, qfib.T.tolist()) - #if debugMultiproc: - # print 'qfibList : '+str(qfibList) + # if debugMultiproc: + # print('qfibList : ' + str(qfibList)) results = num.array(pool.map(testThisQ, qfibList, chunksize=1)) - trialGrains = results[num.where(num.array(results, dtype=bool))] + trialGrains = results[ + num.where(num.array(results, dtype=bool)) + ] # for trialGrain in trialGrains: # trialGrain.restore(candidate) else: @@ -582,7 +603,7 @@ def fiberSearch(spotsArray, hklList, 'end of if multiProcMode' if len(trialGrains) == 0: - print "No grain found containing spot %d\n" % (iSpot) + print("No grain found containing spot %d\n" % (iSpot)) # import pdb;pdb.set_trace() else: asMaster = multiProcMode @@ -598,28 +619,29 @@ def fiberSearch(spotsArray, hklList, grainData=foundGrainData, claimingSpots=False ) - #check completeness before accepting - #especially important for multiproc - foundGrain.checkClaims() # updates completeness + # !!! check completeness before accepting + # especially important for multiproc + foundGrain.checkClaims() # updates completeness if debugMultiproc: - print 'final completeness of candidate is %g' \ - % (foundGrain.completeness) + print('final completeness of candidate is %g' + % (foundGrain.completeness)) if foundGrain.completeness >= minCompleteness: conflicts = foundGrain.claimSpots(asMaster=asMaster) numConfl = num.sum(conflicts) if numConfl > 0: - print 'tried to claim %d spots that are already '\ - 'claimed' % (numConfl) + print('tried to claim %d spots that are already ' + + 'claimed' % (numConfl)) grainList.append(foundGrain) nGrains += 1 numUnClaimed = num.sum(-spotsArray.checkClaims()) numClaimed = numTotal - numUnClaimed pctClaimed = num.float(numClaimed) / numTotal - print "Found %d grains so far, %f%% claimed" \ - % (nGrains,100*pctClaimed) + print("Found %d grains so far, %f%% claimed" + % (nGrains, 100*pctClaimed)) - time_to_quit = (pctClaimed > minPctClaimed) or\ - ((quit_after_ngrains > 0) and (nGrains >= quit_after_ngrains)) + time_to_quit = (pctClaimed > minPctClaimed) or \ + ((quit_after_ngrains > 0) + and (nGrains >= quit_after_ngrains)) if time_to_quit: break 'end of iRefl loop' @@ -640,7 +662,7 @@ def fiberSearch(spotsArray, hklList, if not preserveClaims: spotsArray.resetClaims() toc = time.time() - print 'fiberSearch execution took %g seconds' % (toc-tic) + print('fiberSearch execution took %g seconds' % (toc - tic)) if multiProcMode: pool.close() @@ -662,16 +684,39 @@ def fiberSearch(spotsArray, hklList, return retval + def pgRefine(x, etaOmeMaps, omegaRange, threshold): + """ + Objective function for refining orientations found with paintGrid. + + !!!: This function is flagged for removal. + + Parameters + ---------- + x : TYPE + DESCRIPTION. + etaOmeMaps : TYPE + DESCRIPTION. + omegaRange : TYPE + DESCRIPTION. + threshold : TYPE + DESCRIPTION. + + Returns + ------- + f : TYPE + DESCRIPTION. 
+ + """ phi = sum(x*x) if phi < 1e-7: - q = [num.r_[1.,0.,0.,0.],] + q = [num.r_[1., 0., 0., 0.], ] else: phi = num.sqrt(phi) n = (1. / phi) * x.flatten() cphi2 = num.cos(0.5*phi) sphi2 = num.sin(0.5*phi) - q = [num.r_[cphi2, sphi2*n[0], sphi2*n[1], sphi2*n[2]],] + q = [num.r_[cphi2, sphi2*n[0], sphi2*n[1], sphi2*n[2]], ] c = paintGrid( q, etaOmeMaps, threshold=threshold, bMat=None, omegaRange=omegaRange, etaRange=None, debug=False @@ -679,7 +724,109 @@ def pgRefine(x, etaOmeMaps, omegaRange, threshold): f = abs(1. - c) return f -paramMP = None + +# ============================================================================= +# DIRECT SEARCH FUNCTIONS +# ============================================================================= + + +def test_orientation_FF_init(params): + """ + Broadcast the indexing parameters as globals for multiprocessing + + Parameters + ---------- + params : dict + The dictionary of indexing parameters. + + Returns + ------- + None. + + Notes + ----- + See test_orientation_FF_reduced for specification. + """ + global paramMP + paramMP = params + + +def test_orientation_FF_reduced(quat): + """ + Return the completeness score for input quaternion. + + Parameters + ---------- + quat : array_like (4,) + The unit quaternion representation for the orientation to be tested. + + Returns + ------- + float + The completeness, i.e., the ratio between the predicted and observed + Bragg reflections subject to the specified tolerances. + + + Notes + ----- + input parameters are + [plane_data, instrument, imgser_dict, + tth_tol, eta_tol, ome_tol, eta_ranges, ome_period, + npdiv, threshold] + """ + plane_data = paramMP['plane_data'] + instrument = paramMP['instrument'] + imgser_dict = paramMP['imgser_dict'] + tth_tol = paramMP['tth_tol'] + eta_tol = paramMP['eta_tol'] + ome_tol = paramMP['ome_tol'] + eta_ranges = paramMP['eta_ranges'] + ome_period = paramMP['ome_period'] + npdiv = paramMP['npdiv'] + threshold = paramMP['threshold'] + + phi = 2*num.arccos(quat[0]) + n = xfcapi.unitRowVector(quat[1:]) + grain_params = num.hstack([ + phi*n, const.zeros_3, const.identity_6x1, + ]) + + compl, scrap = instrument.pull_spots( + plane_data, grain_params, imgser_dict, + tth_tol=tth_tol, eta_tol=eta_tol, ome_tol=ome_tol, + npdiv=npdiv, threshold=threshold, + eta_ranges=eta_ranges, + ome_period=ome_period, + check_only=True) + + return sum(compl)/float(len(compl)) + + +# ============================================================================= +# PAINTGRID +# ============================================================================= + + +def paintgrid_init(params): + global paramMP + paramMP = params + + # create valid_eta_spans, valid_ome_spans from etaMin/Max and omeMin/Max + # this allows using faster checks in the code. + # TODO: build valid_eta_spans and valid_ome_spans directly in paintGrid + # instead of building etaMin/etaMax and omeMin/omeMax. It may also + # be worth handling range overlap and maybe "optimize" ranges if + # there happens to be contiguous spans. 
+ paramMP['valid_eta_spans'] = _normalize_ranges(paramMP['etaMin'], + paramMP['etaMax'], + -num.pi) + + paramMP['valid_ome_spans'] = _normalize_ranges(paramMP['omeMin'], + paramMP['omeMax'], + min(paramMP['omePeriod'])) + return + + def paintGrid(quats, etaOmeMaps, threshold=None, bMat=None, omegaRange=None, etaRange=None, @@ -712,12 +859,12 @@ def paintGrid(quats, etaOmeMaps, planeData = etaOmeMaps.planeData - hklIDs = num.r_[etaOmeMaps.iHKLList] - hklList = num.atleast_2d(planeData.hkls[:, hklIDs].T).tolist() - nHKLS = len(hklIDs) + hklIDs = num.r_[etaOmeMaps.iHKLList] + hklList = num.atleast_2d(planeData.hkls[:, hklIDs].T).tolist() + nHKLS = len(hklIDs) - numEtas = len(etaOmeMaps.etaEdges) - 1 - numOmes = len(etaOmeMaps.omeEdges) - 1 + numEtas = len(etaOmeMaps.etaEdges) - 1 + numOmes = len(etaOmeMaps.omeEdges) - 1 if threshold is None: threshold = num.zeros(nHKLS) @@ -732,9 +879,9 @@ def paintGrid(quats, etaOmeMaps, threshold = threshold * num.ones(nHKLS) elif hasattr(threshold, '__len__'): if len(threshold) != nHKLS: - raise RuntimeError, "threshold list is wrong length!" + raise RuntimeError("threshold list is wrong length!") else: - print "INFO: using list of threshold values" + print("INFO: using list of threshold values") else: raise RuntimeError( "unknown threshold option. should be a list of numbers or None" @@ -761,15 +908,15 @@ def paintGrid(quats, etaOmeMaps, omeMin = None omeMax = None - if omegaRange is None: # this NEEDS TO BE FIXED! - omeMin = [num.min(etaOmeMaps.omeEdges),] - omeMax = [num.max(etaOmeMaps.omeEdges),] + if omegaRange is None: # this NEEDS TO BE FIXED! + omeMin = [num.min(etaOmeMaps.omeEdges), ] + omeMax = [num.max(etaOmeMaps.omeEdges), ] else: omeMin = [omegaRange[i][0] for i in range(len(omegaRange))] omeMax = [omegaRange[i][1] for i in range(len(omegaRange))] if omeMin is None: omeMin = [-num.pi, ] - omeMax = [ num.pi, ] + omeMax = [num.pi, ] omeMin = num.asarray(omeMin) omeMax = num.asarray(omeMax) @@ -780,7 +927,7 @@ def paintGrid(quats, etaOmeMaps, etaMax = [etaRange[i][1] for i in range(len(etaRange))] if etaMin is None: etaMin = [-num.pi, ] - etaMax = [ num.pi, ] + etaMax = [num.pi, ] etaMin = num.asarray(etaMin) etaMax = num.asarray(etaMax) @@ -805,7 +952,7 @@ def paintGrid(quats, etaOmeMaps, # symHKLs_ix provides the start/end index for each subarray # of symHKLs. symHKLs_ix = num.add.accumulate([0] + [s.shape[1] for s in symHKLs]) - symHKLs = num.vstack(s.T for s in symHKLs) + symHKLs = num.vstack([s.T for s in symHKLs]) # Pack together the common parameters for processing params = { @@ -824,12 +971,11 @@ def paintGrid(quats, etaOmeMaps, 'etaTol': etaTol, 'etaIndices': etaIndices, 'etaEdges': etaOmeMaps.etaEdges, - 'etaOmeMaps': etaOmeMaps.dataStore, + 'etaOmeMaps': num.stack(etaOmeMaps.dataStore), 'bMat': bMat, 'threshold': threshold } - # do the mapping start = time.time() retval = None @@ -840,15 +986,14 @@ def paintGrid(quats, etaOmeMaps, pool.close() else: # single process version. - global paramMP - paintgrid_init(params) # sets paramMP + paintgrid_init(params) # sets paramMP retval = map(paintGridThis, quats.T) - paramMP = None # clear paramMP elapsed = (time.time() - start) logger.info("paintGrid took %.3f seconds", elapsed) return retval + def _meshgrid2d(x, y): """ A special-cased implementation of num.meshgrid, for just @@ -864,7 +1009,6 @@ def _meshgrid2d(x, y): return (r1, r2) - def _normalize_ranges(starts, stops, offset, ccw=False): """normalize in the range [offset, 2*pi+offset[ the ranges defined by starts and stops. 
@@ -881,7 +1025,6 @@ def _normalize_ranges(starts, stops, offset, ccw=False): if not num.all(starts < stops): raise ValueError('Invalid angle ranges') - # If there is a range that spans more than 2*pi, # return the full range two_pi = 2 * num.pi @@ -892,8 +1035,10 @@ def _normalize_ranges(starts, stops, offset, ccw=False): stops = num.mod(stops - offset, two_pi) + offset order = num.argsort(starts) - result = num.hstack((starts[order, num.newaxis], - stops[order, num.newaxis])).ravel() + result = num.hstack( + (starts[order, num.newaxis], + stops[order, num.newaxis]) + ).ravel() # at this point, result is in its final form unless there # is wrap-around in the last segment. Handle this case: if result[-1] < result[-2]: @@ -910,35 +1055,16 @@ def _normalize_ranges(starts, stops, offset, ccw=False): return result -def paintgrid_init(params): - global paramMP - paramMP = params - - # create valid_eta_spans, valid_ome_spans from etaMin/Max and omeMin/Max - # this allows using faster checks in the code. - # TODO: build valid_eta_spans and valid_ome_spans directly in paintGrid - # instead of building etaMin/etaMax and omeMin/omeMax. It may also - # be worth handling range overlap and maybe "optimize" ranges if - # there happens to be contiguous spans. - paramMP['valid_eta_spans'] = _normalize_ranges(paramMP['etaMin'], - paramMP['etaMax'], - -num.pi) - - paramMP['valid_ome_spans'] = _normalize_ranges(paramMP['omeMin'], - paramMP['omeMax'], - min(paramMP['omePeriod'])) - - - -################################################################################ - +############################################################################### +# # paintGridThis contains the bulk of the process to perform for paintGrid for a # given quaternion. This is also used as the basis for multiprocessing, as the # work is split in a per-quaternion basis among different processes. # The remainding arguments are marshalled into the module variable "paramMP". +# +# There is a version of PaintGridThis using numba, and another version used +# when numba is not available. The numba version should be noticeably faster. -# There is a version of PaintGridThis using numba, and another version used when -# numba is not available. The numba version should be noticeably faster. def _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMap, threshold): """This is part of paintGridThis: @@ -948,26 +1074,45 @@ def _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMap, threshold): Note this function is "numba friendly" and will be jitted when using numba. + TODO: currently behaves like "num.any" call for values above threshold. + There is some ambiguity if there are NaNs in the dilation range, but it + hits a value above threshold first. Is that ok???
+ + FIXME: works in non-numba implementation of paintGridThis only + """ i_max, j_max = etaOmeMap.shape - ome_start, ome_stop = max(ome - dpix_ome, 0), min(ome + dpix_ome + 1, i_max) - eta_start, eta_stop = max(eta - dpix_eta, 0), min(eta + dpix_eta + 1, j_max) + ome_start, ome_stop = ( + max(ome - dpix_ome, 0), + min(ome + dpix_ome + 1, i_max) + ) + eta_start, eta_stop = ( + max(eta - dpix_eta, 0), + min(eta + dpix_eta + 1, j_max) + ) for i in range(ome_start, ome_stop): for j in range(eta_start, eta_stop): - if etaOmeMap[i,j] > threshold: + if etaOmeMap[i, j] > threshold: return 1 + if num.isnan(etaOmeMap[i, j]): + return -1 return 0 +# ============================================================================= +# HELPER FUNCTIONS WITH SPLIT DEFS BASED ON USE_NUMBA +# ============================================================================= + + if USE_NUMBA: def paintGridThis(quat): - # Note that this version does not use omeMin/omeMax to specify the valid - # angles. It uses "valid_eta_spans" and "valid_ome_spans". These are - # precomputed and make for a faster check of ranges than + # Note that this version does not use omeMin/omeMax to specify the + # valid angles. It uses "valid_eta_spans" and "valid_ome_spans". + # These are precomputed and make for a faster check of ranges than # "validateAngleRanges" - symHKLs = paramMP['symHKLs'] # the HKLs - symHKLs_ix = paramMP['symHKLs_ix'] # index partitioning of symHKLs + symHKLs = paramMP['symHKLs'] # the HKLs + symHKLs_ix = paramMP['symHKLs_ix'] # index partitioning of symHKLs bMat = paramMP['bMat'] wavelength = paramMP['wavelength'] omeEdges = paramMP['omeEdges'] @@ -992,7 +1137,7 @@ def paintGridThis(quat): debug = False if debug: - print( "using ome, eta dilitations of (%d, %d) pixels" \ + print("using ome, eta dilations of (%d, %d) pixels" % (dpix_ome, dpix_eta)) # get the equivalent rotation of the quaternion in matrix form (as @@ -1003,22 +1148,22 @@ def paintGridThis(quat): # Compute the oscillation angles of all the symHKLs at once oangs_pair = xfcapi.oscillAnglesOfHKLs(symHKLs, 0., rMat, bMat, wavelength) - #pdb.set_trace() + # pdb.set_trace() return _filter_and_count_hits(oangs_pair[0], oangs_pair[1], symHKLs_ix, etaEdges, valid_eta_spans, valid_ome_spans, omeEdges, omePeriod, etaOmeMaps, etaIndices, omeIndices, dpix_eta, dpix_ome, threshold) - @numba.jit def _find_in_range(value, spans): - """find the index in spans where value >= spans[i] and value < spans[i]. + """ + Find the index in spans where value >= spans[i] and value < spans[i+1]. spans is an ordered array where spans[i] <= spans[i+1] (most often < will hold). - If value is not in the range [spans[0], spans[-1][, then -2 is returned. + If value is not in the range [spans[0], spans[-1]] then -2 is returned. This is equivalent to "bisect_right" in the bisect package, in which code it is based, and it is somewhat similar to NumPy's searchsorted, @@ -1042,7 +1187,6 @@ def paintGridThis(quat): return li - @numba.njit def _angle_is_hit(ang, eta_offset, ome_offset, hkl, valid_eta_spans, valid_ome_spans, etaEdges, omeEdges, etaOmeMaps, @@ -1060,10 +1204,13 @@ def _angle_is_hit(ang, eta_offset, ome_offset, hkl, valid_eta_spans, - actual check for a hit, using dilation for the tolerance. - Note the function returns both, if it was a hit and if it passed the the + Note the function returns both, if it was a hit and if it passed the filtering, as we'll want to discard the filtered values when computing the hit percentage.
+ CAVEAT: added map-based nan filtering to _check_dilated; this may not + be the best option. Perhaps filter here? + """ tth, eta, ome = ang @@ -1095,9 +1242,10 @@ def _angle_is_hit(ang, eta_offset, ome_offset, hkl, valid_eta_spans, ome = omeIndices[ome_idx] isHit = _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMaps[hkl], threshold[hkl]) - - return isHit, 1 - + if isHit == -1: + return 0, 0 + else: + return isHit, 1 @numba.njit def _filter_and_count_hits(angs_0, angs_1, symHKLs_ix, etaEdges, @@ -1126,40 +1274,45 @@ def _filter_and_count_hits(angs_0, angs_1, symHKLs_ix, etaEdges, if i >= end_curr: curr_hkl_idx += 1 end_curr = symHKLs_ix[curr_hkl_idx+1] - hit, not_filtered = _angle_is_hit(angs_0[i], eta_offset, ome_offset, - curr_hkl_idx, valid_eta_spans, - valid_ome_spans, etaEdges, - omeEdges, etaOmeMaps, etaIndices, - omeIndices, dpix_eta, dpix_ome, - threshold) + + # first solution + hit, not_filtered = _angle_is_hit( + angs_0[i], eta_offset, ome_offset, + curr_hkl_idx, valid_eta_spans, + valid_ome_spans, etaEdges, + omeEdges, etaOmeMaps, etaIndices, + omeIndices, dpix_eta, dpix_ome, + threshold) hits += hit total += not_filtered - hit, not_filtered = _angle_is_hit(angs_1[i], eta_offset, ome_offset, - curr_hkl_idx, valid_eta_spans, - valid_ome_spans, etaEdges, - omeEdges, etaOmeMaps, etaIndices, - omeIndices, dpix_eta, dpix_ome, - threshold) + + # second solution + hit, not_filtered = _angle_is_hit( + angs_1[i], eta_offset, ome_offset, + curr_hkl_idx, valid_eta_spans, + valid_ome_spans, etaEdges, + omeEdges, etaOmeMaps, etaIndices, + omeIndices, dpix_eta, dpix_ome, + threshold) hits += hit total += not_filtered return float(hits)/float(total) if total != 0 else 0.0 - @numba.njit def _map_angle(angle, offset): - """Equivalent to xf.mapAngle in this context, and 'numba friendly' - """ - return num.mod(angle-offset, 2*num.pi)+offset + """ + Equivalent to xf.mapAngle in this context, and 'numba friendly' + """ + return num.mod(angle - offset, 2*num.pi) + offset # use a jitted version of _check_dilated _check_dilated = numba.njit(_check_dilated) else: def paintGridThis(quat): # unmarshall parameters into local variables - symHKLs = paramMP['symHKLs'] # the HKLs - symHKLs_ix = paramMP['symHKLs_ix'] # index partitioning of symHKLs + symHKLs = paramMP['symHKLs'] # the HKLs + symHKLs_ix = paramMP['symHKLs_ix'] # index partitioning of symHKLs bMat = paramMP['bMat'] wavelength = paramMP['wavelength'] omeEdges = paramMP['omeEdges'] @@ -1184,7 +1337,7 @@ def paintGridThis(quat): debug = False if debug: - print( "using ome, eta dilitations of (%d, %d) pixels" \ + print("using ome, eta dilations of (%d, %d) pixels" % (dpix_ome, dpix_eta)) # get the equivalent rotation of the quaternion in matrix form (as @@ -1201,13 +1354,15 @@ def paintGridThis(quat): valid_ome_spans, omePeriod) if len(hkl_idx > 0): - hits = _count_hits(eta_idx, ome_idx, hkl_idx, etaOmeMaps, - etaIndices, omeIndices, dpix_eta, dpix_ome, - threshold) - retval = float(hits) / float(len(hkl_idx)) - else: - retval = 0 - + hits, predicted = _count_hits( + eta_idx, ome_idx, hkl_idx, etaOmeMaps, + etaIndices, omeIndices, dpix_eta, dpix_ome, + threshold + ) + retval = float(hits) / float(predicted) + if retval > 1: + import pdb + pdb.set_trace() + else: + retval = 0. return retval def _normalize_angs_hkls(angs_0, angs_1, omePeriod, symHKLs_ix): @@ -1225,15 +1380,14 @@ def _normalize_angs_hkls(angs_0, angs_1, omePeriod, symHKLs_ix): symHKLs_ix = symHKLs_ix*2 hkl_idx = num.empty((symHKLs_ix[-1],), dtype=int) start = symHKLs_ix[0] - idx=0 + idx = 0 for end in 
symHKLs_ix[1:]: hkl_idx[start:end] = idx start = end - idx+=1 + idx += 1 return oangs, hkl_idx - def _filter_angs(angs_0, angs_1, symHKLs_ix, etaEdges, valid_eta_spans, omeEdges, valid_ome_spans, omePeriod): """ @@ -1247,9 +1401,9 @@ def _filter_angs(angs_0, angs_1, symHKLs_ix, etaEdges, valid_eta_spans, """ oangs, hkl_idx = _normalize_angs_hkls(angs_0, angs_1, omePeriod, symHKLs_ix) - # using "right" side to make sure we always get an index *past* the value - # if it happens to be equal. That is... we search the index of the first - # value that is "greater than" rather than "greater or equal" + # using "right" side to make sure we always get an index *past* the + # value if it happens to be equal. That is... we search the index of + # the first value that is "greater than" rather than "greater or equal" culled_eta_indices = num.searchsorted(etaEdges, oangs[:, 1], side='right') culled_ome_indices = num.searchsorted(omeEdges, oangs[:, 2], @@ -1259,19 +1413,23 @@ def _filter_angs(angs_0, angs_1, symHKLs_ix, etaEdges, valid_eta_spans, # The spans contains an ordered sucession of start and end angles which # form the valid angle spans. So knowing if an angle is valid is # equivalent to finding the insertion point in the spans array and - # checking if the resulting insertion index is odd or even. An odd value - # means that it falls between a start and a end point of the "valid - # span", meaning it is a hit. An even value will result in either being - # out of the range (0 or the last index, as length is even by - # construction) or that it falls between a "end" point from one span and - # the "start" point of the next one. + # checking if the resulting insertion index is odd or even. + # An odd value means that it falls between a start and an end point of + # the "valid span", meaning it is a hit. An even value will result in + # either being out of the range (0 or the last index, as length is even + # by construction) or that it falls between an "end" point from one span + # and the "start" point of the next one.
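+ # a tiny worked example of the odd/even check (values illustrative only): + # for spans = [0.0, 1.0, 2.0, 3.0], i.e. valid spans [0, 1] and [2, 3]: + # searchsorted(spans, 0.5, side='right') -> 1 (odd: inside a span) + # searchsorted(spans, 1.5, side='right') -> 2 (even: between spans) + # searchsorted(spans, 3.5, side='right') -> 4 (even: past the last stop)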
+ valid_eta = num.searchsorted( + valid_eta_spans, oangs[:, 1], side='right' + ) + valid_ome = num.searchsorted( + valid_ome_spans, oangs[:, 2], side='right' + ) # fast odd/even check valid_eta = valid_eta & 1 valid_ome = valid_ome & 1 # Create a mask of the good ones - valid = ~num.isnan(oangs[:, 0]) # tth not NaN + valid = ~num.isnan(oangs[:, 0]) # tth not NaN valid = num.logical_and(valid, valid_eta) valid = num.logical_and(valid, valid_ome) valid = num.logical_and(valid, culled_eta_indices > 0) @@ -1285,7 +1443,6 @@ def _filter_angs(angs_0, angs_1, symHKLs_ix, etaEdges, valid_eta_spans, return hkl_idx, eta_idx, ome_idx - def _count_hits(eta_idx, ome_idx, hkl_idx, etaOmeMaps, etaIndices, omeIndices, dpix_eta, dpix_ome, threshold): """ @@ -1307,10 +1464,12 @@ def _count_hits(eta_idx, ome_idx, hkl_idx, etaOmeMaps, isHit = _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMaps[iHKL], threshold[iHKL]) - if isHit: + if isHit > 0: hits += 1 + if isHit == -1: + predicted -= 1 - return hits + return hits, predicted def writeGVE(spotsArray, fileroot, **kwargs): @@ -1363,19 +1522,18 @@ def writeGVE(spotsArray, fileroot, **kwargs): assert isinstance(fileroot, str) # keyword argument processing - phaseID = None - sgNum = 225 - cellString = 'P' - omeRange = num.r_[-60, 60] # in DEGREES - deltaOme = 0.25 # in DEGREES - minMeas = 24 - minCompl = 0.7 - minUniqn = 0.5 - uncertainty = [0.10, 0.25, .50] # in DEGREES - eulStep = 2 # in DEGREES - nSigmas = 2 - minFracG = 0.90 - numTrials = 100000 + phaseID = None + sgNum = 225 + cellString = 'P' + deltaOme = 0.25 # in DEGREES + minMeas = 24 + minCompl = 0.7 + minUniqn = 0.5 + uncertainty = [0.10, 0.25, .50] # in DEGREES + eulStep = 2 # in DEGREES + nSigmas = 2 + minFracG = 0.90 + numTrials = 100000 positionFit = True kwarglen = len(kwargs) @@ -1389,8 +1547,6 @@ def writeGVE(spotsArray, fileroot, **kwargs): phaseID = kwargs[argkeys[i]] elif argkeys[i] == 'cellString': cellString = kwargs[argkeys[i]] - elif argkeys[i] == 'omeRange': - omeRange = kwargs[argkeys[i]] elif argkeys[i] == 'deltaOme': deltaOme = kwargs[argkeys[i]] elif argkeys[i] == 'minMeas': @@ -1428,19 +1584,17 @@ def writeGVE(spotsArray, fileroot, **kwargs): yc_p = ncols_p - col_p zc_p = nrows_p - row_p - wd_mu = spotsArray.detectorGeom.workDist * 1e3 # in microns (Soeren) + wd_mu = spotsArray.detectorGeom.workDist * 1e3 # in microns (Soeren) osc_axis = num.dot(fableSampCOB.T, Yl).flatten() # start grabbing stuff from planeData planeData = spotsArray.getPlaneData(phaseID=phaseID) - cellp = planeData.latVecOps['dparms'] - U0 = planeData.latVecOps['U0'] - wlen = planeData.wavelength - dsp = planeData.getPlaneSpacings() - fHKLs = planeData.getSymHKLs() - tThRng = planeData.getTThRanges() - symTag = planeData.getLaueGroup() + cellp = planeData.latVecOps['dparms'] + wlen = planeData.wavelength + dsp = planeData.getPlaneSpacings() + fHKLs = planeData.getSymHKLs() + tThRng = planeData.getTThRanges() # single range should be ok since entering hkls tThMin, tThMax = (r2d*tThRng.min(), r2d*tThRng.max()) @@ -1477,13 +1631,13 @@ def writeGVE(spotsArray, fileroot, **kwargs): gvecString = '' spotsIter = spotsArray.getIterPhase(phaseID, returnBothCoordTypes=True) for iSpot, angCOM, xyoCOM in spotsIter: - sR, sC, sOme = xyoCOM # detector coords - sTTh, sEta, sOme = angCOM # angular coords (radians) - sDsp = wlen / 2. / num.sin(0.5*sTTh) # dspacing + sR, sC, sOme = xyoCOM # detector coords + sTTh, sEta, sOme = angCOM # angular coords (radians) + sDsp = wlen / 2. 
/ num.sin(0.5*sTTh) # dspacing - # get raw y, z (Fable frame) - yraw = ncols_p - sC - zraw = nrows_p - sR + # # get raw y, z (Fable frame) + # yraw = ncols_p - sC + # zraw = nrows_p - sR # convert eta to fable frame rEta = mapAngle(90. - r2d*sEta, [0, 360], units='degrees') @@ -1510,40 +1664,42 @@ def writeGVE(spotsArray, fileroot, **kwargs): # write gve file for grainspotter fid = open(fileroot+'.gve', 'w') - print >> fid, '%1.8f %1.8f %1.8f %1.8f %1.8f %1.8f ' % tuple(cellp) \ - + cellString + '\n' \ - + '# wavelength = %1.8f\n' % (wlen) \ - + '# wedge = 0.000000\n' \ - + '# axis = %d %d %d\n' % tuple(osc_axis) \ - + '# cell__a %1.4f\n' %(cellp[0]) \ - + '# cell__b %1.4f\n' %(cellp[1]) \ - + '# cell__c %1.4f\n' %(cellp[2]) \ - + '# cell_alpha %1.4f\n' %(cellp[3]) \ - + '# cell_beta %1.4f\n' %(cellp[4]) \ - + '# cell_gamma %1.4f\n' %(cellp[5]) \ - + '# cell_lattice_[P,A,B,C,I,F,R] %s\n' %(cellString) \ - + '# chi 0.0\n' \ - + '# distance %.4f\n' %(wd_mu) \ - + '# fit_tolerance 0.5\n' \ - + '# o11 1\n' \ - + '# o12 0\n' \ - + '# o21 0\n' \ - + '# o22 -1\n' \ - + '# omegasign %1.1f\n' %(num.sign(deltaOme)) \ - + '# t_x 0\n' \ - + '# t_y 0\n' \ - + '# t_z 0\n' \ - + '# tilt_x 0.000000\n' \ - + '# tilt_y 0.000000\n' \ - + '# tilt_z 0.000000\n' \ - + '# y_center %.6f\n' %(yc_p) \ - + '# y_size %.6f\n' %(mmPerPixel*1.e3) \ - + '# z_center %.6f\n' %(zc_p) \ - + '# z_size %.6f\n' %(mmPerPixel*1.e3) \ - + '# ds h k l\n' \ - + gvecHKLString \ - + '# xr yr zr xc yc ds eta omega\n' \ - + gvecString + print( + '%1.8f %1.8f %1.8f %1.8f %1.8f %1.8f ' % tuple(cellp) + + cellString + '\n' + + '# wavelength = %1.8f\n' % (wlen) + + '# wedge = 0.000000\n' + + '# axis = %d %d %d\n' % tuple(osc_axis) + + '# cell__a %1.4f\n' % cellp[0] + + '# cell__b %1.4f\n' % cellp[1] + + '# cell__c %1.4f\n' % cellp[2] + + '# cell_alpha %1.4f\n' % cellp[3] + + '# cell_beta %1.4f\n' % cellp[4] + + '# cell_gamma %1.4f\n' % cellp[5] + + '# cell_lattice_[P,A,B,C,I,F,R] %s\n' % cellString + + '# chi 0.0\n' + + '# distance %.4f\n' % wd_mu + + '# fit_tolerance 0.5\n' + + '# o11 1\n' + + '# o12 0\n' + + '# o21 0\n' + + '# o22 -1\n' + + '# omegasign %1.1f\n' % num.sign(deltaOme) + + '# t_x 0\n' + + '# t_y 0\n' + + '# t_z 0\n' + + '# tilt_x 0.000000\n' + + '# tilt_y 0.000000\n' + + '# tilt_z 0.000000\n' + + '# y_center %.6f\n' % yc_p + + '# y_size %.6f\n' % (mmPerPixel*1.e3) + + '# z_center %.6f\n' % zc_p + + '# z_size %.6f\n' % (mmPerPixel*1.e3) + + '# ds h k l\n' + + gvecHKLString + + '# xr yr zr xc yc ds eta omega\n' + + gvecString, file=fid + ) fid.close() ############################################################### @@ -1562,19 +1718,23 @@ def writeGVE(spotsArray, fileroot, **kwargs): fid = open(fileroot+'_grainSpotter.ini', 'w') # self.__tempFNameList.append(fileroot) - print >> fid, 'spacegroup %d\n' % (sgNum) \ - + 'tthrange %g %g\n' % (tThMin, tThMax) \ - + 'etarange %g %g\n' % (etaMin, etaMax) \ - + 'domega %g\n' % (deltaOme) \ - + omeRangeString + \ - + 'filespecs %s.gve %s_grainSpotter.log\n' % (fileroot, fileroot) \ - + 'cuts %d %g %g\n' % (minMeas, minCompl, minUniqn) \ - + 'eulerstep %g\n' % (eulStep) \ - + 'uncertainties %g %g %g\n' \ - % (uncertainty[0], uncertainty[1], uncertainty[2]) \ - + 'nsigmas %d\n' % (nSigmas) \ - + 'minfracg %g\n' % (minFracG) \ - + randomString \ - + positionString + '\n' + print( + 'spacegroup %d\n' % (sgNum) + + 'tthrange %g %g\n' % (tThMin, tThMax) + + 'etarange %g %g\n' % (etaMin, etaMax) + + 'domega %g\n' % (deltaOme) + + omeRangeString + + 'filespecs %s.gve %s_grainSpotter.log\n' %
(fileroot, fileroot) + + 'cuts %d %g %g\n' % (minMeas, minCompl, minUniqn) + + 'eulerstep %g\n' % (eulStep) + + 'uncertainties %g %g %g\n' % (uncertainty[0], + uncertainty[1], + uncertainty[2]) + + 'nsigmas %d\n' % (nSigmas) + + 'minfracg %g\n' % (minFracG) + + randomString + + positionString + + '\n', file=fid + ) fid.close() return diff --git a/hexrd/xrd/material.py b/hexrd/xrd/material.py index 0fe77399..741e0ef7 100644 --- a/hexrd/xrd/material.py +++ b/hexrd/xrd/material.py @@ -61,8 +61,8 @@ class Material(object): DFLT_SSMAX = 50 DFLT_KEV = valWUnit('wavelength', 'energy', 80.725e0, 'keV') - DFLT_STR = 0.002 - DFLT_TTH = 0.002 + DFLT_STR = 0.0025 + DFLT_TTH = numpy.radians(0.25) DFLT_ATOMINFO = numpy.array([[0,0,0,1]]) """Fractional Atom Position of an atom in the unit cell followed by the number of electrons within that atom. The max number of electrons is 96. diff --git a/hexrd/xrd/rotations.py b/hexrd/xrd/rotations.py index 5ad8da84..63ae23a0 100644 --- a/hexrd/xrd/rotations.py +++ b/hexrd/xrd/rotations.py @@ -10,9 +10,9 @@ # # Please also see the file LICENSE. # -# This program is free software; you can redistribute it and/or modify it under the -# terms of the GNU Lesser General Public License (as published by the Free Software -# Foundation) version 2.1 dated February 1999. +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY @@ -28,13 +28,14 @@ # # Module containing functions relevant to rotations # -import sys, os, time +import sys +import time import numpy from numpy import \ - arange, array, asarray, atleast_1d, average, \ - ndarray, diag, empty, ones, zeros, \ - cross, dot, pi, arccos, arcsin, cos, sin, sqrt, \ - sort, squeeze, tile, vstack, hstack, r_, c_, ix_, \ + arange, arctan2, array, asarray, atleast_1d, average, \ + ndarray, diag, eye, zeros, \ + cross, dot, outer, pi, arccos, arcsin, cos, sin, sqrt, \ + sort, tile, vstack, hstack, c_, ix_, \ abs, mod, sign, \ finfo, isscalar from numpy import float_ as nFloat @@ -46,19 +47,22 @@ multMatArray, nullSpace # # Module Data -tinyRotAng = finfo(float).eps # ~2e-16 -angularUnits = 'radians' # module-level angle units -I3 = array([[1., 0., 0.], # (3, 3) identity - [0., 1., 0.], - [0., 0., 1.]]) +tinyRotAng = finfo(float).eps # ~2e-16 +angularUnits = 'radians' # module-level angle units +I3 = array([[1., 0., 0.], # (3, 3) identity + [0., 1., 0.], + [0., 0., 1.]]) # periodDict = {'degrees': 360.0, 'radians': 2*numpy.pi} + + # # ================================================== Functions # def arccosSafe(temp): """ - Protect against numbers slightly larger than 1 in magnitude due to round-off + Protect against numbers slightly larger than 1 in magnitude + due to round-off """ temp = atleast_1d(temp) if (abs(temp) > 1.00001).any(): @@ -635,6 +639,94 @@ def angleAxisOfRotMat(R): raxis[:, special] = saxis return angle, unitVector(raxis) + + +def make_rmat_euler(tilt_angles, axes_order, extrinsic=True): + """ + extrinsic (PASSIVE) or intrinsic (ACTIVE) by kw + tilt_angles are in RADIANS + """ + axes = numpy.eye(3) + + axes_dict = dict(x=0, y=1, z=2) + + # axes orders, all permutations + orders = [ + 'xyz', 'zyx', + 'zxy', 'yxz', + 'yzx', 'xzy', + 'xyx', 'xzx', + 'yxy', 'yzy', + 'zxz', 'zyz', + ] + + axo = 
axes_order.lower() + assert axo in orders and len(axes_order) == 3, \ + '%s is not a valid choice' % axes_order + + if extrinsic: + rmats = numpy.zeros((3, 3, 3)) + for i, ax in enumerate(axo): + rmats[i] = rotMatOfExpMap( + tilt_angles[i]*axes[axes_dict[ax]] + ) + return numpy.dot(rmats[2], numpy.dot(rmats[1], rmats[0])) + else: + rm0 = rotMatOfExpMap( + tilt_angles[0]*axes[axes_dict[axo[0]]] + ) + rm1 = rotMatOfExpMap( + tilt_angles[1]*rm0[:, axes_dict[axo[1]]] + ) + rm2 = rotMatOfExpMap( + tilt_angles[2]*numpy.dot(rm1, rm0[:, axes_dict[axo[2]]]) + ) + return numpy.dot(rm2, numpy.dot(rm1, rm0)) + + +def angles_from_rmat_xyz(rmat): + """ + calculate x-y-z euler angles from a rotation matrix in + the PASSIVE convention + """ + eps = sqrt(finfo('float').eps) + ry = -arcsin(rmat[2, 0]) + sgny = sign(ry) + if abs(ry) < 0.5*pi - eps: + cosy = cos(ry) + rz = arctan2(rmat[1, 0]/cosy, rmat[0, 0]/cosy) + rx = arctan2(rmat[2, 1]/cosy, rmat[2, 2]/cosy) + else: + rz = 0.5*arctan2(sgny*rmat[1, 2], sgny*rmat[0, 2]) + if sgny > 0: + rx = -rz + else: + rx = rz + return rx, ry, rz + + +def angles_from_rmat_zxz(rmat): + """ + calculate z-x-z euler angles from a rotation matrix in + the ACTIVE convention + + alpha, beta, gamma + """ + if abs(rmat[2, 2]) > 1. - sqrt(finfo('float').eps): + beta = 0. + alpha = arctan2(rmat[1, 0], rmat[0, 0]) + gamma = 0. + else: + xnew = rmat[:, 0] + znew = rmat[:, 2] + alpha = arctan2(znew[0], -znew[1]) + rma = rotMatOfExpMap(alpha*c_[0., 0., 1.].T) + znew1 = dot(rma.T, znew) + beta = arctan2(-znew1[1], znew1[2]) + rmb = rotMatOfExpMap(beta*c_[cos(alpha), sin(alpha), 0.].T) + xnew2 = dot(rma.T, dot(rmb.T, xnew)) + gamma = arctan2(xnew2[1], xnew2[0]) + return alpha, beta, gamma # # ==================== Fiber # diff --git a/hexrd/xrd/transforms.py b/hexrd/xrd/transforms.py index 08fcde4f..f82893a8 100644 --- a/hexrd/xrd/transforms.py +++ b/hexrd/xrd/transforms.py @@ -789,7 +789,7 @@ def _unitVectorMulti(a, b): else: for i in range(n): b[i, j] = a[i, j] - + def unitVector(a): """ @@ -893,7 +893,7 @@ def _makeEtaFrameRotMat(bHat_l, eHat_l, out): # bHat_l and eHat_l CANNOT have 0 magnitude! # must catch this case as well as colinear bHat_l/eHat_l elsewhere... bHat_mag = np.sqrt(bHat_l[0]**2 + bHat_l[1]**2 + bHat_l[2]**2) - + # assign Ze as -bHat_l for i in range(3): out[i, 2] = -bHat_l[i] / bHat_mag @@ -933,10 +933,10 @@ def makeEtaFrameRotMat(bHat_l, eHat_l): def makeEtaFrameRotMat(bHat_l, eHat_l): """ make eta basis COB matrix with beam antiparallel with Z - + takes components from ETA frame to LAB """ - # normalize input + # normalize input bHat_l = unitVector(bHat_l.reshape(3, 1)) eHat_l = unitVector(eHat_l.reshape(3, 1)) @@ -945,11 +945,35 @@ def makeEtaFrameRotMat(bHat_l, eHat_l): if np.sqrt(np.sum(Ye*Ye)) < 1e-8: raise RuntimeError, "bHat_l and eHat_l must NOT be colinear!" 
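+ # NB: a norm this small means eHat_l is numerically parallel to bHat_l, + # so the cross product cannot define the eta reference direction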
Ye = unitVector(Ye.reshape(3, 1)) - + # find Xe as cross(bHat_l, Ye) Xe = np.cross(bHat_l.flatten(), Ye.flatten()).reshape(3, 1) return np.hstack([Xe, Ye, -bHat_l]) +def angles_in_range(angles, starts, stops, degrees=True): + """Determine whether angles lie in or out of specified ranges + + *angles* - a list/array of angles + *starts* - a list of range starts + *stops* - a list of range stops + + OPTIONAL ARGS: + *degrees* - [True] angles & ranges in degrees (or radians) +""" + TAU = 360.0 if degrees else 2*np.pi + nw = len(starts) + na = len(angles) + in_range = np.zeros((na), dtype=bool) + for i in range(nw): + amin = starts[i] + amax = stops[i] + for j in range(na): + a = angles[j] + acheck = amin + np.mod(a - amin, TAU) + if acheck <= amax: + in_range[j] = True + + return in_range def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): """ diff --git a/hexrd/xrd/transforms_CAPI.py b/hexrd/xrd/transforms_CAPI.py index 6b9334da..0b4d9fac 100644 --- a/hexrd/xrd/transforms_CAPI.py +++ b/hexrd/xrd/transforms_CAPI.py @@ -137,6 +137,9 @@ def gvecToDetectorXY(gVec_c, (m, 2) ndarray containing the intersections of m <= n diffracted beams associated with gVecs """ + rMat_d = np.ascontiguousarray( rMat_d ) + rMat_s = np.ascontiguousarray( rMat_s ) + rMat_c = np.ascontiguousarray( rMat_c ) gVec_c = np.ascontiguousarray( np.atleast_2d( gVec_c ) ) tVec_d = np.ascontiguousarray( tVec_d.flatten() ) tVec_s = np.ascontiguousarray( tVec_s.flatten() ) @@ -172,7 +175,9 @@ def gvecToDetectorXYArray(gVec_c, associated with gVecs """ gVec_c = np.ascontiguousarray( gVec_c ) + rMat_d = np.ascontiguousarray( rMat_d ) rMat_s = np.ascontiguousarray( rMat_s ) + rMat_c = np.ascontiguousarray( rMat_c ) tVec_d = np.ascontiguousarray( tVec_d.flatten() ) tVec_s = np.ascontiguousarray( tVec_s.flatten() ) tVec_c = np.ascontiguousarray( tVec_c.flatten() ) @@ -209,6 +214,8 @@ def detectorXYToGvec(xy_det, associated with gVecs """ xy_det = np.ascontiguousarray( np.atleast_2d(xy_det) ) + rMat_d = np.ascontiguousarray( rMat_d ) + rMat_s = np.ascontiguousarray( rMat_s ) tVec_d = np.ascontiguousarray( tVec_d.flatten() ) tVec_s = np.ascontiguousarray( tVec_s.flatten() ) tVec_c = np.ascontiguousarray( tVec_c.flatten() ) @@ -219,6 +226,46 @@ def detectorXYToGvec(xy_det, tVec_d, tVec_s, tVec_c, beamVec, etaVec) +def detectorXYToGvecArray(xy_det, + rMat_d, rMat_s, + tVec_d, tVec_s, tVec_c, + beamVec=bVec_ref, etaVec=eta_ref): + """ + Takes a list of cartesian (x, y) pairs in the detector coordinates and calculates + the associated reciprocal lattice (G) vectors and (Bragg angle, azimuth) pairs + with respect to the specified beam and azimuth (eta) reference directions + + Required Arguments: + xy_det -- (n, 2) ndarray or list-like input of n detector (x, y) points + rMat_d -- (3, 3) ndarray, the COB taking DETECTOR FRAME components to LAB FRAME + rMat_s -- (n, 3, 3) ndarray, the COB taking SAMPLE FRAME components to LAB FRAME + tVec_d -- (3, 1) ndarray, the translation vector connecting LAB to DETECTOR in LAB + tVec_s -- (3, 1) ndarray, the translation vector connecting LAB to SAMPLE in LAB + tVec_c -- (3, 1) ndarray, the translation vector connecting SAMPLE to CRYSTAL in SAMPLE + + Optional Keyword Arguments: + beamVec -- (3, 1) ndarray containing the incident beam direction components in the LAB FRAME + etaVec -- (3, 1) ndarray containing the reference azimuth direction components in the LAB FRAME + + Outputs: + (n, 2) ndarray containing the (tTh, eta) pairs associated with each (x, y) + (n, 3) ndarray containing the 
associated G vector directions in the LAB FRAME + """ + xy_det = np.ascontiguousarray( np.atleast_2d(xy_det) ) + rMat_d = np.ascontiguousarray( rMat_d ) + rMat_s = np.ascontiguousarray( rMat_s ) + tVec_d = np.ascontiguousarray( tVec_d.flatten() ) + tVec_s = np.ascontiguousarray( tVec_s.flatten() ) + tVec_c = np.ascontiguousarray( tVec_c.flatten() ) + beamVec = np.ascontiguousarray( beamVec.flatten() ) + etaVec = np.ascontiguousarray( etaVec.flatten() ) + return _transforms_CAPI.detectorXYToGvec(xy_det, + rMat_d, rMat_s, + tVec_d, tVec_s, tVec_c, + beamVec, etaVec) + + def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, vInv=None, beamVec=bVec_ref, etaVec=eta_ref): """ @@ -416,6 +463,7 @@ def rowNorm(a): return cnrma def unitRowVector(vecIn): + vecIn = np.ascontiguousarray(vecIn) if vecIn.ndim == 1: return _transforms_CAPI.unitRowVector(vecIn) elif vecIn.ndim == 2: @@ -445,7 +493,7 @@ def makeOscillRotMatArray(chi, omeArray): chi value and an array of omega values. """ arg = np.ascontiguousarray(omeArray) - return _transforms_CAPI.makeOscillRotMatArray(chi, omeArray) + return _transforms_CAPI.makeOscillRotMatArray(chi, arg) def makeRotMatOfExpMap(expMap): """ diff --git a/hexrd/xrd/xrdutil.py b/hexrd/xrd/xrdutil.py index 3bc6cfd4..cf4ebd32 100644 --- a/hexrd/xrd/xrdutil.py +++ b/hexrd/xrd/xrdutil.py @@ -11,9 +11,9 @@ # # Please also see the file LICENSE. # -# This program is free software; you can redistribute it and/or modify it under the -# terms of the GNU Lesser General Public License (as published by the Free Software -# Foundation) version 2.1 dated February 1999. +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY @@ -32,25 +32,27 @@ from math import pi import shelve - +import h5py import numpy as num +from numpy.ctypeslib import ctypes + from scipy import sparse from scipy.linalg import svd from scipy import ndimage -import scipy.optimize as opt import matplotlib -from matplotlib.widgets import Slider, Button, RadioButtons -from matplotlib import cm, colors +from matplotlib.widgets import Slider +from matplotlib import cm from matplotlib import collections +from hexrd import constants from hexrd import plotwrap from hexrd import tens from hexrd import matrixutil as mutil from hexrd import pfigutil from hexrd import gridutil as gutil -from hexrd.valunits import toFloat +from hexrd.valunits import valWUnit from hexrd import USE_NUMBA import hexrd.orientations as ors @@ -71,7 +73,7 @@ from hexrd.xrd import transforms as xf from hexrd.xrd import transforms_CAPI as xfcapi -from hexrd.xrd import distortion +from hexrd.xrd import distortion as distortion_module #from hexrd.cacheframes import get_frames #from hexrd.coreutil import get_instrument_parameters @@ -92,7 +94,7 @@ debugDflt = False -dFunc_ref = distortion.dummy +dFunc_ref = distortion_module.dummy dParams_ref = [] d2r = piby180 = num.pi/180.
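The np.ascontiguousarray guards added throughout transforms_CAPI above matter because the C extension expects C-ordered, contiguous buffers; transposed or sliced array views would otherwise reach the C layer with the wrong strides. A minimal standalone sketch of what the guard does (illustrative only, not code from this patch):

    import numpy as np

    rmat = np.eye(3)[:, ::-1]            # a sliced view: valid data, non-contiguous
    print(rmat.flags['C_CONTIGUOUS'])    # False -- unsafe to hand to the C API
    rmat = np.ascontiguousarray(rmat)    # no-op if already contiguous, else copies
    print(rmat.flags['C_CONTIGUOUS'])    # True -- safe to pass down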
@@ -102,6 +104,14 @@ ten_epsf = 10 * epsf # ~2.2e-15 sqrt_epsf = num.sqrt(epsf) # ~1.5e-8 +bHat_l_DFLT = constants.beam_vec.flatten() +eHat_l_DFLT = constants.eta_vec.flatten() + +nans_2 = num.nan*num.ones(2) +nans_3 = num.nan*num.ones(3) +nans_6 = num.nan*num.ones(6) + + class FormatEtaOme: 'for plotting data as a matrix, with ijAsXY=True' def __init__(self, etas, omes, A, T=False, debug=False): @@ -497,13 +507,14 @@ def makePathVariantPoles(rMatRef, fromPhase, return qVecList + def displayPathVariants(data, rMatRef, fromPhase, pathList, planeDataDict, detectorGeom, omeMin, omeMax, phaseForDfltPD=None, - markerList = markerListDflt, - hklList = None, + markerList=markerListDflt, + hklList=None, color=None, pointKWArgs={}, hklIDs=None, pw=None): @@ -1095,7 +1106,7 @@ def __display(self, omeEdges, etaEdges, data, nVecs, nP, opacity, rangeVV_w): 'for colorbar, make a mappable so that the range shown is correct' mappable = cm.ScalarMappable(cmap=self.cmap, norm=norm) mappable.set_array(vals) - forColorBar = mappable + #forColorBar = mappable else: pfigR = pfigutil.renderEAProj(nVecsN, vals[northern], nP) @@ -1112,7 +1123,7 @@ def __display(self, omeEdges, etaEdges, data, nVecs, nP, opacity, rangeVV_w): # if opacity is not None: # raise RuntimeError, 'not coded: opacity for non-rendered pole figure, specify integer-valued nP' conn = makeMNConn(len(omeEdges), len(etaEdges), tri=False) - nQuads = conn.shape[1] + #nQuads = conn.shape[1] #nVecsPatches = num.empty([nQuads, 4, 3]) #verts = num.empty([nQuads, 4, 2]) #vals = num.minimum(num.maximum(data[:,:].flatten(), vmin), vmax)# handled with set_clim @@ -1415,7 +1426,8 @@ def save(self, *args, **kwargs): self.p.save(*args, **kwargs) class CollapseOmeEta(object): - """ + """ MARKED FOR DELETION + Can pass a mask to use in addition to whatever the readers are already set up to do; with frames set zero where mask is True @@ -1705,34 +1717,270 @@ def getEtaOmeMaps(self): -class EtaOmeMaps(object): +class GenerateEtaOmeMaps(object): + """ + eta-ome map class derived from new image_series and YAML config + + ...for now... + + must provide: + + self.dataStore + self.planeData + self.iHKLList + self.etaEdges # IN RADIANS + self.omeEdges # IN RADIANS + self.etas # IN RADIANS + self.omegas # IN RADIANS + + """ + def __init__(self, image_series, instrument_params, planeData, active_hkls, + ome_step=0.25, eta_step=None, npdiv=2, threshold=None): + """ + image_series must be OmegaImageSeries class + instrument_params must be a dict (loaded from yaml spec) + active_hkls must be a list (required for now) + """ + # ome and eta steps are in DEGREES + self._ome_step = ome_step + + if eta_step is None: + self._eta_step = abs( + image_series.omega[0, 1] - image_series.omega[0, 0] + ) + else: + self._eta_step = abs(eta_step) # just in case negative... + + # ...TO DO: change name of iHKLList? + self._iHKLList = active_hkls + self._planeData = planeData + + """ + eta range is forced to be [-180, 180] for now, so step must be positive + + step is same as omega unless specified (in degrees) + + ...TO DO: FIX FOR GENERAL RANGE ONCE ETA_PERIOD IS SPEC'D + """ + num_eta = int(360./float(abs(self._eta_step))) + eta_step_r = num.radians(self._eta_step) + self._etas = eta_step_r*(num.arange(num_eta) + 0.5) - num.pi # RADIANS! 
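+ # e.g., for an eta step of 0.25 deg: num_eta = int(360/0.25) = 1440 bins, + # with centers (stored in radians) running from -179.875 deg to + # +179.875 deg; etaEdges below then holds num_eta + 1 = 1441 bin edges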
+ self._etaEdges = num.hstack([self._etas - 0.5*eta_step_r, + self._etas[-1] + 0.5*eta_step_r]) + + """ + omegas come from image series directly + """ + # image series omegas have shape (nframes, 2) + self._omegas = num.radians(num.average(image_series.omega, axis=1)) + self._omeEdges = num.radians( + num.concatenate( + [image_series.omega[:, 0].flatten(), image_series.omega[-1, 1].flatten()] + ) + ) + """ + construct patches in place on init + + ...TO DO: rename to 'maps'? + """ + ij_patches = [] + + # grab relevant tolerances for patches + tth_tol = num.degrees(self._planeData.tThWidth) + eta_tol = num.degrees(abs(self._etas[1] - self._etas[0])) + + # grab distortion + if instrument_params['detector']['distortion']['function_name'] is None: + distortion = None + else: + # ...THIS IS STILL A KLUDGE!!!!! + distortion = (xf.dFunc_ref, + num.r_[instrument_params['detector']['distortion']['parameters']] + ) + + # stack parameters + detector_params = num.hstack([ + instrument_params['detector']['transform']['tilt'], + instrument_params['detector']['transform']['translation'], + instrument_params['oscillation_stage']['chi'], + instrument_params['oscillation_stage']['translation'], + ]) + pixel_pitch = instrument_params['detector']['pixels']['size'] + + # 6 detector affine xform parameters + rMat_d = xfcapi.makeDetectorRotMat(detector_params[:3]) + tVec_d = detector_params[3:6] + + # 'dummy' sample frame rot mat + rMat_s = num.eye(3) + tVec_s = num.zeros(3) + + # since making maps for all eta, must hand trivial crystal params + rMat_c = num.eye(3) + tVec_c = num.zeros(3) + + # make full angs list (tth, eta, 0.) + angs = [num.vstack([tth*num.ones(num_eta), + self._etas, + num.zeros(num_eta)]) \ + for tth in self._planeData.getTTh()[active_hkls]] + + for i_ring in range(len(angs)): + # need xy coords and pixel sizes + gVec_ring_l = xfcapi.anglesToGVec(angs[i_ring].T) + xydet_ring = xfcapi.gvecToDetectorXY(gVec_ring_l, + rMat_d, rMat_s, rMat_c, + tVec_d, tVec_s, tVec_c) + + if distortion is not None: + det_xy = distortion[0](xydet_ring, + distortion[1], + invert=True) + else: + det_xy = xydet_ring + ang_ps = angularPixelSize(det_xy, pixel_pitch, + rMat_d, rMat_s, + tVec_d, tVec_s, tVec_c, + distortion=distortion) + + patches = make_reflection_patches(instrument_params, + angs[i_ring].T[:, :2], ang_ps, + omega=None, + tth_tol=tth_tol, eta_tol=eta_tol, + npdiv=npdiv, quiet=False, + compute_areas_func=gutil.compute_areas) + ij_patches.append(patches) + # initialize maps and loop + pbar = ProgressBar( + widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')], + maxval=len(image_series) + ).start() + maps = num.zeros((len(active_hkls), len(self._omegas), len(self._etas))) + print "maps.shape: ", maps.shape + for i_ome in range(len(image_series)): + pbar.update(i_ome) + this_frame = image_series[i_ome] + if threshold is not None: + this_frame[this_frame < threshold] = 0 + for i_ring in range(len(active_hkls)): + for j_eta in range(num_eta): + ii = ij_patches[i_ring][j_eta][-1][0] + jj = ij_patches[i_ring][j_eta][-1][1] + areas = ij_patches[i_ring][j_eta][3] + maps[i_ring, i_ome, j_eta] = num.sum(this_frame[ii, jj] * areas / float(num.sum(areas))) + pass # close eta loop + pass # close ring loop + pass # close ome loop + pbar.finish() + self._dataStore = maps + + @property + def dataStore(self): + return self._dataStore + + @property + def planeData(self): + return self._planeData + + @property + def iHKLList(self): + return num.atleast_1d(self._iHKLList).flatten() + + @property + def etaEdges(self): + return 
self._etaEdges + + @property + def omeEdges(self): + return self._omeEdges + @property + def etas(self): + return self._etas + + @property + def omegas(self): + return self._omegas + + def save(self, filename): + """ + self.dataStore + self.planeData + self.iHKLList + self.etaEdges + self.omeEdges + self.etas + self.omegas + """ + args = num.array(self.planeData.getParams())[:4] + args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom') # force units... + hkls = self.planeData.hkls + + save_dict = {'dataStore':self.dataStore, + 'etas':self.etas, + 'etaEdges':self.etaEdges, + 'iHKLList':self.iHKLList, + 'omegas':self.omegas, + 'omeEdges':self.omeEdges, + 'planeData_args':args, + 'planeData_hkls':hkls, + } + num.savez_compressed(filename, **save_dict) + return + pass # end of class: GenerateEtaOmeMaps + + + +class EtaOmeMaps(object): """ find-orientations loads pickled eta-ome data, but CollapseOmeEta is not pickleable, because it holds a list of ReadGE, each of which holds a reference to an open file object, which is not pickleable. """ - def __init__(self, ome_eta): - self.dataStore = ome_eta.dataStore - self.planeData = ome_eta.planeData - self.iHKLList = ome_eta.iHKLList - self.etaEdges = ome_eta.etaEdges - self.omeEdges = ome_eta.omeEdges - self.etas = ome_eta.etas - self.omegas = ome_eta.omegas - return + def __init__(self, ome_eta_archive): + ome_eta = num.load(ome_eta_archive, allow_pickle=True) + planeData_args = ome_eta['planeData_args'] + planeData_hkls = ome_eta['planeData_hkls'] + self.planeData = crystallography.PlaneData(planeData_hkls, *planeData_args) + + self.dataStore = ome_eta['dataStore'] + self.iHKLList = ome_eta['iHKLList'] + self.etaEdges = ome_eta['etaEdges'] + self.omeEdges = ome_eta['omeEdges'] + self.etas = ome_eta['etas'] + self.omegas = ome_eta['omegas'] + + return + pass # end of class: EtaOmeMaps + +# obsolete # class EtaOmeMaps(object): +# obsolete # +# obsolete # """ +# obsolete # find-orientations loads pickled eta-ome data, but CollapseOmeEta is not +# obsolete # pickleable, because it holds a list of ReadGE, each of which holds a +# obsolete # reference to an open file object, which is not pickleable. +# obsolete # """ +# obsolete # +# obsolete # def __init__(self, ome_eta): +# obsolete # self.dataStore = ome_eta.dataStore +# obsolete # self.planeData = ome_eta.planeData +# obsolete # self.iHKLList = ome_eta.iHKLList +# obsolete # self.etaEdges = ome_eta.etaEdges +# obsolete # self.omeEdges = ome_eta.omeEdges +# obsolete # self.etas = ome_eta.etas +# obsolete # self.omegas = ome_eta.omegas +# obsolete # return # not ready # class BaseEtaOme(object): # not ready # """ # not ready # eta-ome map base class derived from new YAML config -# not ready # +# not ready # # not ready # ...for now... -# not ready # +# not ready # # not ready # must provide: -# not ready # +# not ready # # not ready # self.dataStore # not ready # self.planeData # not ready # self.iHKLList @@ -1740,7 +1988,7 @@ def __init__(self, ome_eta): # not ready # self.omeEdges # IN RADIANS # not ready # self.etas # IN RADIANS # not ready # self.omegas # IN RADIANS -# not ready # +# not ready # # not ready # This wrapper will provide all but dataStore. 
# not ready # """ # not ready # def __init__(self, cfg, reader=None, eta_step=None): @@ -1750,34 +1998,34 @@ def __init__(self, ome_eta): # not ready # """ # not ready # self.cfg = cfg # not ready # self.instr_cfg = get_instrument_parameters(cfg) -# not ready # +# not ready # # not ready # # currently hard-coded to do reader from npz frame cache # not ready # # kwarg *MUST* be 'new' style reader # not ready # if reader is None: # not ready # self.__reader = get_frames(reader, self.cfg) # not ready # else: # not ready # self.__reader = reader -# not ready # +# not ready # # not ready # # set eta_step IN DEGREES # not ready # if eta_step is None: # not ready # self._eta_step = self.cfg.image_series.omega.step # not ready # else: # not ready # self._eta_step = abs(eta_step) # just in case negative... -# not ready # +# not ready # # not ready # material_list = cPickle.load(open(cfg.material.definitions, 'r')) # not ready # material_names = [material_list[i].name for i in range(len(material_list))] # not ready # material_dict = dict(zip(material_names, material_list)) # not ready # self.planeData = material_dict[cfg.material.active].planeData -# not ready # +# not ready # # not ready # self._iHKLList = None -# not ready # +# not ready # # not ready # self._etaEdges = None # not ready # self._omeEdges = None # not ready # self._etas = None # not ready # self._omegas = None -# not ready # +# not ready # # not ready # return -# not ready # +# not ready # # not ready # @property # not ready # def iHKLList(self): # not ready # return self._iHKLList @@ -1788,7 +2036,7 @@ def __init__(self, ome_eta): # not ready # """ # not ready # if ids is not None: # not ready # assert hasattr(ids, '__len__'), "ids must be a list or list-like object" -# not ready # +# not ready # # not ready # # start with all available # not ready # active_hkls = range(pd.hkls.shape[1]) # not ready # # check cfg file @@ -1797,7 +2045,7 @@ def __init__(self, ome_eta): # not ready # active_hkls = active_hkls if temp == 'all' else temp # not ready # # override with hkls from command line, if specified # not ready # return ids if ids is not None else active_hkls -# not ready # +# not ready # # not ready # @property # not ready # def omegas(self): # not ready # return self._omegas @@ -1810,11 +2058,11 @@ def __init__(self, ome_eta): # not ready # ome_start = self.__reader[1][0] # not ready # ome_step = self.__reader[1][1] # not ready # return ome_step*(num.arange(num_ome) + 0.5) + ome_start -# not ready # +# not ready # # not ready # @property # not ready # def eta_step(self): # not ready # return self._eta_step -# not ready # +# not ready # # not ready # @property # not ready # def etas(self): # not ready # return self._etas @@ -1822,12 +2070,12 @@ def __init__(self, ome_eta): # not ready # def etas(self): # not ready # """ # not ready # range is forced to be [-180, 180] for now, so step must be positive -# not ready # +# not ready # # not ready # step is same as omega unless specified (in degrees) # not ready # """ # not ready # num_eta = int(360/float(abs(self.eta_step))) # not ready # return num.radians(self.eta_step)*(num.arange(num_eta) + 0.5) - num.pi -# not ready # +# not ready # # not ready # @property # not ready # def omeEdges(self): # not ready # return self._omeEdges @@ -1835,28 +2083,28 @@ def __init__(self, ome_eta): # not ready # def omeEdges(self): # not ready # ome_step = self.omegas[1] - self.omegas[0] # same as self.__reader[1][1] # not ready # return num.hstack([self.omegas - 0.5*ome_step, self.omegas[-1] + 
0.5*ome_step]) -# not ready # +# not ready # # not ready # @property # not ready # def etaEdges(self): # not ready # return self._etaEdges # not ready # @etaEdges.getter # not ready # def etaEdges(self): # not ready # return num.hstack([self.etas - 0.5*eta_step, self.etas[-1] + 0.5*eta_step]) -# not ready # +# not ready # # not ready # class EtaOmeMaps(BaseEtaOme): # not ready # """ # not ready # """ # not ready # def __init__(self, cfg, reader=None, eta_step=None, # not ready # omega=0., tVec_s=num.zeros(3), # not ready # npdiv=2): -# not ready # +# not ready # # not ready # # first init the base class # not ready # super( EtaOmeMaps, self ).__init__(cfg, reader=reader, eta_step=eta_step) -# not ready # +# not ready # # not ready # # grac relevant tolerances for patches # not ready # tth_tol = num.degrees(self.planeData.tThWidth) # not ready # eta_tol = num.degrees(abs(self.etas[1]-self.etas[0])) -# not ready # +# not ready # # not ready # # grab distortion # not ready # if instr_cfg['detector']['distortion']['function_name'] is None: # not ready # distortion = None @@ -1867,35 +2115,35 @@ def __init__(self, ome_eta): # not ready # ) # not ready # # stack parameters # not ready # detector_params = num.hstack([ -# not ready # instr_cfg['detector']['transform']['tilt_angles'], -# not ready # instr_cfg['detector']['transform']['t_vec_d'], +# not ready # instr_cfg['detector']['transform']['tilt'], +# not ready # instr_cfg['detector']['transform']['translation'], # not ready # instr_cfg['oscillation_stage']['chi'], -# not ready # instr_cfg['oscillation_stage']['t_vec_s'], +# not ready # instr_cfg['oscillation_stage']['translation'], # not ready # ]) # not ready # pixel_pitch = instr_cfg['detector']['pixels']['size'] # not ready # chi = self.instr_cfg['oscillation_stage']['chi'] # in DEGREES -# not ready # +# not ready # # not ready # # 6 detector affine xform parameters # not ready # rMat_d = makeDetectorRotMat(detector_params[:3]) # not ready # tVec_d = detector_params[3:6] -# not ready # +# not ready # # not ready # # 'dummy' sample frame rot mat # not ready # rMats_s = makeOscillRotMat(num.radians([chi, omega])) -# not ready # +# not ready # # not ready # # since making maps for all eta, must hand trivial crystal params # not ready # rMat_c = np.eye(3) # not ready # tVec_c = np.zeros(3) -# not ready # +# not ready # # not ready # # make angle arrays for patches # not ready # neta = len(self.etas) # not ready # nome = len(reader[0]) -# not ready # +# not ready # # not ready # # make full angs list # not ready # angs = [num.vstack([tth*num.ones(neta), # not ready # etas, # not ready # num.zeros(nome)]) # not ready # for tth in self.planeData.getTTh()] -# not ready # +# not ready # # not ready # """SET MAPS CONTAINER AS ATTRIBUTE""" # not ready # self.dataStore = num.zeros((len(angs), nome, neta)) # not ready # for i_ring in range(len(angs)): @@ -1904,7 +2152,7 @@ def __init__(self, ome_eta): # not ready # xydet_ring = xfcapi.gvecToDetectorXY(gVec_ring_l, # not ready # rMat_d, rMat_s, rMat_c, # not ready # tVec_d, tVec_s, tVec_c) -# not ready # +# not ready # # not ready # if distortion is not None: # not ready # det_xy = distortion[0](xydet_ring, # not ready # distortion[1], @@ -1913,7 +2161,7 @@ def __init__(self, ome_eta): # not ready # rMat_d, rMat_s, # not ready # tVec_d, tVec_s, tVec_c, # not ready # distortion=distortion) -# not ready # +# not ready # # not ready # patches = make_reflection_patches(self.instr_cfg, # not ready # angs[i_ring].T[:, :2], ang_ps, # not ready # omega=None, @@ 
-1921,7 +2169,7 @@ def __init__(self, ome_eta): # not ready # distortion=distortion, # not ready # npdiv=npdiv, quiet=False, # not ready # compute_areas_func=gutil.compute_areas) -# not ready # +# not ready # # not ready # for i in range(nome): # not ready # this_frame = num.array(reader[0][i].todense()) # not ready # for j in range(neta): @@ -3353,27 +3601,35 @@ def _filter_hkls_eta_ome(hkls, angles, eta_range, ome_range): def _project_on_detector_plane(allAngs, rMat_d, rMat_c, chi, - tVec_d, tVec_c, tVec_s, distortion): - # hkls not needed # gVec_cs = num.dot(bMat, allHKLs.T) - gVec_cs = xfcapi.anglesToGVec( - allAngs, chi=chi, rMat_c=rMat_c - ) - rMat_ss = xfcapi.makeOscillRotMatArray( - chi, num.ascontiguousarray(allAngs[:,2]) - ) + tVec_d, tVec_c, tVec_s, + distortion, + beamVec=constants.beam_vec): + """ + utility routine for projecting a list of (tth, eta, ome) onto the + detector plane parameterized by the args + """ + gVec_cs = xfcapi.anglesToGVec(allAngs, + chi=chi, + rMat_c=rMat_c, + bHat_l=beamVec) + + rMat_ss = xfcapi.makeOscillRotMatArray(chi, allAngs[:, 2]) + tmp_xys = xfcapi.gvecToDetectorXYArray( gVec_cs, rMat_d, rMat_ss, rMat_c, - tVec_d, tVec_s, tVec_c - ) - valid_mask = ~(num.isnan(tmp_xys[:,0]) | num.isnan(tmp_xys[:,1])) + tVec_d, tVec_s, tVec_c, + beamVec=beamVec) - if distortion is None or len(distortion) == 0: - det_xy = tmp_xys[valid_mask] - else: - det_xy = distortion[0](tmp_xys[valid_mask], + valid_mask = ~(num.isnan(tmp_xys[:, 0]) | num.isnan(tmp_xys[:, 1])) + + det_xy = num.atleast_2d(tmp_xys[valid_mask, :]) + + # FIXME: distortion kludge + if distortion is not None and len(distortion) == 2: + det_xy = distortion[0](det_xy, distortion[1], invert=True) - return det_xy, rMat_ss[-1] + return det_xy, rMat_ss, valid_mask def simulateGVecs(pd, detector_params, grain_params, @@ -3446,7 +3702,7 @@ def simulateGVecs(pd, detector_params, grain_params, ang_ps = [] else: #...preallocate for speed...? - det_xy, rMat_s = _project_on_detector_plane( + det_xy, rMat_s, on_plane = _project_on_detector_plane( allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, @@ -3653,10 +3909,11 @@ def _compute_max(tth, eta, result): return result - def angularPixelSize(xy_det, xy_pixelPitch, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - distortion=None, beamVec=None, etaVec=None): + def angularPixelSize( + xy_det, xy_pixelPitch, + rMat_d, rMat_s, + tVec_d, tVec_s, tVec_c, + distortion=None, beamVec=None, etaVec=None): """ * choices to beam vector and eta vector specs have been supressed * assumes xy_det in UNWARPED configuration @@ -3761,13 +4018,12 @@ def _coo_build_window(frame_i, min_row, max_row, min_col, max_col): return window -def make_reflection_patches(instr_cfg, tth_eta, ang_pixel_size, - omega=None, +def make_reflection_patches(instr_cfg, + tth_eta, ang_pixel_size, omega=None, tth_tol=0.2, eta_tol=1.0, - rMat_c=num.eye(3), tVec_c=num.zeros((3, 1)), - distortion=None, - npdiv=1, quiet=False, compute_areas_func=gutil.compute_areas, - beamVec=None): + rmat_c=num.eye(3), tvec_c=num.zeros((3, 1)), + npdiv=1, quiet=False, + compute_areas_func=gutil.compute_areas): """ prototype function for making angular patches on a detector pixel_pitch is [row_size, column_size] in mm - DISTORTION HANDING IS STILL A KLUDGE + FIXME: DISTORTION HANDLING IS STILL A KLUDGE!!! patches are: @@ -3790,14 +4046,21 @@ def make_reflection_patches(instr_cfg, tth_eta, ang_pixel_size, t | x | x | x | ... 
| x | x | x | a ------------- ... ------------- + outputs are: + (tth_vtx, eta_vtx), + (x_vtx, y_vtx), + connectivity, + subpixel_areas, + (x_center, y_center), + (i_row, j_col) """ npts = len(tth_eta) - # detector frame - rMat_d = xfcapi.makeDetectorRotMat( - instr_cfg['detector']['transform']['tilt_angles'] + # detector quantities + rmat_d = xfcapi.makeRotMatOfExpMap( + num.r_[instr_cfg['detector']['transform']['tilt']] ) - tVec_d = num.r_[instr_cfg['detector']['transform']['t_vec_d']] + tvec_d = num.r_[instr_cfg['detector']['transform']['translation']] pixel_size = instr_cfg['detector']['pixels']['size'] frame_nrows = instr_cfg['detector']['pixels']['rows'] @@ -3805,21 +4068,38 @@ def make_reflection_patches(instr_cfg, tth_eta, ang_pixel_size, panel_dims = ( -0.5*num.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]], - 0.5*num.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]] + 0.5*num.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]] + ) + row_edges = num.arange(frame_nrows + 1)[::-1]*pixel_size[1] \ + + panel_dims[0][1] + col_edges = num.arange(frame_ncols + 1)*pixel_size[0] \ + + panel_dims[0][0] + + # grab distortion + # FIXME: distortion function is still hard-coded here + try: + dfunc_name = instr_cfg['detector']['distortion']['function_name'] + except(KeyError): + dfunc_name = None + + if dfunc_name is None: + distortion = None + else: + # !!!: warning -- hard-coded distortion + distortion = ( + distortion_module.GE_41RT, + num.r_[instr_cfg['detector']['distortion']['parameters']] ) - row_edges = num.arange(frame_nrows + 1)[::-1]*pixel_size[1] + panel_dims[0][1] - col_edges = num.arange(frame_ncols + 1)*pixel_size[0] + panel_dims[0][0] # sample frame chi = instr_cfg['oscillation_stage']['chi'] - tVec_s = num.r_[instr_cfg['oscillation_stage']['t_vec_s']] + tvec_s = num.r_[instr_cfg['oscillation_stage']['translation']] # beam vector - if beamVec is None: - beamVec = xfcapi.bVec_ref - + bvec = num.r_[instr_cfg['beam']['vector']] + # data to loop - # ...WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL? + # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL? 
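+ # given the output layout documented in the docstring above, one entry of + # the returned list unpacks as (names illustrative only): + # (tth_vtx, eta_vtx), (x_vtx, y_vtx), conn, areas, \ + # (x_cen, y_cen), (i_row, j_col) = patches[k] + # areas, the centers, and the (row, col) indices all share the + # (netas, ntths) shape of the per-patch angular mesh built below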
if omega is None: full_angs = num.hstack([tth_eta, num.zeros((npts, 1))]) else: @@ -3827,88 +4107,93 @@ def make_reflection_patches(instr_cfg, tth_eta, ang_pixel_size, patches = [] for angs, pix in zip(full_angs, ang_pixel_size): - ndiv_tth = npdiv*num.ceil( tth_tol/num.degrees(pix[0]) ) - ndiv_eta = npdiv*num.ceil( eta_tol/num.degrees(pix[1]) ) - - tth_del = num.arange(0, ndiv_tth+1)*tth_tol/float(ndiv_tth) - 0.5*tth_tol - eta_del = num.arange(0, ndiv_eta+1)*eta_tol/float(ndiv_eta) - 0.5*eta_tol - - # store dimensions for convenience - # * etas and tths are bin vertices, ome is already centers - sdims = [ len(eta_del)-1, len(tth_del)-1 ] + # calculate bin edges for patch based on local angular pixel size + # tth + ntths, tth_edges = gutil.make_tolerance_grid( + bin_width=num.degrees(pix[0]), + window_width=tth_tol, + num_subdivisions=npdiv + ) + + # eta + netas, eta_edges = gutil.make_tolerance_grid( + bin_width=num.degrees(pix[1]), + window_width=eta_tol, + num_subdivisions=npdiv + ) + + # FOR ANGULAR MESH + conn = gutil.cellConnectivity( + netas, + ntths, + origin='ll' + ) # meshgrid args are (cols, rows), a.k.a (fast, slow) - m_tth, m_eta = num.meshgrid(tth_del, eta_del) npts_patch = m_tth.size + m_tth, m_eta = num.meshgrid(tth_edges, eta_edges) + npts_patch = m_tth.size # calculate the patch XY coords from the (tth, eta) angles - # * will CHEAT and ignore the small perturbation the different - # omega angle values causes and simply use the central value + # !!! will CHEAT and ignore the small perturbation that the different + # omega angle values cause and simply use the central value gVec_angs_vtx = num.tile(angs, (npts_patch, 1)) \ - + num.radians( - num.vstack([m_tth.flatten(), - m_eta.flatten(), - num.zeros(npts_patch) - ]).T - ) - - # FOR ANGULAR MESH - conn = gutil.cellConnectivity( sdims[0], sdims[1], origin='ll') + + num.radians( + num.vstack([m_tth.flatten(), + m_eta.flatten(), + num.zeros(npts_patch) + ]).T + ) - rMat_s = xfcapi.makeOscillRotMat([chi, angs[2]]) - - # make G-vectors - gVec_c = xfcapi.anglesToGVec( - gVec_angs_vtx, - chi=chi, - rMat_c=rMat_c, - bHat_l=beamVec) - xy_eval_vtx = xfcapi.gvecToDetectorXY( - gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=beamVec) - if distortion is not None and len(distortion) == 2: - xy_eval_vtx = distortion[0](xy_eval_vtx, distortion[1], invert=True) - pass + xy_eval_vtx, rmats_s, on_plane = _project_on_detector_plane( + gVec_angs_vtx, + rmat_d, rmat_c, + chi, + tvec_d, tvec_c, tvec_s, + distortion, + beamVec=bvec) areas = compute_areas_func(xy_eval_vtx, conn) # EVALUATION POINTS - # * for lack of a better option will use centroids - tth_eta_cen = gutil.cellCentroids( num.atleast_2d(gVec_angs_vtx[:, :2]), conn ) - gVec_angs = num.hstack([tth_eta_cen, - num.tile(angs[2], (len(tth_eta_cen), 1))]) - gVec_c = xfcapi.anglesToGVec( - gVec_angs, - chi=chi, - rMat_c=rMat_c, - bHat_l=beamVec) - xy_eval = xfcapi.gvecToDetectorXY( - gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=beamVec) - if distortion is not None and len(distortion) == 2: - xy_eval = distortion[0](xy_eval, distortion[1], invert=True) - pass - row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1]) - col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0]) + # !!! 
for lack of a better option will use centroids + tth_eta_cen = gutil.cellCentroids( + num.atleast_2d(gVec_angs_vtx[:, :2]), + conn + ) + + gVec_angs = num.hstack( + [tth_eta_cen, + num.tile(angs[2], (len(tth_eta_cen), 1))] + ) + + xy_eval, rmats_s, on_plane = _project_on_detector_plane( + gVec_angs, + rmat_d, rmat_c, + chi, + tvec_d, tvec_c, tvec_s, + distortion, + beamVec=bvec) + + row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1]) + col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0]) # append patch data to list - patches.append(((gVec_angs_vtx[:, 0].reshape(m_tth.shape), - gVec_angs_vtx[:, 1].reshape(m_tth.shape)), - (xy_eval_vtx[:, 0].reshape(m_tth.shape), - xy_eval_vtx[:, 1].reshape(m_tth.shape)), - conn, - areas.reshape(sdims[0], sdims[1]), - (row_indices.reshape(sdims[0], sdims[1]), - col_indices.reshape(sdims[0], sdims[1])) - ) - ) - pass + patches.append( + ((gVec_angs_vtx[:, 0].reshape(m_tth.shape), + gVec_angs_vtx[:, 1].reshape(m_tth.shape)), + (xy_eval_vtx[:, 0].reshape(m_tth.shape), + xy_eval_vtx[:, 1].reshape(m_tth.shape)), + conn, + areas.reshape(netas, ntths), + (xy_eval[:, 0].reshape(netas, ntths), + xy_eval[:, 1].reshape(netas, ntths)), + (row_indices.reshape(netas, ntths), + col_indices.reshape(netas, ntths))) + ) + pass # close loop over angles return patches + def pullSpots(pd, detector_params, grain_params, reader, ome_period=(-num.pi, num.pi), eta_range=[(-num.pi, num.pi), ], @@ -3920,7 +4205,8 @@ def pullSpots(pd, detector_params, grain_params, reader, npdiv=1, threshold=10, doClipping=False, filename=None, save_spot_list=False, use_closest=True, - quiet=True): + discard_at_bounds=True, + quiet=True, output_hdf5=True): """ Function for pulling spots from a reader object for specific detector panel and crystal specifications @@ -3986,6 +4272,8 @@ def pullSpots(pd, detector_params, grain_params, reader, iframe = num.arange(0, nframes) + # If this is 0, then scan covers a full 360 + # !!! perhaps need to catch roundoff better? full_range = xf.angularDifference(ome_range[0], ome_range[1]) if ome_tol <= 0.5*r2d*abs(del_ome): @@ -4023,6 +4311,11 @@ def pullSpots(pd, detector_params, grain_params, reader, "pred tth \tpred eta \t pred ome \t" + \ "meas tth \tmeas eta \t meas ome \t" + \ "meas X \tmeas Y \t meas ome\n#" + if output_hdf5: + # !!! this is a bit kludgey as it puts constraints on the filename... + h5fname = fid.name.split('.')[0] + gw = GrainDataWriter_h5(h5fname, detector_params, grain_params) + iRefl = 0 spot_list = [] for hklid, hkl, angs, xy, pix in zip(*sim_g): @@ -4043,7 +4336,7 @@ def pullSpots(pd, detector_params, grain_params, reader, # store dimensions for convenience # * etas and tths are bin vertices, ome is already centers - sdims = [ len(ome_del), len(eta_del)-1, len(tth_del)-1 ] + sdims = [ len(ome_del), len(eta_edges)-1, len(tth_del)-1 ] # meshgrid args are (cols, rows), a.k.a (fast, slow) m_tth, m_eta = num.meshgrid(tth_del, eta_del) @@ -4105,12 +4398,22 @@ def pullSpots(pd, detector_params, grain_params, reader, patch_i = patch_i.flatten(); patch_j = patch_j.flatten() # read frame in, splitting reader if necessary + # !!! in cases without full ranges, perhaps skip spot completely if ome_tol falls outside ranges? split_reader = False if min(frame_indices) < 0: if full_range > 0: - reidx = num.where(frame_indices >= 0)[0] - sdims[0] = len(reidx) - frame_indices = frame_indices[reidx] + if discard_at_bounds: + # our tol box has run outside scan range + # !!! 
in this case we will DISCARD the spot + if not quiet: + print "(%d, %d, %d): window falls below omega range; skipping..." \ + % tuple(hkl) + continue + else: + reidx = num.where(frame_indices >= 0)[0] + sdims[0] = len(reidx) + ome_centers = ome_centers[reidx] + frame_indices = frame_indices[reidx] elif full_range == 0: split_reader = True reidx1 = num.where(frame_indices < 0)[0] @@ -4119,9 +4422,18 @@ def pullSpots(pd, detector_params, grain_params, reader, oidx2 = frame_indices[reidx2] if max(frame_indices) >= nframes: if full_range > 0: - reidx = num.where(frame_indices < nframes)[0] - sdims[0] = len(reidx) - frame_indices = frame_indices[reidx] + if discard_at_bounds: + # our tol box has run outside scan range + # !!! in this case we will DISCARD the spot + if not quiet: + print "(%d, %d, %d): window falls above omega range; skipping..." \ + % tuple(hkl) + continue + else: + reidx = num.where(frame_indices < nframes)[0] + sdims[0] = len(reidx) + ome_centers = ome_centers[reidx] + frame_indices = frame_indices[reidx] elif full_range == 0: split_reader = True reidx1 = num.where(frame_indices < nframes)[0] @@ -4136,7 +4448,6 @@ def pullSpots(pd, detector_params, grain_params, reader, frames = num.hstack([f1, f2]) else: frames = reader[0][frame_indices[0]:sdims[0]+frame_indices[0]] - else: rdr = reader.makeNew() if split_reader: @@ -4254,6 +4565,7 @@ def pullSpots(pd, detector_params, grain_params, reader, spot_intensity = num.sum(spot_data[labels == 1]) max_intensity = num.max(spot_data[labels == 1]) pass + if coms is not None: com_angs = num.array([tth_edges[0] + (0.5 + coms[2])*delta_tth, eta_edges[0] + (0.5 + coms[1])*delta_eta, @@ -4275,9 +4587,32 @@ def pullSpots(pd, detector_params, grain_params, reader, spot_intensity = num.nan max_intensity = num.nan pass + + # ################################# + # generate PREDICTED xy coords + rMat_s = xfcapi.makeOscillRotMat([chi, angs[2]]) + gVec_c = xf.anglesToGVec(num.atleast_2d(angs), bVec, eVec, + rMat_s=rMat_s, rMat_c=rMat_c) + # these are on ``ideal'' detector + pxy = xfcapi.gvecToDetectorXY( + gVec_c.T, + rMat_d, rMat_s, rMat_c, + tVec_d, tVec_s, tVec_c + ).flatten() + # apply inverse distortion (if provided) + if distortion is not None and len(distortion) == 2: + pxy = distortion[0]( + num.atleast_2d(pxy), + distortion[1], + invert=True + ).flatten() # + # ################################# + + # ===================================================================== # OUTPUT - # + # ===================================================================== + # output dictionary if save_spot_list: w_dict = {} @@ -4303,28 +4638,61 @@ def pullSpots(pd, detector_params, grain_params, reader, spot_list.append(w_dict) pass if filename is not None: + nans_tabbed_12_2 = '{:^12}\t{:^12}\t' + nans_tabbed_18_6 = '{:^18}\t{:^18}\t{:^18}\t{:^18}\t{:^18}\t{:^18}' + output_str = \ + '{:<6d}\t{:<6d}\t'.format(int(peakId), int(hklid)) + \ + '{:<3d}\t{:<3d}\t{:<3d}\t'.format(*num.array(hkl, dtype=int)) if peakId >= 0: - print >> fid, "%d\t%d\t" % (peakId, hklid) + \ - "%d\t%d\t%d\t" % tuple(hkl) + \ - "%1.6e\t%1.6e\t" % (spot_intensity, max_intensity) + \ - "%1.12e\t%1.12e\t%1.12e\t" % tuple(angs) + \ - "%1.12e\t%1.12e\t%1.12e\t" % tuple(com_angs) + \ - "%1.12e\t%1.12e\t%1.12e" % (new_xy[0], new_xy[1], com_angs[2]) + output_str += \ + '{:<1.6e}\t{:<1.6e}\t'.format(spot_intensity, max_intensity) + \ + '{:<1.12e}\t{:<1.12e}\t{:<1.12e}\t'.format(*angs) + \ + '{:<1.12e}\t{:<1.12e}\t{:<1.12e}\t'.format(*com_angs) + \ + 
+                    '{:<1.12e}\t{:<1.12e}\t{:<1.12e}'.format(new_xy[0], new_xy[1], com_angs[2])
+                #print >> fid, "%d\t%d\t" % (peakId, hklid) + \
+                #    "%d\t%d\t%d\t" % tuple(hkl) + \
+                #    "%1.6e\t%1.6e\t" % (spot_intensity, max_intensity) + \
+                #    "%1.12e\t%1.12e\t%1.12e\t" % tuple(angs) + \
+                #    "%1.12e\t%1.12e\t%1.12e\t" % tuple(com_angs) + \
+                #    "%1.12e\t%1.12e\t%1.12e" % (new_xy[0], new_xy[1], com_angs[2])
             else:
-                print >> fid, "%d\t%d\t" % (peakId, hklid) + \
-                    "%d\t%d\t%d\t" % tuple(hkl) + \
-                    "%f \t%f \t" % tuple(num.nan*num.ones(2)) + \
-                    "%1.12e\t%1.12e\t%1.12e\t" % tuple(angs) + \
-                    "%f \t%f \t%f" % tuple(num.nan*num.ones(3)) + \
-                    " \t%f \t%f \t%f" % tuple(num.nan*num.ones(3))
+                output_str += \
+                    nans_tabbed_12_2.format(*nans_2) + \
+                    '{:<1.12e}\t{:<1.12e}\t{:<1.12e}\t'.format(*angs) + \
+                    nans_tabbed_18_6.format(*nans_6)
+                #print >> fid, "%d\t%d\t" % (peakId, hklid) + \
+                #    "%d\t%d\t%d\t" % tuple(hkl) + \
+                #    "%f \t%f \t" % tuple(nans_2) + \
+                #    "%1.12e\t%1.12e\t%1.12e\t" % tuple(angs) + \
+                #    "%f \t%f \t%f" % tuple(nans_3) + \
+                #    " \t%f \t%f \t%f" % tuple(nans_3)
+                pass
+            print >> fid, output_str
+
+            if output_hdf5:
+                if peakId < 0:
+                    mangs = nans_3
+                    mxy = nans_2
+                else:
+                    mangs = com_angs
+                    mxy = new_xy
+                ijs = num.stack([row_indices, col_indices])
+                gw.dump_patch(
+                    iRefl, peakId, hklid, hkl,
+                    tth_eta_cen[:, 0], tth_eta_cen[:, 1], ome_centers,
+                    xy_eval, ijs, frame_indices,
+                    spot_data, angs, pxy, mangs, mxy, gzip=9)
             pass
         pass
         iRefl += 1
         pass
-    if filename is not None: fid.close()
-
+    if filename is not None:
+        fid.close()
+        if output_hdf5:
+            gw.close()
     return spot_list

+
 def extract_detector_transformation(detector_params):
     """
     goes from 10-vector of detector params OR instrument config dictionary
@@ -4332,16 +4700,121 @@
     """
     # extract variables for convenience
     if isinstance(detector_params, dict):
         rMat_d = xfcapi.makeDetectorRotMat(
-            instr_cfg['detector']['transform']['tilt_angles']
+            detector_params['detector']['transform']['tilt']
         )
-        tVec_d = num.r_[instr_cfg['detector']['transform']['t_vec_d']]
-        chi = instr_cfg['oscillation_stage']['chi']
-        tVec_s = num.r_[instr_cfg['oscillation_stage']['t_vec_s']]
+        tVec_d = num.r_[detector_params['detector']['transform']['translation']]
+        chi = detector_params['oscillation_stage']['chi']
+        tVec_s = num.r_[detector_params['oscillation_stage']['translation']]
     else:
-        assert len(detector_params) >= 10, \
+        assert len(detector_params) >= 10, \
             "list of detector parameters must have length >= 10"
         rMat_d = xfcapi.makeDetectorRotMat(detector_params[:3])
         tVec_d = num.ascontiguousarray(detector_params[3:6])
-        chi = detector_params[6]
+        chi = detector_params[6]
         tVec_s = num.ascontiguousarray(detector_params[7:10])
     return rMat_d, tVec_d, chi, tVec_s
+
+
+class GrainDataWriter_h5(object):
+    """
+    TODO: add material spec
+    """
+    def __init__(self, filename, detector_params, grain_params):
+        #use_attr = True
+        if isinstance(filename, h5py.File):
+            self.fid = filename
+        else:
+            self.fid = h5py.File(filename + '.hdf5', 'w')
+
+        # add instrument groups and attributes
+        self.instr_grp = self.fid.create_group('instrument')
+        rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation(detector_params)
+        #self.instr_grp.attrs.create('rmat_d', rMat_d)
+        #self.instr_grp.attrs.create('tvec_d', tVec_d.flatten())
+        #self.instr_grp.attrs.create('chi', chi)
+        #self.instr_grp.attrs.create('tvec_s', tVec_s.flatten())
+        self.instr_grp.create_dataset('rmat_d', data=rMat_d)
+        self.instr_grp.create_dataset('tvec_d', data=tVec_d.flatten())
+        self.instr_grp.create_dataset('chi', data=chi)
+        self.instr_grp.create_dataset('tvec_s', data=tVec_s.flatten())
+
+        self.grain_grp = self.fid.create_group('grain')
+        rMat_c = xfcapi.makeRotMatOfExpMap(grain_params[:3])
+        tVec_c = num.array(grain_params[3:6]).flatten()
+        vInv_s = num.array(grain_params[6:]).flatten()
+        vMat_s = num.linalg.inv(mutil.vecMVToSymm(vInv_s))
+        #self.grain_grp.attrs.create('rmat_c', rMat_c)
+        #self.grain_grp.attrs.create('tvec_c', tVec_c.flatten())
+        #self.grain_grp.attrs.create('inv(V)_s', vInv_s)
+        #self.grain_grp.attrs.create('vmat_s', vMat_s)
+        self.grain_grp.create_dataset('rmat_c', data=rMat_c)
+        self.grain_grp.create_dataset('tvec_c', data=tVec_c.flatten())
+        self.grain_grp.create_dataset('inv(V)_s', data=vInv_s)
+        self.grain_grp.create_dataset('vmat_s', data=vMat_s)
+
+        # add reflection data group
+        data_key = 'reflection_data'
+        self.data_grp = self.fid.create_group(data_key)
+
+    # FIXME: throws exception when called after close method
+    # def __del__(self):
+    #     self.close()
+
+    def close(self):
+        self.fid.close()
+
+    def dump_patch(self,
+                   i_refl, peak_id, hkl_id, hkl,
+                   tth_centers, eta_centers, ome_centers,
+                   xy_centers, ijs, frame_indices,
+                   spot_data, pangs, pxy, mangs, mxy, gzip=2):
+        """
+        to be called inside loop over patches
+
+        default GZIP level for data arrays is 2
+        """
+
+        # create spot group
+        spot_grp = self.data_grp.create_group("spot_%05d" % i_refl)
+        spot_grp.attrs.create('peak_id', peak_id)
+        spot_grp.attrs.create('hkl_id', hkl_id)
+        spot_grp.attrs.create('hkl', hkl)
+        spot_grp.attrs.create('predicted_angles', pangs)
+        spot_grp.attrs.create('predicted_xy', pxy)
+        if mangs is None:
+            mangs = num.nan*num.ones(3)
+        spot_grp.attrs.create('measured_angles', mangs)
+        if mxy is None:
+            mxy = num.nan*num.ones(2)
+        spot_grp.attrs.create('measured_xy', mxy)
+
+        # reshape the center coordinate arrays to the patch dimensions
+        ome_dim, eta_dim, tth_dim = spot_data.shape
+
+        tth_crd = tth_centers.reshape(eta_dim, tth_dim)
+        eta_crd = eta_centers.reshape(eta_dim, tth_dim)
+        ome_crd = num.tile(
+            ome_centers, (eta_dim*tth_dim, 1)
+        ).T.reshape(ome_dim, eta_dim, tth_dim)
+
+        # make datasets
+        spot_grp.create_dataset('tth_crd', data=tth_crd,
+                                compression="gzip", compression_opts=gzip)
+        spot_grp.create_dataset('eta_crd', data=eta_crd,
+                                compression="gzip", compression_opts=gzip)
+        spot_grp.create_dataset('ome_crd', data=ome_crd,
+                                compression="gzip", compression_opts=gzip)
+        spot_grp.create_dataset('xy_centers',
+                                data=xy_centers.T.reshape(2, eta_dim, tth_dim),
+                                compression="gzip", compression_opts=gzip)
+        spot_grp.create_dataset('ij_centers',
+                                data=ijs.reshape(2, eta_dim, tth_dim),
+                                compression="gzip", compression_opts=gzip)
+        spot_grp.create_dataset('frame_indices',
+                                data=num.array(frame_indices, dtype=int),
+                                compression="gzip", compression_opts=gzip)
+        spot_grp.create_dataset('intensities', data=spot_data,
+                                compression="gzip", compression_opts=gzip)
+        return
diff --git a/scripts/VirtualDiffraction.py b/scripts/VirtualDiffraction.py
deleted file mode 100644
index 26a1eda1..00000000
--- a/scripts/VirtualDiffraction.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Nov 9 11:03:24 2015
-
-@author: pagan2
-"""
-
-#%%
-
-import numpy as np
-import hexrd.xrd.material as mat
-import hexrd.xrd.crystallography as crys
-import hexrd.xrd.transforms_CAPI as trans
-import multiprocessing as mp
-
-
-#%%
-
-
-material=mat.Material()
-material.beamEnergy=15
-material.sgnum=227
-material.latticeParameters=[5.4310,]
-material.name='Silicon'
-
-#%%
-
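[Editorial aside] The `GrainDataWriter_h5` class above defines the on-disk layout used by the new `output_hdf5` path in `pullSpots`. As a reading aid, here is a minimal sketch of pulling one reflection back out of such a file; the filename and spot index are hypothetical, and only groups, attributes, and datasets actually created by `__init__` and `dump_patch` are touched:

```python
import h5py
import numpy as np

# a file written via pullSpots(..., output_hdf5=True); the name is made up
with h5py.File('spots_example.hdf5', 'r') as f:
    spot = f['reflection_data/spot_00000']

    # small metadata stored as attributes by dump_patch
    peak_id = spot.attrs['peak_id']             # < 0 if the spot was not indexed
    hkl = spot.attrs['hkl']
    pred_angs = spot.attrs['predicted_angles']  # (tth, eta, ome)
    meas_angs = spot.attrs['measured_angles']   # NaNs when unmeasured

    # gzipped arrays; intensities and ome_crd are (n_ome, n_eta, n_tth),
    # while tth_crd and eta_crd are (n_eta, n_tth)
    intensities = spot['intensities'][...]
    frame_indices = spot['frame_indices'][...]

    # integrated intensity over the tolerance box, ignoring NaN padding
    print(peak_id, hkl, np.nansum(intensities))
```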
-samplePos=np.array([[0],[0],[0]]) -crysPos=np.array([[0],[0],[0]]) -rMat_c=np.identity(3) -bMat=material.planeData.latVecOps['B'] -wavelength=material.planeData.wavelength - -material.planeData.t - -#%% -omega0,omega1=trans.oscillAnglesOfHKLs(material.planeData.hkls.T, 0, rMat_c, bMat, wavelength) - - - - -#%% - -def VirtDiffWorker - - - - diff --git a/scripts/convert_instrument_config.yml b/scripts/convert_instrument_config.yml new file mode 100644 index 00000000..d924fd33 --- /dev/null +++ b/scripts/convert_instrument_config.yml @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Mon Jul 29 13:33:57 2019 + +@author: joel +""" +from __future__ import print_function + +import argparse +import yaml +from hexrd.xrd.rotations import make_rmat_euler, angleAxisOfRotMat +from hexrd.xrd import transforms_CAPI as xfcapi + + +# top level keys +ROOT_KEYS = ['beam', 'oscillation_stage', 'detectors'] +OLD_OSCILL_KEYS = ['t_vec_s', 'chi'] +NEW_OSCILL_KEYS = ['translation', 'chi'] +OLD_TRANSFORM_KEYS = ['t_vec_d', 'tilt_angles'] +NEW_TRANSFORM_KEYS = ['translation', 'tilt'] + + +def convert_instrument_config(old_cfg, output=None): + """ + convert v0.5.x style YMAL config to v0.6.x + """ + icfg = yaml.safe_load(open(old_cfg, 'r')) + + new_cfg = dict.fromkeys(icfg) + + # %% first beam + new_cfg['beam'] = icfg['beam'] + + # %% next, calibration crystal if applicable + calib_key = 'calibration_crystal' + if calib_key in icfg.keys(): + new_cfg[calib_key] = icfg[calib_key] + + + # %% sample stage + old_dict = icfg['oscillation_stage'] + tmp_dict = dict.fromkeys(NEW_OSCILL_KEYS) + for tk in zip(OLD_OSCILL_KEYS, NEW_OSCILL_KEYS): + tmp_dict[tk[1]] = old_dict[tk[0]] + new_cfg['oscillation_stage'] = tmp_dict + + # %% detectors + new_cfg['detectors'] = dict.fromkeys(icfg['detectors']) + det_block_keys = ['pixels', 'saturation_level', 'transform', 'distortion'] + for det_id, source_params in icfg['detectors'].items(): + new_dict = {} + for key in det_block_keys: + if key != 'transform': + try: + new_dict[key] = source_params[key] + except(KeyError): + if key == 'distortion': + continue + elif key == 'saturation_level': + new_dict[key] = 2**16 + else: + raise RuntimeError("unrecognized parameter key '%s'" % key) + else: + old_dict = source_params[key] + tmp_dict = dict.fromkeys(NEW_TRANSFORM_KEYS) + for tk in zip(OLD_TRANSFORM_KEYS, NEW_TRANSFORM_KEYS): + if tk[0] == 't_vec_d': + tmp_dict[tk[1]] = old_dict[tk[0]] + elif tk[0] == 'tilt_angles': + xyz_angles = old_dict[tk[0]] + rmat = make_rmat_euler(xyz_angles, + 'xyz', + extrinsic=True) + phi, n = angleAxisOfRotMat(rmat) + tmp_dict[tk[1]] = (phi*n.flatten()).tolist() + new_dict[key] = tmp_dict + new_cfg['detectors'][det_id] = new_dict + + # %% dump new file + if output is None: + print(new_cfg) + else: + yaml.dump(new_cfg, open(output, 'w')) + return + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="convert a v0.5.x instrument config to v0.6.x format" + ) + + parser.add_argument( + 'instrument_cfg', + help="v0.5 style instrument config YAML file" + ) + + parser.add_argument( + '-o', '--output-file', + help="output file name", + type=str, + default="" + ) + + args = parser.parse_args() + + old_cfg = args.instrument_cfg + output_file = args.output_file + + if len(output_file) == 0: + output_file = None + + convert_instrument_config(old_cfg, output=output_file) diff --git a/scripts/gen_schmid_tensors.py b/scripts/gen_schmid_tensors.py new file mode 100644 index 00000000..750c7433 --- /dev/null +++ 
b/scripts/gen_schmid_tensors.py @@ -0,0 +1,133 @@ +# ============================================================ +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on downloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License (as published by the Free Software +# Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================ +#%% +import sys + +import argparse + +import numpy as np +import cPickle as cpl + + +from hexrd import matrixutil as mutil +from hexrd.xrd import symmetry as sym + +#%% +def gen_schmid_tensors(pd,uvw,hkl): + + # slip plane directions + slipdir = mutil.unitVector( np.dot( pd.latVecOps['F'], uvw) ) # 2 -1 -1 0 + slipdir_sym = sym.applySym(slipdir, pd.getQSym(), csFlag=False, cullPM=True, tol=1e-08) + + # slip plane plane normals + n_plane = mutil.unitVector( np.dot( pd.latVecOps['B'], hkl ) ) + n_plane_sym = sym.applySym(n_plane, pd.getQSym(), csFlag=False, cullPM=True, tol=1e-08) + + + num_slip_plane= n_plane_sym.shape[1] + + num_slip_sys=0 + for i in range(num_slip_plane): + planeID = np.where(abs(np.dot(n_plane_sym[:, i],slipdir_sym)) < 1.e-8)[0] + num_slip_sys +=planeID.shape[0] + + T= np.zeros((num_slip_sys, 3, 3)) + counter=0 + # + for i in range(num_slip_plane): + planeID = np.where(abs(np.dot(n_plane_sym[:, i],slipdir_sym)) < 1.e-8)[0] + for j in np.arange(planeID.shape[0]): + T[counter, :, :] = np.dot(slipdir_sym[:, planeID[j]].reshape(3, 1), n_plane_sym[:, i].reshape(1, 3)) + counter+=1 + #Clean some round off errors + round_off_err=np.where(abs(T)<1e-8) + T[round_off_err[0],round_off_err[1],round_off_err[2]]=0. + + return T + + +#%% + +if __name__ == '__main__': + """ + USAGE : python genschmidtensors material_file material_name uvw hkl output_file_stem + """ + parser = argparse.ArgumentParser(description='Generate a set of schmid tensors for a given slip direction [uvw] and slip plane (hkl)') + + + parser.add_argument('mat_file_loc', type=str) + parser.add_argument('mat_name', type=str) + parser.add_argument('uvw', type=str) + parser.add_argument('hkl', type=str) + parser.add_argument('out_file', type=str) + + + args = vars(parser.parse_args(sys.argv[1:])) + + + mat_list = cpl.load(open(args['mat_file_loc'], 'r')) + + # need to find the index of the active material + # ***PROBABLY WILL CHANGE TO DICT INSTEAD OF LIST + mat_idx = np.where([mat_list[i].name == args['mat_name'] for i in range(len(mat_list))])[0] + + # grab plane data, and useful things hanging off of it + pd = mat_list[mat_idx[0]].planeData + + uvw=np.zeros([3,1]) + sign=1. + increment=0 + for ii in args['uvw']: + if ii =='-': + sign=-1. 
+ else: + uvw[increment,0]=sign*float(ii) + sign=1. + increment+=1 + + hkl=np.zeros([3,1]) + sign=1. + increment=0 + for ii in args['hkl']: + if ii =='-': + sign=-1. + else: + hkl[increment,0]=sign*float(ii) + sign=1. + increment+=1 + + + T=gen_schmid_tensors(pd,uvw,hkl) + + + f=open(args['out_file']+'.txt','w') + + for i in np.arange(T.shape[0]): + f.write("%.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e \n" % \ + (T[i,0,0],T[i,0,1],T[i,0,2],T[i,1,0],T[i,1,1],T[i,1,2],T[i,2,0],T[i,2,1],T[i,2,2])) + f.close() + diff --git a/scripts/ighexrd_v1/grain_averaged_scripts/material_ti7.yml b/scripts/ighexrd_v1/grain_averaged_scripts/material_ti7.yml new file mode 100644 index 00000000..3dfceed4 --- /dev/null +++ b/scripts/ighexrd_v1/grain_averaged_scripts/material_ti7.yml @@ -0,0 +1,43 @@ +analysis_name: ti7-05-scan-11 +find_orientations: + clustering: {algorithm: dbscan, completeness: 0.8, radius: 0.75} + eta: {mask: 10, tolerance: 0.5} + omega: + period: [0, 360] + tolerance: 0.5 + orientation_maps: + active_hkls: [0, 1, 2, 3, 4] + file: junk + threshold: 50.0 + seed_search: + fiber_step: 0.25 + hkl_seeds: [4] + threshold: 2.0 +fit_grains: + do_fit: true + estimate: ti7-05-scan-11/grains.out + npdiv: 2 + panel_buffer: 3 + refit: [3.0, 1.5] + threshold: 50.0 + tolerance: + eta: [2.5, 1.0, 1.0] + omega: [1.0, 0.75, 0.75] + tth: [0.5, 0.35, 0.35] + tth_max: 8.0 +image_series: + file: + ids: [] + stem: null + images: {start: 0} + omega: {start: 0, step: 0.2498265093684941, stop: 360.0} +instrument: + detector: + parameters_old: dummy.par + pixels: + columns: 3073 + rows: 3889 + size: [0.0748, 0.0748] + parameters: dexela_instrument_calibrated_ruby.yml +material: {active: ti7al, definitions: materials.cpl} +multiprocessing: -1 diff --git a/scripts/ighexrd_v1/grain_averaged_scripts/near-field_uniform.py b/scripts/ighexrd_v1/grain_averaged_scripts/near-field_uniform.py new file mode 100644 index 00000000..f4844e4c --- /dev/null +++ b/scripts/ighexrd_v1/grain_averaged_scripts/near-field_uniform.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +""" +Created on Tue Aug 14 11:57:34 2018 + +@author: ken38 +""" + +#%% Necessary Dependencies + + +# PROCESSING NF GRAINS WITH MISORIENTATION +#============================================================================== +import numpy as np + +import matplotlib.pyplot as plt + +import multiprocessing as mp + +import os + +from hexrd.grainmap import nfutil +from hexrd.grainmap import tomoutil +from hexrd.grainmap import vtkutil + +#============================================================================== +# %% FILES TO LOAD -CAN BE EDITED +#============================================================================== +#These files are attached, retiga.yml is a detector configuration file +#The near field detector was already calibrated + +#A materials file, is a cPickle file which contains material information like lattice +#parameters necessary for the reconstruction + +main_dir = '/nfs/chess/user/ken38/Ti7_project/ti7-11-1percent/' + +det_file = main_dir + 'retiga.yml' +mat_file= main_dir + 'materials.cpl' + +#============================================================================== +# %% OUTPUT INFO -CAN BE EDITED +#============================================================================== + +output_dir = main_dir + +#============================================================================== + +# %% TOMOGRAPHY DATA FILES -CAN BE EDITED - ZERO LOAD SCAN 
+#============================================================================== + +#Locations of tomography bright field images +tbf_data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/2/nf/' + +tbf_img_start=31171 #for this rate, this is the 6th file in the folder +tbf_num_imgs=10 + +#Locations of tomography images +tomo_data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/3/nf/' + +tomo_img_start=31187#for this rate, this is the 6th file in the folder +tomo_num_imgs=360 + +#============================================================================== +# %% NEAR FIELD DATA FILES -CAN BE EDITED - ZERO LOAD SCAN +#============================================================================== +#These are the near field data files used for the reconstruction, a grains.out file +#from the far field analaysis is used as orientation guess for the grid that will +grain_out_file = main_dir + 'ti7-05-scan-11/grains.out' + +#%% +#Locations of near field images +#Locations of near field images +data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/7/nf/' #layer 1 + +#img_start=46501#for 0.25 degree/steps and 5 s exposure, end up with 5 junk frames up front, this is the 6th +img_start=31602#for 0.25 degree/steps and 5 s exposure, end up with 6 junk frames up front, this is the 7th +num_imgs=1440 +img_nums=np.arange(img_start,img_start+num_imgs,1) + +output_stem='initial_nf_uniform_diffvol_1' +#============================================================================== +# %% USER OPTIONS -CAN BE EDITED +#============================================================================== +x_ray_energy=61.332 #keV + +#name of the material for the reconstruction +mat_name='ti7al' + +#reconstruction with misorientation included, for many grains, this will quickly +#make the reconstruction size unmanagable +misorientation_bnd=0.0 #degrees +misorientation_spacing=0.25 #degrees + +beam_stop_width=0.6#mm, assumed to be in the center of the detector + +ome_range_deg=[(0.,359.75)] #degrees + +max_tth=-1. #degrees, if a negative number is input, all peaks that will hit the detector are calculated + +#image processing +num_for_dark=250#num images to use for median data +threshold=1.5 +num_erosions=2 #num iterations of images erosion, don't mess with unless you know what you're doing +num_dilations=3 #num iterations of images erosion, don't mess with unless you know what you're doing +ome_dilation_iter=1 #num iterations of 3d image stack dilations, don't mess with unless you know what you're doing + +chunk_size=500#chunksize for multiprocessing, don't mess with unless you know what you're doing + +#thresholds for grains in reconstructions +comp_thresh=0.75 #only use orientations from grains with completnesses ABOVE this threshold +chi2_thresh=0.005 #only use orientations from grains BELOW this chi^2 + +#tomography options +layer_row=1024 # row of layer to use to find the cross sectional specimen shape +recon_thresh=0.00025#usually varies between 0.0001 and 0.0005 +#Don't change these unless you know what you are doing, this will close small holes +#and remove noise +noise_obj_size=500 +min_hole_size=500 + +cross_sectional_dim=1.35 #cross sectional to reconstruct (should be at least 20%-30% over sample width) +#voxel spacing for the near field reconstruction +voxel_spacing = 0.005#in mm +##vertical (y) reconstruction voxel bounds in mm +v_bnds=[-0.085,0.085] +#v_bnds=[-0.,0.] 
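[Editorial aside] Before running, it can help to sanity-check the grid these options imply. A quick back-of-envelope sketch (not part of the original script; the actual test grid is built below by `nfutil.gen_nf_test_grid_tomo`, and the tomo mask cuts the count down):

```python
import numpy as np

# values copied from the user options above
cross_sectional_dim = 1.35   # mm, in-plane extent to reconstruct
voxel_spacing = 0.005        # mm
v_bnds = [-0.085, 0.085]     # mm, vertical bounds

n_xz = int(np.ceil(cross_sectional_dim / voxel_spacing))     # 270 voxels per in-plane axis
n_y = int(np.ceil((v_bnds[1] - v_bnds[0]) / voxel_spacing))  # 34 vertical layers

# upper bound on test coordinates before tomo masking: ~2.5 million
print(n_xz * n_xz * n_y)
```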
+#=======================
+#==============================================================================
+# %% LOAD GRAIN AND EXPERIMENT DATA
+#==============================================================================
+
+experiment, nf_to_ff_id_map = nfutil.gen_trial_exp_data(grain_out_file,det_file,mat_file, x_ray_energy, mat_name, max_tth, comp_thresh, chi2_thresh, misorientation_bnd, \
+                                   misorientation_spacing,ome_range_deg, num_imgs, beam_stop_width)
+
+#==============================================================================
+# %% TOMO PROCESSING - GENERATE BRIGHT FIELD
+#==============================================================================
+
+tbf=tomoutil.gen_bright_field(tbf_data_folder,tbf_img_start,tbf_num_imgs,experiment.nrows,experiment.ncols,num_digits=6)
+
+#==============================================================================
+# %% TOMO PROCESSING - BUILD RADIOGRAPHS
+#==============================================================================
+
+rad_stack=tomoutil.gen_attenuation_rads(tomo_data_folder,tbf,tomo_img_start,tomo_num_imgs,experiment.nrows,experiment.ncols,num_digits=6)
+
+#==============================================================================
+# %% TOMO PROCESSING - INVERT SINOGRAM
+#==============================================================================
+
+reconstruction_fbp=tomoutil.tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=layer_row,\
+                   start_tomo_ang=ome_range_deg[0][0],end_tomo_ang=ome_range_deg[0][1],\
+                   tomo_num_imgs=tomo_num_imgs, center=experiment.detector_params[3])
+
+#==============================================================================
+# %% TOMO PROCESSING - CLEAN TOMO RECONSTRUCTION
+#==============================================================================
+
+binary_recon=tomoutil.threshold_and_clean_tomo_layer(reconstruction_fbp,recon_thresh, noise_obj_size,min_hole_size)
+
+#==============================================================================
+# %% TOMO PROCESSING - RESAMPLE TOMO RECONSTRUCTION
+#==============================================================================
+
+tomo_mask=tomoutil.crop_and_rebin_tomo_layer(binary_recon,recon_thresh,voxel_spacing,experiment.pixel_size[0],cross_sectional_dim)
+
+#==============================================================================
+# %% TOMO PROCESSING - CONSTRUCT DATA GRID
+#==============================================================================
+
+test_crds, n_crds, Xs, Ys, Zs = nfutil.gen_nf_test_grid_tomo(tomo_mask.shape[1], tomo_mask.shape[0], v_bnds, voxel_spacing)
+
+#==============================================================================
+# %% NEAR FIELD - MAKE MEDIAN DARK
+#==============================================================================
+
+dark=nfutil.gen_nf_dark(data_folder,img_nums,num_for_dark,experiment.nrows,experiment.ncols,dark_type='median',num_digits=6)
+
+#==============================================================================
+# %% NEAR FIELD - LOAD IMAGE DATA AND PROCESS
+#==============================================================================
+
+image_stack=nfutil.gen_nf_image_stack(data_folder,img_nums,dark,ome_dilation_iter,threshold,experiment.nrows,experiment.ncols,num_digits=6)#,grey_bnds=(5,5), gaussian=4.5)
+
+#==============================================================================
+# %% INSTANTIATE CONTROLLER - RUN BLOCK NO EDITING
+#==============================================================================
+
+progress_handler = nfutil.progressbar_progress_observer() +save_handler=nfutil.forgetful_result_handler() + +controller = nfutil.ProcessController(save_handler, progress_handler, + ncpus=mp.cpu_count(), chunk_size=chunk_size) + +multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn' + +#============================================================================== +# %% TEST ORIENTATIONS - RUN BLOCK NO EDITING +#============================================================================== + +raw_confidence=nfutil.test_orientations(image_stack, experiment, test_crds, + controller,multiprocessing_start_method) + +#============================================================================== +# %% POST PROCESS W WHEN TOMOGRAPHY HAS BEEN USED +#============================================================================== + +grain_map, confidence_map = nfutil.process_raw_confidence(raw_confidence,Xs.shape,tomo_mask=tomo_mask,id_remap=nf_to_ff_id_map) + +#============================================================================== +# %% SAVE PROCESSED GRAIN MAP DATA +#============================================================================== + +nfutil.save_nf_data(output_dir,output_stem,grain_map,confidence_map,Xs,Ys,Zs,experiment.exp_maps,id_remap=nf_to_ff_id_map) diff --git a/scripts/ighexrd_v1/grain_averaged_scripts/nf_uniformrecons.sh b/scripts/ighexrd_v1/grain_averaged_scripts/nf_uniformrecons.sh new file mode 100644 index 00000000..60973c1d --- /dev/null +++ b/scripts/ighexrd_v1/grain_averaged_scripts/nf_uniformrecons.sh @@ -0,0 +1,26 @@ +#!/bin/sh +# -*- coding: utf-8 -*- + + +cd /workingdirectory/ + +source activate hexrd_environment + +echo starting near-field uniform orientation field + +echo diff_vol_1 +python near-field_uniform_diffvol_1.py + +echo diff_vol_2 +python near-field_uniform_diffvol_2.py + +echo diff_vol_3 +python near-field_uniform_diffvol_3.py + +echo diff_vol_4 +python near-field_uniform_diffvol_4.py + +echo diff_vol_5 +python near-field_uniform_diffvol_5.py + +echo All done diff --git a/scripts/ighexrd_v1/intragrain_scripts/GOE_builder_one_load.py b/scripts/ighexrd_v1/intragrain_scripts/GOE_builder_one_load.py new file mode 100644 index 00000000..893f1937 --- /dev/null +++ b/scripts/ighexrd_v1/intragrain_scripts/GOE_builder_one_load.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +""" +Created on Tue Jul 10 09:37:05 2018 + +@author: ken38 +"" +#original findorientations first +Created on Wed Mar 22 19:04:10 2017 + +@author: bernier2 + +""" +#%% +# MUST BE RUN FROM FOLDER WHERE ETA OMEGA MAPS LIVE +# THIS SCRIPT IS DESIGNED TO BE USED ONCE ALL ETA OME MAPS HAVE BEEN FORMED AND ONLY FOR SUBSEQUENT CLOUD SEARCHES. 
+# IF ETA OMEGA MAP IS NOT OKAY THEN PLEASE RUN WITH ANOTHER SCRIPT TO GENERATE - OR - CHANGE CLOBBER_MAPS TO TRUE +# it is better to not change clobber maps to true but simply generate a new series of eta_ome maps and change yml directory + +#%% +from __future__ import print_function + +import time +import logging + +import os + +import glob + +import multiprocessing + +import numpy as np + +from scipy import ndimage + +import timeit + +import argparse + + +try: + import dill as cpl +except(ImportError): + import cPickle as cpl + +import yaml + +from hexrd import constants as cnst +from hexrd import config +from hexrd import imageseries +from hexrd.imageseries.omega import OmegaImageSeries +from hexrd import instrument +from hexrd.findorientations import \ + generate_orientation_fibers, \ + run_cluster +from hexrd.xrd import transforms_CAPI as xfcapi +from hexrd.xrd import indexer +from matplotlib import pyplot as plt +from hexrd.xrd.xrdutil import EtaOmeMaps + +from hexrd.xrd import rotations as rot + +logger = logging.getLogger(__name__) + +# just require scikit-learn? +have_sklearn = False +try: + import sklearn + vstring = sklearn.__version__.split('.') + if vstring[0] == '0' and int(vstring[1]) >= 14: + from sklearn.cluster import dbscan + from sklearn.metrics.pairwise import pairwise_distances + have_sklearn = True +except ImportError: + pass + + +# plane data +def load_pdata(cpkl, key): + with file(cpkl, "r") as matf: + mat_list = cpl.load(matf) + return dict(zip([i.name for i in mat_list], mat_list))[key].planeData + + +# images +def load_images(yml): + return imageseries.open(yml, format="frame-cache", style="npz") + + +# instrument +def load_instrument(yml): + with file(yml, 'r') as f: + icfg = yaml.load(f) + return instrument.HEDMInstrument(instrument_config=icfg) + +#%% +if __name__ == '__main__': + # + # Run preprocessor + # + parser = argparse.ArgumentParser( + description="batchfndorigrains") + + parser.add_argument('grain_num', + help="grain_num", type=int) + parser.add_argument('yaml_file', + help="yaml file", type=str) + parser.add_argument('sample_name', + help="sample name", type=str) + parser.add_argument('initial_or_final', + help="intial or final", type=str) + parser.add_argument('scan_num', + help="scan num to fit grains", type=int) + + + args = parser.parse_args() + cfg_filename = args.yaml_file + scan_number = args.scan_num + samp_name = args.sample_name + grain = args.grain_num + initial_or_final = args.initial_or_final + +# %% +# ============================================================================= +# START USER INPUT +# ============================================================================= +#----- The following parameters are passed through as arguements if running from command line ------ + +#cfg_filename = 'GOE_ti705.yml' +#samp_name = 'ti7-05' +#scan_number = 68 +#initial_or_final = final +#grain_= 0 +#---------------------------------------------------------------------------------------------------- + +#NEW GOE VARIABLES --- USER SHOULD EDIT ONCE -- same for all loadsteps + +scan_to_center_GOE = 11 #name of scan - typically taken to be the initial zero load step +misorientation_bnd = 3.0 #in degrees +misorientation_spacing = 0.20 #in degrees +id_analysisname = 'ti7-11' + +#location of master grains.out +dir_string = '/nfs/chess/user/ken38/Ti7_project/ti7-11-1percent/' +master_scan = dir_string + 'ti7-11-scan-%d' % scan_to_center_GOE + +#location and name of npz file output +npz_string = 'grain_%d' %grain + '_goe_map_data_%s.npz' % 
initial_or_final +goe_path = '/nfs/chess/user/ken38/Ti7_project/ti7-11-1percent/GOE/' +npz_save_dir = goe_path + '%s_goe/' % initial_or_final #for npz file +GOE_directory = goe_path + '%s_goe/' % initial_or_final #for grains.out file + +analysis_id = goe_path + id_analysisname + '-grain-%d' % grain + '-%s' % initial_or_final + +# make output directory if doesn't exist +if not os.path.exists(npz_save_dir): + os.mkdir(npz_save_dir) + +# %% +# ============================================================================= +# END USER INPUT +# ============================================================================= +# ------------------------------------------------------------------------------ +#cfg file -- currently ignores image_series block + +data_dir = os.getcwd() +fc_stem = "%s_%s_%%s*.npz" % (samp_name, scan_number) + +make_max_frames = False +use_direct_search = False + +# for clustering neighborhood +# FIXME +min_samples = 2 + +# maps options +clobber_maps = False +show_maps = False + +# ============================================================================= +# END USER INPUT +# ============================================================================= +# %% +cfg = config.open(cfg_filename)[0] + +active_hkls = cfg.find_orientations.orientation_maps.active_hkls +if active_hkls == 'all': + active_hkls = None + +max_tth = cfg.fit_grains.tth_max +if max_tth: + if type(cfg.fit_grains.tth_max) != bool: + max_tth = np.degrees(float(max_tth)) +else: + max_tth = None + +# load plane data +plane_data = load_pdata(cfg.material.definitions, cfg.material.active) +plane_data.tThMax = max_tth + +# load instrument +instr = load_instrument(cfg.instrument.parameters) +det_keys = instr.detectors.keys() + +# !!! panel buffer setting is global and assumes same typ of panel! +for det_key in det_keys: + instr.detectors[det_key].panel_buffer = \ + np.array(cfg.fit_grains.panel_buffer) + +# grab eta ranges +eta_ranges = cfg.find_orientations.eta.range + +# for indexing +build_map_threshold = cfg.find_orientations.orientation_maps.threshold + +on_map_threshold = cfg.find_orientations.threshold +fiber_ndiv = cfg.find_orientations.seed_search.fiber_ndiv +fiber_seeds = cfg.find_orientations.seed_search.hkl_seeds + +tth_tol = np.degrees(plane_data.tThWidth) +eta_tol = cfg.find_orientations.eta.tolerance +ome_tol = cfg.find_orientations.omega.tolerance +# omega period... +# QUESTION: necessary??? 
+ome_period = np.radians(cfg.find_orientations.omega.period) + +npdiv = cfg.fit_grains.npdiv + +compl_thresh = cfg.find_orientations.clustering.completeness +cl_radius = cfg.find_orientations.clustering.radius + +# %% + +imsd = dict.fromkeys(det_keys) +for det_key in det_keys: + fc_file = sorted( + glob.glob( + os.path.join( + data_dir, + fc_stem % det_key.lower() + ) + ) + ) + if len(fc_file) != 1: + raise(RuntimeError, 'cache file not found, or multiple found') + else: + ims = load_images(fc_file[0]) + imsd[det_key] = OmegaImageSeries(ims) + + +if make_max_frames: + max_frames_output_name = os.path.join( + data_dir, + "%s_%d-maxframes.hdf5" % (samp_name, scan_number) + ) + + if os.path.exists(max_frames_output_name): + os.remove(max_frames_output_name) + + max_frames = dict.fromkeys(det_keys) + for det_key in det_keys: + max_frames[det_key] = imageseries.stats.max(imsd[det_key]) + + ims_out = imageseries.open( + None, 'array', + data=np.array([max_frames[i] for i in max_frames]), + meta={'panels': max_frames.keys()} + ) + imageseries.write( + ims_out, max_frames_output_name, + 'hdf5', path='/imageseries' + ) +# %% + +maps_fname = analysis_id + "_maps.npz" +if os.path.exists(maps_fname) and not clobber_maps: + eta_ome = EtaOmeMaps(maps_fname) +else: + print("INFO:\tbuilding eta_ome maps") + start = timeit.default_timer() + + # make eta_ome maps + eta_ome = instrument.GenerateEtaOmeMaps( + imsd, instr, plane_data, + active_hkls=active_hkls, threshold=build_map_threshold, + ome_period=cfg.find_orientations.omega.period) + + print("INFO:\t\t...took %f seconds" % (timeit.default_timer() - start)) + + # save them + eta_ome.save(maps_fname) +#%% +# ============================================================================= +# BOX TEST POINT GENERATION +# ============================================================================= +# ============================================================================= +# Set up multiprocessing from yml +# ============================================================================= + +ncpus = cfg.multiprocessing +#Reload original data always to start from master grains.out + +exp_maps = np.zeros([1,3]) +grain_id = np.zeros([1,1]) + +grain_out = '/grains.out' +load_data_master = np.loadtxt(master_scan + grain_out) +exp_map1 = load_data_master[grain,3:6] +grain_id1 = load_data_master[grain,0] + +exp_maps[0,:] = exp_map1 +grain_id[0,:] = grain_id1 + +mis_amt=misorientation_bnd*np.pi/180 +spacing=misorientation_spacing*np.pi/180 + +ori_pts = np.arange(-mis_amt, (mis_amt+(spacing*0.999)), spacing) +num_ori_grid_pts=ori_pts.shape[0]**3 +num_oris = exp_maps.shape[0] + +Xs0, Ys0, Zs0 = np.meshgrid(ori_pts, ori_pts, ori_pts) +grid0 = np.vstack([Xs0.flatten(), Ys0.flatten(), Zs0.flatten()]).T + +exp_maps_expanded=np.zeros([num_ori_grid_pts*num_oris,3]) + + + +for ii in np.arange(num_oris): + pts_to_use=np.arange(num_ori_grid_pts) + ii*num_ori_grid_pts + exp_maps_expanded[pts_to_use,:] =grid0 + np.r_[exp_maps[ii,:]] + +exp_maps=exp_maps_expanded + +rMat_c = rot.quatOfExpMap(exp_maps.T) + +qfib=rMat_c +print("INFO: will test %d quaternions using %d processes" + % (qfib.shape[1], ncpus)) + +# %% +# ============================================================================= +# ORIENTATION SCORING +# ============================================================================= + +if use_direct_search: + def test_orientation_FF_init(params): + global paramMP + paramMP = params + + def test_orientation_FF_reduced(quat): + """ + input parameters are [ + plane_data, 
instrument, imgser_dict, + tth_tol, eta_tol, ome_tol, npdiv, threshold + ] + """ + plane_data = paramMP['plane_data'] + instrument = paramMP['instrument'] + imgser_dict = paramMP['imgser_dict'] + tth_tol = paramMP['tth_tol'] + eta_tol = paramMP['eta_tol'] + ome_tol = paramMP['ome_tol'] + npdiv = paramMP['npdiv'] + threshold = paramMP['threshold'] + + phi = 2*np.arccos(quat[0]) + n = xfcapi.unitRowVector(quat[1:]) + grain_params = np.hstack([ + phi*n, cnst.zeros_3, cnst.identity_6x1, + ]) + + compl, scrap = instrument.pull_spots( + plane_data, grain_params, imgser_dict, + tth_tol=tth_tol, eta_tol=eta_tol, ome_tol=ome_tol, + npdiv=npdiv, threshold=threshold, + eta_ranges=np.radians(cfg.find_orientations.eta.range), + ome_period=(-np.pi, np.pi), + check_only=True) + + return sum(compl)/float(len(compl)) + + params = dict( + plane_data=plane_data, + instrument=instr, + imgser_dict=imsd, + tth_tol=tth_tol, + eta_tol=eta_tol, + ome_tol=ome_tol, + npdiv=npdiv, + threshold=cfg.fit_grains.threshold) + + print("INFO:\tusing direct seach") + pool = multiprocessing.Pool(ncpus, test_orientation_FF_init, (params, )) + completeness = pool.map(test_orientation_FF_reduced, qfib.T) + pool.close() +else: + print("INFO:\tusing map search with paintGrid on %d processes" + % ncpus) + start = timeit.default_timer() + + completeness = indexer.paintGrid( + qfib, + eta_ome, + etaRange=np.radians(cfg.find_orientations.eta.range), + omeTol=np.radians(cfg.find_orientations.omega.tolerance), + etaTol=np.radians(cfg.find_orientations.eta.tolerance), + omePeriod=np.radians(cfg.find_orientations.omega.period), + threshold=on_map_threshold, + doMultiProc=ncpus > 1, + nCPUs=ncpus + ) + + + print("INFO:\t\t...took %f seconds" % (timeit.default_timer() - start)) +completeness = np.array(completeness) + +# %% +# ============================================================================= +# SAVE AS NPZ IN NEW FOLDER +# ============================================================================= + +goe_box_quat = np.zeros([1,4, len(pts_to_use)]) +goe_box_con = np.zeros([1,len(pts_to_use)]) + +#for grain in range (0,len(grain_id)) : +goe_box_quat[0,:,:] = qfib[:,:] +goe_box_con[0,:] = completeness[:] + +np.savez(npz_save_dir + npz_string,goe_box_con=goe_box_con,goe_box_quat=goe_box_quat,Xs0=Xs0,Ys0=Ys0,Zs0=Zs0) + +#%%#============================================================================== +#GRAINS.OUT #currently used for nf - will eliminate +#============================================================================== + +#if not os.path.exists(cfg.analysis_dir): +# os.makedirs(cfg.analysis_dir) + +print("INFO:writing misorientation clouds to grain_id_#.out files" ) + +gw = instrument.GrainDataWriter(os.path.join(GOE_directory, 'grain_id_%i.out') % grain ) +grain_params_list = [] + +for gid, q in enumerate(goe_box_quat[0,:,:].T): + phi = 2*np.arccos(q[0]) + n = xfcapi.unitRowVector(q[1:]) + grain_params = np.hstack([phi*n, cnst.zeros_3, cnst.identity_6x1]) + com = goe_box_con[0,gid] + gw.dump_grain(grain, com, 0., grain_params) + grain_params_list.append(grain_params) +gw.close() diff --git a/scripts/ighexrd_v1/intragrain_scripts/GOE_ti7.yml b/scripts/ighexrd_v1/intragrain_scripts/GOE_ti7.yml new file mode 100644 index 00000000..c50f4d4e --- /dev/null +++ b/scripts/ighexrd_v1/intragrain_scripts/GOE_ti7.yml @@ -0,0 +1,31 @@ +analysis_name: analysis_goe_name +find_orientations: + clustering: {algorithm: dbscan, completeness: 0.8, radius: 0.75} + eta: {mask: 10, tolerance: 0.5} + omega: + period: [0, 360] + tolerance: 0.5 
+ orientation_maps: + active_hkls: [0, 1, 2, 3, 4] + file: junk + threshold: 50.0 + seed_search: + fiber_step: 0.25 + hkl_seeds: [3,4] + threshold: 2.0 +image_series: + file: + ids: [] + stem: null + images: {start: 0} + omega: {start: 0, step: 0.2498265093684941, stop: 360.0} +instrument: + detector: + parameters_old: dummy.par + pixels: + columns: 3073 + rows: 3889 + size: [0.0748, 0.0748] + parameters: ff_detector_dexela.yml +material: {active: materialid, definitions: materials.cpl} +multiprocessing: -1 diff --git a/scripts/ighexrd_v1/intragrain_scripts/build_GOEs.sh b/scripts/ighexrd_v1/intragrain_scripts/build_GOEs.sh new file mode 100644 index 00000000..4c5cf1eb --- /dev/null +++ b/scripts/ighexrd_v1/intragrain_scripts/build_GOEs.sh @@ -0,0 +1,33 @@ +#!/bin/sh +# -*- coding: utf-8 -*- + +cd /nfs/chess/aux/user/ken38/Ti7_project/ti7-05-3percent/ +source activate hexrd_0.5.29_working +#for first load use initial 11, for last load use final 68 +#counter set to increment up to the number of grains total (see grains.out to decide) + +echo starting eta_ome_maps initial +python generate_eta_ome_maps_parallel_initial.py GOE_ti705.yml ti7-05 initial 11 +echo Done building maps + +echo starting GOE builder initial +counter=0 +while [ $counter -le 792 ]; do +echo grain $counter +python GOE_builder_one_load.py $counter GOE_ti705.yml ti7-05 initial 11 +((counter++)) +done +echo Done + +echo starting eta_ome_maps final +python generate_eta_ome_maps_parallel_final.py GOE_ti705.yml ti7-05 final 68 +echo Done building maps + +echo starting GOE builder final +counter=0 +while [ $counter -le 792 ]; do +echo grain $counter +python GOE_builder_one_load.py $counter GOE_ti705.yml ti7-05 final 68 +((counter++)) +done +echo Done diff --git a/scripts/ighexrd_v1/intragrain_scripts/gen_eta_ome_maps.py b/scripts/ighexrd_v1/intragrain_scripts/gen_eta_ome_maps.py new file mode 100644 index 00000000..23cc3e2d --- /dev/null +++ b/scripts/ighexrd_v1/intragrain_scripts/gen_eta_ome_maps.py @@ -0,0 +1,466 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +""" +Created on Tue Jul 10 09:37:05 2018 + +@author: ken38 +"" +#original findorientations first +Created on Wed Mar 22 19:04:10 2017 + +@author: bernier2 + +""" +#%% +# MUST BE RUN FROM FOLDER WHERE ETA OMEGA MAPS LIVE +# THIS SCRIPT IS DESIGNED TO BE USED ONCE ALL ETA OME MAPS HAVE BEEN FORMED AND ONLY FOR SUBSEQUENT CLOUD SEARCHES. +# IF ETA OMEGA MAP IS NOT OKAY THEN PLEASE RUN WITH ANOTHER SCRIPT TO GENERATE - OR - CHANGE CLOBBER_MAPS TO TRUE +# it is better to not change clobber maps to true but simply generate a new series of eta_ome maps and change yml directory + +#%% +from __future__ import print_function + +import time +import logging + +import os + +import glob + +import multiprocessing + +import numpy as np + +from scipy import ndimage + +import timeit + +import argparse + +try: + import dill as cpl +except(ImportError): + import cPickle as cpl + +import yaml + +from hexrd import constants as cnst +from hexrd import config +from hexrd import imageseries +from hexrd.imageseries.omega import OmegaImageSeries +from hexrd import instrument +from hexrd.findorientations import \ + generate_orientation_fibers, \ + run_cluster +from hexrd.xrd import transforms_CAPI as xfcapi +from hexrd.xrd import indexer +from matplotlib import pyplot as plt +from hexrd.xrd.xrdutil import EtaOmeMaps + +from hexrd.xrd import rotations as rot + +logger = logging.getLogger(__name__) + +# just require scikit-learn? 
+have_sklearn = False +try: + import sklearn + vstring = sklearn.__version__.split('.') + if vstring[0] == '0' and int(vstring[1]) >= 14: + from sklearn.cluster import dbscan + from sklearn.metrics.pairwise import pairwise_distances + have_sklearn = True +except ImportError: + pass + +# plane data +def load_pdata(cpkl, key): + with file(cpkl, "r") as matf: + mat_list = cpl.load(matf) + return dict(zip([i.name for i in mat_list], mat_list))[key].planeData + +# images +def load_images(yml): + return imageseries.open(yml, format="frame-cache", style="npz") + +# instrument +def load_instrument(yml): + with file(yml, 'r') as f: + icfg = yaml.load(f) + return instrument.HEDMInstrument(instrument_config=icfg) + +""" +Created on Fri Dec 9 13:05:27 2016 + +@author: bernier2 +""" + +import os + +import yaml + +import h5py + +import numpy as np + +from scipy import ndimage +from scipy.linalg.matfuncs import logm + +from hexrd.gridutil import cellIndices, make_tolerance_grid +from hexrd import matrixutil as mutil +from hexrd.valunits import valWUnit +from hexrd.xrd.transforms_CAPI import anglesToGVec, \ + detectorXYToGvec, \ + gvecToDetectorXY, \ + makeDetectorRotMat, \ + makeOscillRotMat, \ + makeRotMatOfExpMap, \ + mapAngle, \ + oscillAnglesOfHKLs, \ + rowNorm, \ + validateAngleRanges +from hexrd.xrd import xrdutil +from hexrd.xrd.crystallography import PlaneData +from hexrd import constants as ct + +# from hexrd.utils.progressbar import ProgressBar, Bar, ETA, ReverseBar + +# FIXME: distortion kludge +from hexrd.xrd.distortion import GE_41RT # BAD, VERY BAD!!! + +from skimage.draw import polygon + +#%% +if __name__ == '__main__': + + Run preprocessor + + parser = argparse.ArgumentParser( + description="batchfndorigrains") + + parser.add_argument('yaml_file', + help="yaml file", type=str) + parser.add_argument('sample_name', + help="sample name", type=str) + parser.add_argument('scan_num', + help="scan num to fit grains", type=int) + + args = parser.parse_args() + cfg_filename = args.yaml_file + scan_number = args.scan_num + samp_name = args.sample_name +# %% +#----- The following parameters are passed through as arguements if running from command line ------ +#cfg_filename = 'ti7-05-cloud-tvecs.yml' +#samp_name = 'ti7-05' +#scan_number = 68 +#inital_or_final = 'final'#'initial' + +# ============================================================================= +# START USER INPUT +# ============================================================================= + +#NEW GOE VARIABLES --- USER SHOULD EDIT ONCE -- same for all loadsteps +start_scan = 11 #must match GOE +misorientation_bnd = 3.0 #must match GOE +misorientation_spacing = 0.25 #must match GOE +id_analysisname = 'ti7-11' # must match GOE builder + +#location of master grains.out +dir_string = '/nfs/chess/user/ken38/Ti7_project/ti7-11-1percent/' +load_step_zero_dir = dir_string + 'ti7-11-scan-%d' % start_scan +save_folder = 'saved_GOEs_centered/' + +#%% +#location and name of npz file output +npz_save_dir = dir_string + save_folder + inital_or_final + '/' +# make output directory if doesn't exist +if not os.path.exists(npz_save_dir): + os.mkdir(npz_save_dir) +#%% +# ------------------------------------------------------------------------------ +#cfg file -- currently ignores image_series block + +data_dir = dir_string +fc_stem = "%s_%s_%%s*.npz" % (samp_name, scan_number) + +make_max_frames = False +use_direct_search = False + +# for clustering neighborhood +# FIXME +min_samples = 2 + +# maps options +clobber_maps = False +show_maps = False + 
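[Editorial aside] For orientation, here is a sketch of the misorientation "box" these GOE scripts test around each seed grain, mirroring the meshgrid expansion in `GOE_builder_one_load.py` above and using the bound/spacing values set in this script (a 3.0 degree bound at 0.25 degree spacing gives 25^3 = 15625 trial orientations per grain):

```python
import numpy as np

misorientation_bnd = 3.0       # degrees, must match the GOE builder
misorientation_spacing = 0.25  # degrees, must match the GOE builder

mis_amt = np.radians(misorientation_bnd)
spacing = np.radians(misorientation_spacing)

# same endpoint fudge as the builder, so the +bound edge is included
ori_pts = np.arange(-mis_amt, mis_amt + 0.999*spacing, spacing)

# every seed exponential map gets one perturbed copy per grid point
Xs0, Ys0, Zs0 = np.meshgrid(ori_pts, ori_pts, ori_pts)
grid0 = np.vstack([Xs0.flatten(), Ys0.flatten(), Zs0.flatten()]).T
print(grid0.shape)  # (15625, 3) exp-map perturbations per seed orientation
```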
+#%% one grain only +grain_out = '/grains.out' +load_data_zero = np.loadtxt(load_step_zero_dir + grain_out) +grain_id = load_data_zero[145:,0] + +#%% LOAD YML FILE +cfg = config.open(cfg_filename)[0] + +#analysis_id = '%s_%s' % ( +# cfg.analysis_name.strip().replace(' ', '-'), +# cfg.material.active.strip().replace(' ', '-'), +# ) + +active_hkls = cfg.find_orientations.orientation_maps.active_hkls +if active_hkls == 'all': + active_hkls = None + +max_tth = cfg.fit_grains.tth_max +if max_tth: + if type(cfg.fit_grains.tth_max) != bool: + max_tth = np.degrees(float(max_tth)) +else: + max_tth = None + +# load plane data +plane_data = load_pdata(cfg.material.definitions, cfg.material.active) +plane_data.tThMax = max_tth + +# load instrument +instr = load_instrument(cfg.instrument.parameters) +det_keys = instr.detectors.keys() + +# !!! panel buffer setting is global and assumes same typ of panel! +for det_key in det_keys: + instr.detectors[det_key].panel_buffer = \ + np.array(cfg.fit_grains.panel_buffer) + +# grab eta ranges +eta_ranges = cfg.find_orientations.eta.range + +# for indexing +build_map_threshold = cfg.find_orientations.orientation_maps.threshold + +on_map_threshold = cfg.find_orientations.threshold +fiber_ndiv = cfg.find_orientations.seed_search.fiber_ndiv +fiber_seeds = cfg.find_orientations.seed_search.hkl_seeds + +tth_tol = np.degrees(plane_data.tThWidth) +eta_tol = cfg.find_orientations.eta.tolerance +ome_tol = cfg.find_orientations.omega.tolerance +# omega period... +# QUESTION: necessary??? +ome_period = np.radians(cfg.find_orientations.omega.period) + +npdiv = cfg.fit_grains.npdiv + +compl_thresh = cfg.find_orientations.clustering.completeness +cl_radius = cfg.find_orientations.clustering.radius + +# % + +imsd = dict.fromkeys(det_keys) +for det_key in det_keys: + fc_file = sorted( + glob.glob( + os.path.join( + data_dir, + fc_stem % det_key.lower() + ) + ) + ) + if len(fc_file) != 1: + raise(RuntimeError, 'cache file not found, or multiple found') + else: + ims = load_images(fc_file[0]) + imsd[det_key] = OmegaImageSeries(ims) + + +if make_max_frames: + max_frames_output_name = os.path.join( + data_dir, + "%s_%d-maxframes.hdf5" % (samp_name, scan_number) + ) + + if os.path.exists(max_frames_output_name): + os.remove(max_frames_output_name) + + max_frames = dict.fromkeys(det_keys) + for det_key in det_keys: + max_frames[det_key] = imageseries.stats.max(imsd[det_key]) + + ims_out = imageseries.open( + None, 'array', + data=np.array([max_frames[i] for i in max_frames]), + meta={'panels': max_frames.keys()} + ) + imageseries.write( + ims_out, max_frames_output_name, + 'hdf5', path='/imageseries' + ) + +#%% +class GenerateEtaOmeMaps(object): + """ + eta-ome map class derived from new image_series and YAML config + + ...for now... + + must provide: + + self.dataStore + self.planeData + self.iHKLList + self.etaEdges # IN RADIANS + self.omeEdges # IN RADIANS + self.etas # IN RADIANS + self.omegas # IN RADIANS + + """ + def __init__(self, grain, image_series_dict, instrument, plane_data, + eta_step=0.25, threshold=None, + ome_period=(0, 360)): + """ + image_series must be OmegaImageSeries class + instrument_params must be a dict (loaded from yaml spec) + active_hkls must be a list (required for now) + """ + grain_params = np.squeeze(load_data_zero[grain,:]) + + analysis_id = id_analysisname + '-grain-%d' % grain + '-%s' % initial_or_final + + self._planeData = plane_data + + # ???: change name of iHKLList? + # ???: can we change the behavior of iHKLList? 
+ active_hkls = [0,1,2,3,4] + + if active_hkls is None: + n_rings = len(plane_data.getTTh()) + self._iHKLList = range(n_rings) + else: + self._iHKLList = active_hkls + n_rings = len(active_hkls) + + # ???: need to pass a threshold? + eta_mapping, etas = instrument.extract_polar_maps_grain( + plane_data, image_series_dict, grain_params, + active_hkls=active_hkls, threshold=threshold, + tth_tol=None, eta_tol=eta_step) + + + # grab a det key + # WARNING: this process assumes that the imageseries for all panels + # have the same length and omegas + det_key = eta_mapping.keys()[0] + data_store = [] + for i_ring in range(n_rings): + full_map = np.zeros_like(eta_mapping[det_key][i_ring]) + nan_mask_full = np.zeros( + (len(eta_mapping), full_map.shape[0], full_map.shape[1]) + ) + i_p = 0 + for det_key, eta_map in eta_mapping.iteritems(): + nan_mask = ~np.isnan(eta_map[i_ring]) + nan_mask_full[i_p] = nan_mask + full_map[nan_mask] += eta_map[i_ring][nan_mask] + i_p += 1 + re_nan_these = np.sum(nan_mask_full, axis=0) == 0 + full_map[re_nan_these] = np.nan + data_store.append(full_map) + self._dataStore = data_store + + # handle omegas + omegas_array = image_series_dict[det_key].metadata['omega'] + self._omegas = mapAngle( + np.radians(np.average(omegas_array, axis=1)), + np.radians(ome_period) + ) + self._omeEdges = mapAngle( + np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), + np.radians(ome_period) + ) + + # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the + # indexer to work properly + if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: + # !!! SIGNED delta ome + del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0]) + self._omeEdges[-1] = self._omeEdges[-2] + del_ome + + # handle etas + # WARNING: unlinke the omegas in imageseries metadata, + # these are in RADIANS and represent bin centers + self._etas = etas + self._etaEdges = np.r_[ + etas - 0.5*np.radians(eta_step), + etas[-1] + 0.5*np.radians(eta_step)] + + self.save(npz_save_dir + analysis_id + "_maps.npz") + + @property + def dataStore(self): + return self._dataStore + + @property + def planeData(self): + return self._planeData + + @property + def iHKLList(self): + return np.atleast_1d(self._iHKLList).flatten() + + @property + def etaEdges(self): + return self._etaEdges + + @property + def omeEdges(self): + return self._omeEdges + + @property + def etas(self): + return self._etas + + @property + def omegas(self): + return self._omegas + + def save(self, filename): + """ + self.dataStore + self.planeData + self.iHKLList + self.etaEdges + self.omeEdges + self.etas + self.omegas + """ + args = np.array(self.planeData.getParams())[:4] + args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom') + hkls = self.planeData.hkls + save_dict = {'dataStore': self.dataStore, + 'etas': self.etas, + 'etaEdges': self.etaEdges, + 'iHKLList': self.iHKLList, + 'omegas': self.omegas, + 'omeEdges': self.omeEdges, + 'planeData_args': args, + 'planeData_hkls': hkls} + np.savez_compressed(filename, **save_dict) + return + pass # end of class: GenerateEtaOmeMaps + +#%% +from multiprocessing import Pool +from functools import partial + +num_processors=24 +grain_id= list(np.array(grain_id).astype('int').T) +#%% +print('building eta_ome maps using multiprocessing...') + +pool = Pool(processes=num_processors) + +#active hkls hardcoded to [0,1,2,3,4] +eta_ome_partial = partial(GenerateEtaOmeMaps, image_series_dict=imsd, instrument=instr, plane_data=plane_data, threshold=build_map_threshold, 
ome_period=cfg.find_orientations.omega.period) + +eta_ome = pool.map(eta_ome_partial, grain_id, chunksize=1) +pool.close() diff --git a/scripts/ighexrd_v1/intragrain_scripts/near-field_intragrain.py b/scripts/ighexrd_v1/intragrain_scripts/near-field_intragrain.py new file mode 100644 index 00000000..a81ab473 --- /dev/null +++ b/scripts/ighexrd_v1/intragrain_scripts/near-field_intragrain.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +""" +Created on Mon Sep 10 13:48:29 2018 + +@author: ken38 +""" + +import numpy as np +import matplotlib.pyplot as plt +from cycler import cycler +import os +import copy + +from hexrd import valunits +from hexrd.grainmap import nfutil + +from hexrd.xrd import rotations as rot +from hexrd.xrd import symmetry as sym +from hexrd.xrd import transforms as xf +from hexrd.xrd import transforms_CAPI as xfcapi +from hexrd.xrd import xrdutil +from hexrd.xrd.transforms_CAPI import anglesToGVec, \ + makeRotMatOfExpMap, makeDetectorRotMat, makeOscillRotMat, \ + gvecToDetectorXY, detectorXYToGvec +import numba +import argparse +import contextlib +import multiprocessing +import tempfile +import shutil + +import yaml +import cPickle as cpl + +#============================================================================== +# %% INPUT FILES: Location of layer data - nf map and nf images +#============================================================================== + +#location of specific layer of near-field .NPZ array (initial) +file_dir='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/' +npz_file_stem = 'initial_nf_uniform_diffvol_1' + +#location of nearfield images +data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/7/nf/' #layer 1 +img_start=31602#for 0.25 degree/steps and 5 s exposure, end up with 6 junk frames up front, this is the 7th +num_imgs=1440 +img_nums=np.arange(img_start,img_start+num_imgs,1) + +#location of detector file (.yml) +det_file='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/retiga.yml' +#location of material file (.cpl) +mat_file='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/materials.cpl' + + +#grain_id.out file generated per grain with misorientation orientations for guesses +grain_id_out_dir='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/GOE/initial_goe/' + +grain_out_file='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-scan-14/grains.out' + +#============================================================================== +# %% OUTPUT FILES: Location to save new .npz +#============================================================================== + +output_dir = '/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/NEAR-FIELD/initial_volume_1/' + +# make output directory if doesn't exist +if not os.path.exists(output_dir): + os.mkdir(output_dir) + +#============================================================================== +# %% USER INPUT - X-RAY DATA experiment and analysis parameters (copy from nf) +#============================================================================== +x_ray_energy=61.332 #keV from experiment + +#name of the material in materials.cpl file +mat_name='ti7al' + +#keep at zero, cannot delete right now +misorientation_bnd=0.0 #degrees +misorientation_spacing=0.1 #degrees + +beam_stop_width=0.6#mm, assumed to be in the center of the detector + +ome_range_deg=[(0.,359.75)] #degrees + +max_tth=-1. 
+
+#image processing
+num_for_dark=250 #num images to use for median dark
+threshold=6. #set to 7 for initial. Currently using dark image 'min'
+num_erosions=2 #num iterations of image erosion, don't mess with unless you know what you're doing
+num_dilations=3 #num iterations of image dilation, don't mess with unless you know what you're doing
+ome_dilation_iter=1 #num iterations of 3d image stack dilation, don't mess with unless you know what you're doing
+
+chunk_size=500 #chunksize for multiprocessing, don't mess with unless you know what you're doing
+
+cross_sectional_dim=1.35 #cross section to reconstruct (should be at least 20%-30% over sample width)
+#voxel spacing for the near field reconstruction
+voxel_spacing = 0.005 #in mm
+##vertical (y) reconstruction voxel bounds in mm
+v_bnds=[-0.085,0.085]
+#v_bnds=[-0.,0.]
+
+#==============================================================================
+# %% Set threshold values for misorientation data (ff - clouds)
+#==============================================================================
+
+#thresholds for grains in reconstructions
+comp_thresh=0.7 #only use orientations with completenesses ABOVE this threshold
+chi2_thresh=1.0 #is not used; make sure value > 0
+
+#==============================================================================
+# %% LOAD GRAIN AND EXPERIMENT DATA
+#==============================================================================
+
+experiment, nf_to_ff_id_map = nfutil.gen_trial_exp_data(grain_out_file,det_file,mat_file, x_ray_energy, mat_name, max_tth, comp_thresh, chi2_thresh, misorientation_bnd, \
+                                                        misorientation_spacing,ome_range_deg, num_imgs, beam_stop_width)
+
+#==============================================================================
+# %% NEAR FIELD - MAKE MEDIAN DARK
+#==============================================================================
+print '>>>>>>>>>>>>>>>>>>loading images>>>>>>>>>>>>>>>>>>'
+dark=nfutil.gen_nf_dark(data_folder,img_nums,num_for_dark,experiment.nrows,experiment.ncols,dark_type='median',num_digits=6)
+
+#==============================================================================
+# %% NEAR FIELD - LOAD IMAGE DATA AND PROCESS
+#==============================================================================
+
+image_stack=nfutil.gen_nf_cleaned_image_stack(data_folder,img_nums,dark,ome_dilation_iter,threshold,experiment.nrows,experiment.ncols,num_digits=6)#,grey_bnds=(5,5),gaussian=4.5)
+
+#==============================================================================
+# %% Load STITCHED-DATA from .npz
+#==============================================================================
+
+print ('>>>>>>>>>>>>>>>>>>loading nf map>>>>>>>>>>>>>>>>>>')
+hold = np.load(file_dir + npz_file_stem + '_grain_map_data.npz')
+
+grain_map = hold['grain_map']
+confidence_map = hold['confidence_map']
+Xs = hold['Xs']
+Ys = hold['Ys']
+Zs = hold['Zs']
+ori_list = hold['ori_list']
+id_remap = hold['id_remap']
+
+all_grains_in_layer = np.unique(grain_map)
+
+#%%
+
+print ('.......multi process..........')
+#new gen_trial_data definition
+progress_handler = nfutil.progressbar_progress_observer()
+save_handler=nfutil.forgetful_result_handler()
+
+controller = nfutil.ProcessController(save_handler, progress_handler,
+                                      ncpus=44, chunk_size=chunk_size)
+
+multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn'
+
+#%%
+mis_all = np.zeros(grain_map.shape)
+confidence_index_new = np.copy(confidence_map)
+grain_map_new = np.copy(grain_map)
+compiled_map = np.copy(grain_map.astype('float'))
+
+#%%
+import scipy.ndimage.morphology as morphology
+
+#%%
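+# --- editor's sketch (illustrative; the loop below inlines this pattern) ----
+# Each pass of the per-grain loop isolates one grain id in the map, dilates
+# the binary mask to catch boundary voxels, and gathers the physical
+# coordinates of the masked voxels for re-testing:
+def grain_voxel_coords(g_map, g_id, Xs, Ys, Zs, n_dilate=5):
+    mask = morphology.binary_dilation(g_map == g_id, iterations=n_dilate)
+    idx = np.where(mask)
+    return np.vstack([Xs[idx], Ys[idx], Zs[idx]]).T, idx
+# -----------------------------------------------------------------------------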
+for grain in range(1,all_grains_in_layer.shape[0]):
+    #GRAIN ID -- USER INPUT
+    grain_id = all_grains_in_layer[grain]
+
+    print ('>>>>>>>>>>>>>>>>iteration %d>>>>>>>>>>>>>>>' % grain)
+    print ('>>>>>>>>>>>>>>>>>>grain %d>>>>>>>>>>>>>>>>>' % grain_id)
+
+    grain_id_out_file= grain_id_out_dir + 'grain_id_%s.out' % (grain_id)
+
+    #LOAD EXP_MAPS CONFIDENCE TO DECIDE THRESHOLDS
+    ori_out = np.loadtxt(grain_id_out_file)
+    ori_data = ori_out[:,3:6]
+    ori_comp=ori_out[:,1]
+
+    comp_thresh=np.amax(ori_comp)*0.8 #only use orientations with completenesses ABOVE this threshold
+    chi2_thresh=1.0 #is not used; make sure value > 0
+
+    #GENERATES GRAIN MASK FROM GRAIN_MAP
+    grains_plot_binary_0 = np.copy(grain_map)
+
+    grains_plot_binary_0[grains_plot_binary_0 < grain_id] = 0
+    grains_plot_binary_0[grains_plot_binary_0 > grain_id] = 0
+    grains_plot_binary_0[grains_plot_binary_0 == grain_id] = 1
+
+    grains_plot_binary = morphology.binary_dilation(grains_plot_binary_0,iterations=5).astype('int')
+
+    # GRAIN MASK PROCESSING - CREATE GRAIN TEST GRID
+    test=np.where(grains_plot_binary)
+
+    test_crd_grain=np.zeros([len(test[0]),3])
+
+    for ii in np.arange(len(test[0])):
+        test_crd_grain[ii,0]=Xs[test[0][ii],test[1][ii],test[2][ii]]
+        test_crd_grain[ii,1]=Ys[test[0][ii],test[1][ii],test[2][ii]]
+        test_crd_grain[ii,2]=Zs[test[0][ii],test[1][ii],test[2][ii]]
+
+    print ('----number of test coordinates = %d -----' % test_crd_grain.shape[0])
+
+    if test_crd_grain.shape[0] == 0:
+        pass
+
+    else:
+        # LOAD GRAIN AND EXPERIMENT DATA
+        experiment_g, nf_to_ff_id_map_g = nfutil.gen_trial_exp_data(grain_id_out_file,det_file,mat_file, x_ray_energy, mat_name, max_tth, comp_thresh, chi2_thresh, \
+                                                                    ome_range_deg, num_imgs, beam_stop_width)
+
+        print ('----number of orientations = %d -----' % len(experiment_g.exp_maps))
+
+        if len(experiment_g.exp_maps) < 10:
+            pass
+
+        else:
+
+            # INSTANTIATE CONTROLLER - RUN BLOCK NO EDITING
+            progress_handler = nfutil.progressbar_progress_observer()
+            save_handler=nfutil.forgetful_result_handler()
+
+            controller = nfutil.ProcessController(save_handler, progress_handler,
+                                                  ncpus=44, chunk_size=chunk_size)
+
+            multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn'
+            global _multiprocessing_start_method
+            _multiprocessing_start_method = 'fork'
+            #==============================================================================
+            # TEST ORIENTATIONS WITHIN GRAIN
+            #==============================================================================
+            try:
+
+                raw_confidence_mis=nfutil.test_orientations(image_stack, experiment_g, test_crd_grain,
+                                                            controller,multiprocessing_start_method)
+
+                #==============================================================================
+                # PUT DATA BACK INTO MESH
+                #==============================================================================
+                #full mesh test_crds
+                print(' >>>>>>>>>>>>>>>>>>>>>putting data back in mesh>>>>>>>>>>>>>>>>>>>>>')
+                test_crd_all = np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T
+
+                raw_confidence_new = np.empty([experiment_g.n_grains,len(test_crd_all[:,0])])
+
+                for iii in range(0,test_crd_grain.shape[0]):
+                    grain_location = np.where((test_crd_all == test_crd_grain[iii,:]).all(axis=1))
+                    raw_confidence_new[:,grain_location[0][0]] = raw_confidence_mis[:,iii]
+
+                #==============================================================================
+                # MASK TEST
COORDINATES + #============================================================================== + + print('Compiling Confidence Map...') + confidence_map_g=np.max(raw_confidence_new,axis=0).reshape(Xs.shape) + grain_map_g=np.argmax(raw_confidence_new,axis=0).reshape(Xs.shape) + #id_remap + max_orientation_no=np.max(grain_map_g) + grain_map_copy=np.copy(grain_map_g) + print('Remapping grain ids to ff...') + for ii in np.arange(max_orientation_no): + this_orientation=np.where(grain_map_g==ii) + grain_map_copy[this_orientation]=nf_to_ff_id_map_g[ii] + grain_map_g=grain_map_copy + + #============================================================================== + # MISORIENTATION + #============================================================================== + print('calculate misorientation') + + tmp_data_avg=np.loadtxt(grain_out_file) + tmp_data_grain=np.loadtxt(grain_id_out_file) + + id_avg=tmp_data_avg[:,0] + ori_avg=tmp_data_avg[:,3:6] + id_mis=tmp_data_grain[:,0] + ori_mis=tmp_data_grain[:,3:6] + + mis = np.zeros(grain_map_g.shape) + + q_mor = rot.quatOfExpMap(ori_mis.T) + q_avg = rot.quatOfExpMap(ori_avg.T) + + material_file_loc = mat_file # hexrd material file in cpickle format + mat_name='ti7al' + + mat_list = cpl.load(open(material_file_loc, 'r')) + mat_idx = np.where([mat_list[i].name == mat_name for i in range(len(mat_list))])[0] + + # grab plane data, and useful things hanging off of it + pd = mat_list[mat_idx[0]].planeData + qsyms=sym.quatOfLaueGroup(pd.getLaueGroup()) + + for w in range(0,len(test[0])): + q2 = np.atleast_2d(q_mor[:,grain_map_g[test[0][w],test[1][w],test[2][w]]]).T + q1 = np.atleast_2d(q_avg[:,grain_id]).T + mis[test[0][w],test[1][w],test[2][w]] = rot.misorientation(q1,q2)[0]*180./np.pi + + # plt.imshow(confidence_map_g[20,:,:],cmap='gray') + # plt.hold(True) + # plt.imshow(mis[20,:,:], alpha = 0.5) + # plt.colorbar() + #============================================================================== + # put back into the master mesh + #============================================================================== + print ('put in master mesh') + #fresh array + empty_map_full = np.zeros(compiled_map.shape) + #confidence_index_local = np.copy(empty_map_full) + #grain_map_local = np.copy(empty_map_full) + mis_local = np.copy(empty_map_full) + compiled_map_local = np.copy(empty_map_full) + #place in fresh array + #confidence_index_local[test] = confidence_map_g[test] + #grain_map_local[test] = grain_map_g[test] + mis_local[test] = mis[test] + compiled_map_local[test] = grain_map[test].astype('float') + grain_map_g[test].astype('float')/100000. 
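+                # (editor's note, not in the original) compiled_map_local
+                # packs two ids per voxel: the integer part is the parent
+                # grain id from the average map, and the fractional part is
+                # the intragrain orientation id scaled by 1e-5.  Downstream,
+                # stitch_nf_grains.py recovers the parent id with np.floor();
+                # the orientation id is presumably
+                # np.round((value - np.floor(value)) * 1e5).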
+
+                print ('saving as npz')
+
+                save_string = 'grain_%d_nf' % grain_id
+                #np.savez('/nfs/chess/user/ken38/Ti7_project/nf_data_all/ti7-05-nf/grain-by-grain-nf/'+ save_string, combined_map = local_compiled_map[test], test_crds = test, local_grain_map = grain_map_g[test], local_confidence_map = confidence_map_g[test])
+                np.savez(output_dir + save_string, compiled_map_local = compiled_map_local, test_crds = test, local_grain_map = grain_map_g, local_confidence_map = confidence_map_g, mis_local = mis_local)
+                print ('loop end')
+
+            except:
+                pass
+    #==============================================================================
+    #% save grain mesh
+    #==============================================================================
+#%
+#np.savez(output_ext,grain_map_new = grain_map_new,confidence_index_new = confidence_index_new,compiled_map = compiled_map,mis_all = mis_all,Xs=Xs,Ys=Ys,Zs=Zs, grain_map = grain_map, confidence_map = confidence_map)
diff --git a/scripts/ighexrd_v1/intragrain_scripts/nf_intragrainrecons.sh b/scripts/ighexrd_v1/intragrain_scripts/nf_intragrainrecons.sh
new file mode 100644
index 00000000..29b80e27
--- /dev/null
+++ b/scripts/ighexrd_v1/intragrain_scripts/nf_intragrainrecons.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+
+cd /nfs/chess/aux/user/ken38/Ti7_project/paper-near-field-run/step_1_near_field_scripts/nf_3_final_stitched_orientations/
+source activate hexrd_0526_nftest
+
+echo starting final gbg nf
+#python nf_gbg_final_diffvol_1.py
+#python nf_gbg_final_diffvol_2.py
+python nf_gbg_final_diffvol_3.py
+python nf_gbg_final_diffvol_4.py
+echo Done
+
diff --git a/scripts/ighexrd_v1/intragrain_scripts/stitch_nf_diffvols.py b/scripts/ighexrd_v1/intragrain_scripts/stitch_nf_diffvols.py
new file mode 100644
index 00000000..00aef6cd
--- /dev/null
+++ b/scripts/ighexrd_v1/intragrain_scripts/stitch_nf_diffvols.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Aug 18 11:19:45 2019
+
+@author: ken38
+"""
+
+import numpy as np
+import h5py  # needed for the .h5 export at the bottom of this script
+from hexrd.grainmap import nfutil
+#==============================================================================
+# %% Load DATA from .npz (ONLY FOR RELOADING DATA)
+#==============================================================================
+
+voxel_spacing=0.005 #in mm
+
+px = 271
+stack = 34
+layers = 4
+overlap_amt = 4
+stack0= 30
+
+half_bnds = ((stack0*voxel_spacing*layers)+(overlap_amt*voxel_spacing))/2
+v_bnds = [-half_bnds,half_bnds]
+
+#%%
+#full_stitch = np.empty([stack*layers,px,px])
+
+full_stitch = np.empty([0,px,px])
+full_stitch_con = full_stitch
+full_stitch_ori = full_stitch
+full_stitch_mis = full_stitch
+full_stitch_combo = full_stitch
+#full_stitch_conori = full_stitch
+
+npz_save_dir = '/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/NEAR-FIELD/'
+npz_string = 'ti7-11-stitched-initial_intragrain'
+
+#%%
+output_dir='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/NEAR-FIELD/'
+
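+# --- editor's note (not in the original script): the loop below stitches the
+# diffraction volumes from the top volume (i = layers) downward.  Consecutive
+# volumes share `overlap_amt` slices; within that overlap the maps are merged
+# voxel by voxel, keeping the entry from whichever volume reports the higher
+# confidence, and only the non-overlapping slices are appended. ---------------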
+for i in range(layers,0,-1):
+#np.savez(save_dir+save_name, mask_confidence = mask_confidence,mask_grain = mask_grain, mask_misorientation = mask_misorientation, mask_combined_index=mask_combined_index, grain_avg_orientation_list=grain_avg_orientation_list)
+    output_stem='initial_nf_intragrain_diffvol_%d' % i
+    hold = np.load(output_dir+output_stem+'.npz')
+    grain_map = hold['mask_grain']
+    misorientation_map = hold['mask_misorientation']
+    confidence_map = hold['mask_confidence']
+    #grain_map_ori= hold['grain_map']
+    #confidence_map_ori= hold['confidence_map']
+    combined_index = hold['mask_combined_index']
+
+    if i == layers:
+        mismap_red = misorientation_map
+        grain_map_red = grain_map
+        con_red = confidence_map
+        combo_red = combined_index
+        #oricon_red = confidence_map_ori
+
+    if i < layers:
+        for ii in range(0,overlap_amt):
+            layer_idx = full_stitch_ori.shape[0]-1-ii
+            for iii in range(0,grain_map.shape[0]):
+                for iv in range(0,grain_map.shape[1]):
+                    # keep the higher-confidence entry in the overlap
+                    # (comparison direction matches stitch_nf_grains.py)
+                    if full_stitch_con[layer_idx,iii,iv] > confidence_map[overlap_amt-1-ii,iii,iv]:
+                        pass
+                    else:
+                        full_stitch_con[layer_idx,iii,iv] = confidence_map[overlap_amt-1-ii,iii,iv]
+                        full_stitch_ori[layer_idx,iii,iv] = grain_map[overlap_amt-1-ii,iii,iv]
+                        full_stitch_mis[layer_idx,iii,iv] = misorientation_map[overlap_amt-1-ii,iii,iv]
+                        full_stitch_combo[layer_idx,iii,iv] = combined_index[overlap_amt-1-ii,iii,iv]
+
+        grain_map_red = grain_map[overlap_amt:,:,:]
+        con_red = confidence_map[overlap_amt:,:,:]
+        combo_red = combined_index[overlap_amt:,:,:]
+        mismap_red = misorientation_map[overlap_amt:,:,:]
+        #oricon_red = confidence_map_ori[overlap_amt:,:,:]
+
+    full_stitch_ori = np.append(full_stitch_ori,grain_map_red,axis=0)
+    full_stitch_con = np.append(full_stitch_con,con_red,axis=0)
+    full_stitch_mis = np.append(full_stitch_mis,mismap_red,axis=0)
+    full_stitch_combo = np.append(full_stitch_combo,combo_red,axis=0)
+    #full_stitch_conori = np.append(full_stitch_conori, oricon_red, axis=0)
+#%%
+
+test_crds, n_crds, Xs, Ys, Zs = nfutil.gen_nf_test_grid_tomo(grain_map.shape[1], grain_map.shape[2], v_bnds, voxel_spacing)
+
+#==============================================================================
+# %% SAVE PROCESSED GRAIN MAP DATA
+#==============================================================================
+
+#nfutil.save_nf_data(output_dir,output_stem,full_stitch_ori,full_stitch_con,Xs,Ys,Zs, full_stitch_mis, full_stitch_conori) #ori_list,id_remap=id_remap)
+np.savez(npz_save_dir + npz_string,grain_map=full_stitch_ori,confidence_map=full_stitch_con, misorientation_map=full_stitch_mis,combined_index=full_stitch_combo, Xs=Xs,Ys=Ys,Zs=Zs)
+
+#==============================================================================
+# %% SAVE DATA AS .H5
+#==============================================================================
+
+hf=h5py.File(output_dir+npz_string+'_data.h5', 'w') #('data.h5','w')
+#save_dir+save_stem+'_data.h5'
+
+g1=hf.create_group('group1')
+g1.create_dataset('grain_map', data=full_stitch_ori)
+g1.create_dataset('confidence_map', data=full_stitch_con)
+#g1.create_dataset('original_confidence_map', data=full_stitch_conori)
+g1.create_dataset('misorientation_map', data=full_stitch_mis)
+g1.create_dataset('combined', data = full_stitch_combo)
+g1.create_dataset('Xs', data=Xs)
+g1.create_dataset('Ys', data=Ys)
+g1.create_dataset('Zs', data=Zs)
+
+hf.close()
diff --git a/scripts/ighexrd_v1/intragrain_scripts/stitch_nf_grains.py b/scripts/ighexrd_v1/intragrain_scripts/stitch_nf_grains.py
new file mode 100644
index 00000000..60513151
--- /dev/null
+++ b/scripts/ighexrd_v1/intragrain_scripts/stitch_nf_grains.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jan 2 13:23:50 2019
+
+@author: ken38
+"""
+
+#%%
+import scipy.stats as stats
+import numpy as np
+import matplotlib.pyplot as plt
+
+#LOCATION OF NF DATA IN AVERAGE MAPS
+file_dir='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/'
+npz_file_stem = 'initial_nf_uniform_diffvol_1'
+
+#LOCATION OF INDIVIDUAL NF DATA NPZ FROM GOEs
+npz_location =
'/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/NEAR-FIELD/' + +GOE_loc='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/GOE/initial_goe' +tomo_mask_file='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/tomo_mask.npy' + +#SAVE DIRECTORY +save_dir = npz_location +#SAVE NAME +save_name = 'initial_nf_intragrain_diffvol_1.npz' + +#%% LOAD NF DATA GENERATED FROM GRAIN.OUT +hold = np.load(file_dir + npz_file_stem + '_grain_map_data.npz') + +grain_map = hold['grain_map'] +confidence_map = hold['confidence_map'] +Xs = hold['Xs'] +Ys = hold['Ys'] +Zs = hold['Zs'] +ori_list = hold['ori_list'] +id_remap = hold['id_remap'] + +all_grains_in_layer = np.unique(grain_map) + +empty_map = np.zeros(grain_map.shape) +full_map_confidence = np.copy(empty_map) +full_map_combined = np.copy(empty_map) +full_misorientation = np.copy(empty_map) +full_grain_map = np.copy(empty_map) + +#%% +for i in range(1,len(all_grains_in_layer)): + + grain = all_grains_in_layer[i] + + try: + local = np.load(npz_location + 'grain_%d_nf.npz' % grain) + local_test_crds = local['test_crds'] + local_grain_map = local['local_grain_map'] + local_confidence_map = local['local_confidence_map'] + local_combined_map = local['compiled_map_local'] + local_mis = local['mis_local'] + for ii in range (0,local_test_crds.shape[1]): + if full_map_confidence[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] > local_confidence_map[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]]: + pass + else : + full_map_confidence[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] = local_confidence_map[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] + full_map_combined[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] = local_combined_map[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] + full_misorientation[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] = local_mis[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] + full_grain_map[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] = local_grain_map[local_test_crds[0][ii],local_test_crds[1][ii],local_test_crds[2][ii]] + except: + print grain + + +#%% plot +plt.close('all') + +layer_to_plot = 30 + +plt.figure() +plt.imshow(confidence_map[layer_to_plot,:,:], vmax=1) +plt.figure() +plt.imshow(full_map_confidence[layer_to_plot,:,:], vmax=1) + +plt.figure() +plt.imshow(full_map_confidence[layer_to_plot,:,:]-confidence_map[layer_to_plot,:,:], vmin=-0.5, vmax=0.5) + +#%% +tomo_mask = np.load(tomo_mask_file) + +#%% +layer_to_plot = 15 +plt.close('all') +mask = np.where(tomo_mask == False) + +mask_confidence = np.copy(full_map_confidence) +mask_confidence[:,mask[0],mask[1]]=-.001 + +mask_grain = np.copy(np.floor(full_map_combined)) +mask_grain[:,mask[0],mask[1]] =-1 + +mask_misorientation=np.copy(full_misorientation) +mask_misorientation[:,mask[0],mask[1]]=-.001 + +mask_combined_index=np.copy(full_map_combined) +mask_combined_index[:,mask[0],mask[1]]=-1 + +mask_local_id = np.copy(full_grain_map) +mask_local_id[:,mask[0],mask[1]] = -1 + +#%% +plt.close('all') +#plt.figure() +#plt.imshow(mask_confidence[layer_to_plot,:,:], vmax=1, cmap='gray') +#plt.figure() +#plt.imshow(mask_grain[layer_to_plot,:,:]) +#plt.figure() +#plt.imshow(mask_grain[layer_to_plot,:,:])#, vmax=1)#, cmap='gray') +plt.imshow(mask_misorientation[layer_to_plot,:,:], vmax=3, alpha = 1) +plt.colorbar() +#plt.figure() 
+#plt.imshow(mask_combined_index[layer_to_plot,:,:])
+
+#%%
+grain_avg_orientation_list = np.zeros([np.unique(mask_grain).shape[0], 4])
+mask_combin_id = np.zeros([mask_confidence.shape[0],mask_confidence.shape[1],mask_confidence.shape[2]])
+all_grains_in_mask = np.unique(mask_grain)
+
+for iii in range(0,np.unique(mask_grain).shape[0]):
+    grain = all_grains_in_mask[iii]
+    if grain != -1:
+        where = np.where(mask_grain==grain)
+        # modal (most common) subgrain orientation id within this grain
+        mode_subgrain_id = stats.mode(mask_local_id[where].astype('int'))
+        grain_id_out = np.loadtxt(GOE_loc+'/grain_id_%d.out' % grain)
+        exp_map_mode = grain_id_out[mode_subgrain_id[0],3:6]
+        grain_avg_orientation_list[iii] = [grain, exp_map_mode[0][0], exp_map_mode[0][1], exp_map_mode[0][2]]
+
+#%% SAVE NEW NPZ
+np.savez(save_dir+save_name, mask_confidence = mask_confidence,mask_grain = mask_grain, mask_misorientation = mask_misorientation, mask_combined_index=mask_combined_index, grain_avg_orientation_list=grain_avg_orientation_list)
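Editor's note (hedged illustration, not part of the patch): in the SciPy versions contemporary with these Python 2 scripts, `stats.mode` returns a pair of arrays `(modes, counts)`, which is why the loop above indexes `mode_subgrain_id[0]` before slicing the grain_id.out table:

import numpy as np
import scipy.stats as stats

ids = np.array([3, 3, 7, 3, 2])
mode_result = stats.mode(ids)   # -> mode array([3]), count array([3])
modal_id = mode_result[0][0]    # -> 3, the most common subgrain id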
diff --git a/scripts/ighexrd_v1/missing_grains_scripts/mg_centroid_conver.py b/scripts/ighexrd_v1/missing_grains_scripts/mg_centroid_conver.py
new file mode 100644
index 00000000..3bf9bdb2
--- /dev/null
+++ b/scripts/ighexrd_v1/missing_grains_scripts/mg_centroid_conver.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Feb 8 14:42:02 2020
+
+@author: ken38
+"""
+
+import numpy as np
+
+
+l=5
+output_dir='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/'
+output_stem = 'ti7-11-initial_average_vol_%d' % l
+
+#global information
+voxel_spacing = 0.005 #in mm
+overlap_in_microns = 0.020 #overlap between volumes, in mm (i.e., 20 microns)
+overlap_amt = overlap_in_microns/voxel_spacing
+num_diffvols = 5
+
+#diffraction_volume information
+layer = 0
+hold = np.load(output_dir+output_stem+'_grain_map_data.npz')
+
+#==============================================================================
+# %% Calculate Additional Parameters from Input (run without changing)
+#==============================================================================
+grain_map = hold['grain_map']
+confidence_map = hold['confidence_map']
+Xs_1 = hold['Xs']
+Ys_1 = hold['Ys']
+Zs_1 = hold['Zs']
+ori_list = hold['ori_list']
+id_remap = hold['id_remap']
+
+px = grain_map.shape[1]
+stack = grain_map.shape[0]
+
+stack0= stack-overlap_amt
+half_bnds = ((stack0*voxel_spacing*num_diffvols)+(overlap_amt*voxel_spacing))/2
+v_bnds = [-half_bnds,half_bnds]
+
+save_file = 'centroid_diff_%i_miss_grain.npy' #FILE NAME SAVED FROM MISSING_GRAIN_CENTROIDS.PY SCRIPT
+
+#%%
+# convert the voxel-index centroids for each diffraction volume to physical
+# coordinates (mm) via the Xs/Ys/Zs grids, then save them for re-testing
+for vol in range(1, num_diffvols + 1):
+    coordinates = np.load(save_file % vol).astype('int')
+    test_crds = np.zeros([coordinates.shape[0], 3])
+    for ii in np.arange(coordinates.shape[0]):
+        test_crds[ii,0] = Xs_1[coordinates[ii,0], coordinates[ii,1], coordinates[ii,2]]
+        test_crds[ii,1] = Ys_1[coordinates[ii,0], coordinates[ii,1], coordinates[ii,2]]
+        test_crds[ii,2] = Zs_1[coordinates[ii,0], coordinates[ii,1], coordinates[ii,2]]
+    np.save('missing_coords_vol_%d.npy' % vol, test_crds)
+
diff --git a/scripts/ighexrd_v1/missing_grains_scripts/mg_finder.py b/scripts/ighexrd_v1/missing_grains_scripts/mg_finder.py
new file mode 100644
index 00000000..935f3cbc
--- /dev/null
+++ b/scripts/ighexrd_v1/missing_grains_scripts/mg_finder.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 7 14:13:57 2020
+
+@author: ken38
+"""
+
+import numpy as np
+import h5py
+from hexrd.grainmap import nfutil
+import matplotlib.pyplot as plt
+#==============================================================================
+# %% Input Parameters and Load DATA from .npz (FOR GENERATING OTHER PARAMETERS)
+#==============================================================================
+l=5 #diffraction volume - each of mine were labeled in the output stem
+output_dir='/nfs/chess/aux/user/ken38/Ti7_project/ti7-11-1percent/'
+output_stem = 'ti7-11-initial_average_vol_%d' % l
+
+#global information
+voxel_spacing = 0.005 #in mm
+overlap_in_microns = 0.020 #overlap between volumes, in mm (i.e., 20 microns)
+overlap_amt = overlap_in_microns/voxel_spacing
+num_diffvols = 5
+
+#diffraction_volume information
+layer = 0
+hold = np.load(output_dir+output_stem+'_grain_map_data.npz')
+
+#==============================================================================
+# %% Calculate Additional Parameters from Input (run without changing)
+#==============================================================================
+grain_map = hold['grain_map']
+confidence_map = hold['confidence_map']
+Xs = hold['Xs']
+Ys = hold['Ys']
+Zs = hold['Zs']
+ori_list = hold['ori_list']
+id_remap = hold['id_remap']
+
+px = grain_map.shape[1]
+stack = grain_map.shape[0]
+
+stack0= stack-overlap_amt
+half_bnds = ((stack0*voxel_spacing*num_diffvols)+(overlap_amt*voxel_spacing))/2
+v_bnds = [-half_bnds,half_bnds]
+
+#%% IDENTIFY LOW CONFIDENCE REGION
+conf_threshold_high = 0.45 #note: the data in this example were of quite poor quality, so you will likely want a much higher threshold
+conf_threshold_low = 0.0
+
+low_conf = np.logical_and(confidence_map < conf_threshold_high,
+                          confidence_map > conf_threshold_low)
+
+#%% CHECK THAT YOUR THRESHOLD IS IDENTIFYING AREAS YOU WANT
+layer_no=30
+
+plt.figure('area check')
+plt.imshow(confidence_map[layer_no,:,:])
+plt.hold('on')
+plt.imshow(low_conf[layer_no,:,:], alpha = 0.2)
+
+#%% ADDITIONAL CHECK - LOOK AT HOW THE AREAS WILL SEGMENT AND MAKE SURE IT DOESN'T RECOGNIZE THEM AS ONE CONTIGUOUS BLOB
+layer_no=11
+
+from skimage import measure
+from skimage import filters
+
+all_labels = measure.label(low_conf[0:32,:,:])
+blob_labels = measure.label(low_conf[0:32,:,:], background = 0)
+
+plt.figure('labels',figsize=(9, 3.5))
+plt.subplot(131)
+plt.imshow(low_conf[layer_no,:,:], cmap='gray')
+plt.axis('off')
+plt.subplot(132)
+plt.imshow(all_labels[layer_no,:,:], cmap='nipy_spectral')
+plt.axis('off')
+plt.subplot(133)
+plt.imshow(blob_labels[layer_no,:,:], cmap='nipy_spectral')
+plt.axis('off')
+
+plt.tight_layout()
+plt.show()
+
+#%% CREATE CENTROID MAP OF LOW CONFIDENCE REGION
+
+from scipy import ndimage
+
+blob_labels_2 = ndimage.label(low_conf[0:32,:,:])[0]
+centroids_2 = ndimage.measurements.center_of_mass(low_conf[0:32,:,:], blob_labels_2, np.unique(blob_labels_2))
+
+centroid_point_map = np.zeros(np.shape(confidence_map))
+centroid_new = np.empty([0,3])
+for i in range(1,len(centroids_2)):
+    where=len(np.where(blob_labels_2==i)[0])
+    if where>10:
+        print i
+        centroid_new = np.append(centroid_new,np.reshape(np.array(centroids_2[i]),[1,3]),axis=0)
+        centroid_point_map[np.rint(centroids_2[i][0]).astype('int'),np.rint(centroids_2[i][1]).astype('int'), np.rint(centroids_2[i][2]).astype('int')] = 10
+
+#%% CHECK THAT THE CENTROIDS ARE IN THE LOCATIONS EXPECTED
+
+layer_no=10
+plt.figure('area check')
+plt.imshow(confidence_map[layer_no,:,:])
+plt.hold('on')
+plt.imshow(low_conf[layer_no,:,:], alpha = 0.2)
+plt.imshow(centroid_point_map[layer_no,:,:], alpha = 0.5)
+
+#%% SAVE FILE OF CENTROIDS
+
+save_file = 'centroid_diff_%d_miss_grain.npy' % l
+np.save(save_file,centroid_new)
+
+#%%
+
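Editor's note: the next script, mg_orientation_finder.py, scores a list of candidate quaternions at the missing-grain coordinates produced above. The quaternion-to-exponential-map conversion it uses is the standard axis-angle relation; a minimal NumPy-only sketch with a hypothetical unit quaternion (w first):

import numpy as np

q = np.array([0.9239, 0.3827, 0.0, 0.0])  # ~45 deg rotation about x
phi = 2.0 * np.arccos(q[0])               # rotation angle
n = q[1:] / np.linalg.norm(q[1:])         # unit rotation axis
exp_map = phi * n                         # exponential-map parameters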
diff --git a/scripts/ighexrd_v1/missing_grains_scripts/mg_orientation_finder.py b/scripts/ighexrd_v1/missing_grains_scripts/mg_orientation_finder.py
new file mode 100644
index 00000000..b77ef851
--- /dev/null
+++ b/scripts/ighexrd_v1/missing_grains_scripts/mg_orientation_finder.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Aug 14 11:57:34 2018
+
+@author: ken38
+"""
+
+#%% Necessary Dependencies
+# PROCESSING NF GRAINS WITH MISORIENTATION
+#==============================================================================
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+import multiprocessing as mp
+
+import os
+
+from hexrd.grainmap import nfutil
+from hexrd.grainmap import tomoutil
+from hexrd.grainmap import vtkutil
+
+from hexrd.xrd import transforms_CAPI as xfcapi
+from hexrd.xrd import rotations as rot
+#==============================================================================
+# %% FILES TO LOAD - CAN BE EDITED
+#==============================================================================
+
+main_dir = '/nfs/chess/user/ken38/Ti7_project/ti7-11-1percent/' #working directory
+
+det_file = main_dir + 'retiga.yml' #near-field camera configuration file
+mat_file= main_dir + 'materials.cpl' #materials file: a cPickle file containing material information (e.g., the lattice parameters) needed for the reconstruction
+
+missing_grain_coordinates = 'missing_coords_vol_1.npy' #missing-coordinate list identified by the missing-centroids script
+quaternion_test_list = 'quat_2.npy' #a fine discretization over the fundamental region of orientation space for your material is recommended
+
+#==============================================================================
+# %% OUTPUT INFO - CAN BE EDITED
+#==============================================================================
+
+output_dir = main_dir #can change output directory
+output_stem='ti7-11-initial_average_vol_1'
+new_quat_save_output = 'quats_to_add_vol_1.npy'
+
+#==============================================================================
+# %% TOMOGRAPHY DATA FILES - CAN BE EDITED - ZERO LOAD SCAN
+#==============================================================================
+
+#Locations of tomography bright field images
+tbf_data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/2/nf/'
+
+tbf_img_start=31171 #for this rate, this is the 6th file in the folder
+tbf_num_imgs=10
+
+#Locations of tomography images
+tomo_data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/3/nf/'
+
+tomo_img_start=31187 #for this rate, this is the 6th file in the folder
+tomo_num_imgs=360
+
+#==============================================================================
+# %% GRAINS.OUT SCAN FOR THE LOAD STEP OF INTEREST
+#==============================================================================
+grain_out_file = main_dir + 'ti7-11-scan-14/grains.out'
+
+#%%
+#Locations of near field images
+data_folder='/nfs/chess/raw/2018-1/f2/miller-774-1/ti7-11/7/nf/' #layer 1
+
+#img_start=31601 #for 0.25 degree steps and 5 s exposure there are 5 junk frames up front; this is the 6th
+img_start=31602 #for 0.25 degree steps and 5 s exposure there are 6 junk frames up front; this is the 7th
+num_imgs=1440
+img_nums=np.arange(img_start,img_start+num_imgs,1)
+
+#==============================================================================
+# %% USER OPTIONS - CAN BE EDITED # SET THESE THE SAME AS FOR THE INITIAL NEAR FIELD
+#==============================================================================
+x_ray_energy=61.332 #keV
+
+#name of the material for the reconstruction
+mat_name='ti7al'
+
+#reconstruction with misorientation included; for many grains this will
+#quickly make the reconstruction size unmanageable
+misorientation_bnd=0.0 #degrees
+misorientation_spacing=0.25 #degrees
+
+beam_stop_width=0.6 #mm, assumed to be in the center of the detector
+
+ome_range_deg=[(0.,359.75)] #degrees
+
+max_tth=-1. #degrees, if a negative number is input, all peaks that will hit the detector are calculated
+
+#image processing
+num_for_dark=250 #num images to use for median dark
+threshold=6.
+num_erosions=2 #num iterations of image erosion, don't mess with unless you know what you're doing
+num_dilations=3 #num iterations of image dilation, don't mess with unless you know what you're doing
+ome_dilation_iter=1 #num iterations of 3d image stack dilations, don't mess with unless you know what you're doing
+
+chunk_size=500 #chunksize for multiprocessing, don't mess with unless you know what you're doing
+
+#thresholds for grains in reconstructions
+comp_thresh=0.3 #only use orientations from grains with completenesses ABOVE this threshold
+chi2_thresh=1.0 #only use orientations from grains BELOW this chi^2
+
+#tomography options
+layer_row=1024 #row of layer to use to find the cross sectional specimen shape
+recon_thresh=0.00025 #usually varies between 0.0001 and 0.0005
+#Don't change these unless you know what you are doing; this will close small
+#holes and remove noise
+noise_obj_size=500
+min_hole_size=500
+
+cross_sectional_dim=1.35 #cross section to reconstruct (should be at least 20%-30% over sample width)
+#voxel spacing for the near field reconstruction
+voxel_spacing = 0.005 #in mm
+##vertical (y) reconstruction voxel bounds in mm
+v_bnds=[-0.085,0.085]
+#v_bnds=[-0.,0.]
+#==============================================================================
+
+            ####END USER INPUT####
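+# --- editor's sketch (illustrative; not part of the original script) --------
+# The tomography steps below reduce raw frames to a sample mask.  The
+# attenuation radiographs are presumably the usual Beer-Lambert quantity,
+# -log(I / I_brightfield) per pixel, along these lines:
+def attenuation_rad(img, bright_field, eps=1.0):
+    # eps guards against zero counts; both inputs are 2-D count arrays
+    return -np.log((img + eps) / (bright_field + eps))
+# ------------------------------------------------------------------------------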
+
+#==============================================================================
+# %% LOAD GRAIN AND EXPERIMENT DATA
+#==============================================================================
+
+experiment, nf_to_ff_id_map = nfutil.gen_trial_exp_data(grain_out_file,det_file,mat_file, x_ray_energy, mat_name, max_tth, comp_thresh, chi2_thresh, misorientation_bnd, \
+                                                        misorientation_spacing,ome_range_deg, num_imgs, beam_stop_width)
+
+#==============================================================================
+# %% TOMO PROCESSING - GENERATE BRIGHT FIELD
+#==============================================================================
+
+tbf=tomoutil.gen_bright_field(tbf_data_folder,tbf_img_start,tbf_num_imgs,experiment.nrows,experiment.ncols,num_digits=6)
+
+#==============================================================================
+# %% TOMO PROCESSING - BUILD RADIOGRAPHS
+#==============================================================================
+
+rad_stack=tomoutil.gen_attenuation_rads(tomo_data_folder,tbf,tomo_img_start,tomo_num_imgs,experiment.nrows,experiment.ncols,num_digits=6)
+
+#==============================================================================
+# %% TOMO PROCESSING - INVERT SINOGRAM
+#==============================================================================
+
+reconstruction_fbp=tomoutil.tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=layer_row,\
+                                                   start_tomo_ang=ome_range_deg[0][0],end_tomo_ang=ome_range_deg[0][1],\
+                                                   tomo_num_imgs=tomo_num_imgs, center=experiment.detector_params[3])
+
+#==============================================================================
+# %% TOMO PROCESSING - CLEAN TOMO RECONSTRUCTION
+#==============================================================================
+
+binary_recon=tomoutil.threshold_and_clean_tomo_layer(reconstruction_fbp,recon_thresh, noise_obj_size,min_hole_size)
+
+#==============================================================================
+# %% TOMO PROCESSING - RESAMPLE TOMO RECONSTRUCTION
+#==============================================================================
+
+tomo_mask=tomoutil.crop_and_rebin_tomo_layer(binary_recon,recon_thresh,voxel_spacing,experiment.pixel_size[0],cross_sectional_dim)
+
+#==============================================================================
+# %% TOMO PROCESSING - CONSTRUCT DATA GRID
+#==============================================================================
+
+test_crds, n_crds, Xs, Ys, Zs = nfutil.gen_nf_test_grid_tomo(tomo_mask.shape[1], tomo_mask.shape[0], v_bnds, voxel_spacing)
+
+#==============================================================================
+# %% NEAR FIELD - MAKE MEDIAN DARK
+#==============================================================================
+
+dark=nfutil.gen_nf_dark(data_folder,img_nums,num_for_dark,experiment.nrows,experiment.ncols,dark_type='median',num_digits=6)
+
+#==============================================================================
+# %% NEAR FIELD - LOAD IMAGE DATA AND PROCESS
+#==============================================================================
+
+image_stack=nfutil.gen_nf_cleaned_image_stack(data_folder,img_nums,dark,ome_dilation_iter,threshold,experiment.nrows,experiment.ncols,num_digits=6,grey_bnds=(5,5),gaussian=4.5)
+
+#%%
+
+test_crds_load = np.load(output_dir + missing_grain_coordinates)
+test_crds = test_crds_load[:,:]
+n_crds = test_crds.shape[0]
+
+random_quaternions = np.load(output_dir + quaternion_test_list)
+
+n_grains = random_quaternions.shape[1]
+rMat_c = rot.rotMatOfQuat(random_quaternions)
+exp_maps = np.zeros([random_quaternions.shape[1],3])
+for i in range(0,random_quaternions.shape[1]):
+    phi = 2*np.arccos(random_quaternions[0,i])
+    n = xfcapi.unitRowVector(random_quaternions[1:,i])
+    exp_maps[i,:] = phi*n
+
+#%%
+
+experiment.n_grains = n_grains
+experiment.rMat_c = rMat_c
+experiment.exp_maps = exp_maps
+
+#==============================================================================
+# %% INSTANTIATE CONTROLLER - RUN BLOCK NO EDITING
+#==============================================================================
+
+progress_handler = nfutil.progressbar_progress_observer()
+save_handler=nfutil.forgetful_result_handler()
+
+controller = nfutil.ProcessController(save_handler, progress_handler,
+                                      ncpus=mp.cpu_count(), chunk_size=chunk_size)
+
+#controller = nfutil.ProcessController(save_handler, progress_handler,
+#                                      ncpus=40, chunk_size=chunk_size)
+
+multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn'
+
+#==============================================================================
+# %% TEST ORIENTATIONS - RUN BLOCK NO EDITING
+#==============================================================================
+
+raw_confidence=nfutil.test_orientations(image_stack, experiment, test_crds,
+                                        controller,multiprocessing_start_method)
+
+#%%
+best_quaternion = np.zeros([test_crds.shape[0],4])
+for i in range(0,raw_confidence.shape[1]):
+    where = np.where(raw_confidence[:,i] == np.max(raw_confidence[:,i]))
+    best_quaternion[i,:] = random_quaternions[:,where[0][0]]
+    print np.max(raw_confidence[:,i])
+
+#%%
+np.save(output_dir + new_quat_save_output, best_quaternion)
diff --git a/scripts/makeOverlapTable.py b/scripts/makeOverlapTable.py
index b3ceccff..2cd459a8 100755
--- a/scripts/makeOverlapTable.py
+++ b/scripts/makeOverlapTable.py
@@ -4,54 +4,97 @@ This is a temporary script file.
""" +from __future__ import print_function -import argparse, os, sys +import argparse +import os +import sys +import time +import yaml -import cPickle +try: + import dill as cpl +except(ImportError): + import cPickle as cpl import numpy as np -from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib.colors import BoundaryNorm from hexrd import config +from hexrd import instrument from hexrd.xrd import transforms_CAPI as xfcapi -from hexrd.coreutil import get_instrument_parameters from sklearn.cluster import dbscan -from scipy import cluster + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +do_plots = False +save_fig = False + + +# ============================================================================= +# LOCAL FUNCTIONS +# ============================================================================= + +# plane data +def load_pdata(cpkl, key): + with file(cpkl, "r") as matf: + mat_list = cpl.load(matf) + return dict(zip([i.name for i in mat_list], mat_list))[key].planeData + + +# instrument +def load_instrument(yml): + with file(yml, 'r') as f: + icfg = yaml.load(f) + return instrument.HEDMInstrument(instrument_config=icfg) + def adist(ang0, ang1): - resd = xfcapi.angularDifference(ang0 ,ang1) + resd = xfcapi.angularDifference(ang0, ang1) return np.sqrt(sum(resd**2)) + +def postprocess_dbscan(labels): + cl = np.array(labels, dtype=int) # convert to array + noise_points = cl == -1 # index for marking noise + cl += 1 # move index to 1-based instead of 0 + cl[noise_points] = -1 # re-mark noise as -1 + + # extract number of clusters + if np.any(cl == -1): + nblobs = len(np.unique(cl)) - 1 + else: + nblobs = len(np.unique(cl)) + return cl, nblobs + + def build_overlap_table(cfg, tol_mult=0.5): - - icfg = get_instrument_parameters(cfg) - + + # get instrument object + instr = load_instrument(cfg.instrument.parameters) + + # load grain table gt = np.loadtxt( os.path.join(cfg.analysis_dir, 'grains.out') ) - ngrains = len(gt) - - mat_list = cPickle.load(open(cfg.material.definitions, 'r')) - mat_names = [mat_list[i].name for i in range(len(mat_list))] - mat_dict = dict(zip(mat_names, mat_list)) - - matl = mat_dict[cfg.material.active] - - pd = matl.planeData + + # get plane data + pd = load_pdata(cfg.material.definitions, cfg.material.active) pd.exclusions = np.zeros(len(pd.exclusions), dtype=bool) pd.tThMax = np.radians(cfg.fit_grains.tth_max) pd.tThWidth = np.radians(cfg.fit_grains.tolerance.tth[-1]) - + # for clustering... eps = tol_mult*np.radians( min( - min(cfg.fit_grains.tolerance.eta), + min(cfg.fit_grains.tolerance.eta), 2*min(cfg.fit_grains.tolerance.omega) ) ) @@ -63,165 +106,210 @@ def build_overlap_table(cfg, tol_mult=0.5): pids.append( [pd.hklDataList[hklids[i]]['hklID'] for i in range(len(hklids))] ) - + # Make table of unit diffraction vectors - st = [] - for i in range(ngrains): - this_st = np.loadtxt( - os.path.join(cfg.analysis_dir, 'spots_%05d.out' %i) - ) - #... do all predicted? - valid_spt = this_st[:, 0] >= 0 - #valid_spt = np.ones(len(this_st), dtype=bool) + overlap_table_dict = {} + for det_key, panel in instr.detectors.iteritems(): + st = [] + for i in range(ngrains): + this_st = np.loadtxt( + os.path.join(cfg.analysis_dir, + os.path.join(det_key, 'spots_%05d.out' % i) + ) + ) - angs = this_st[valid_spt, 7:10] + # ??? do all predicted? 
+ valid_spt = this_st[:, 0] >= 0 + # valid_spt = np.ones(len(this_st), dtype=bool) - dvec = xfcapi.anglesToDVec( - angs, - chi=icfg['oscillation_stage']['chi'] - ) + # !!! double check this is still correct + angs = this_st[valid_spt, 7:10] - # [ grainID, reflID, hklID, D_s[0], D_s[1], D_s[2], tth, eta, ome ] - st.append( - np.hstack([ - i*np.ones((sum(valid_spt), 1)), - this_st[valid_spt, :2], - dvec, + # get unit diffraction vectors + dvec = xfcapi.anglesToDVec( angs, - ]) - ) + chi=instr.chi + ) - # make overlap table - # [[range_0], [range_1], ..., [range_n]] - # range_0 = [grainIDs, reflIDs, hklIDs] that are within tol - overlap_table = [] - ii = 0 - for pid in pids: - tmp = []; a = []; b = []; c = [] - for j in range(len(pid)): - a.append( - np.vstack( - [st[i][st[i][:, 2] == pid[j], 3:6] for i in range(len(st))] - ) + # [ grainID, reflID, hklID, D_s[0], D_s[1], D_s[2], tth, eta, ome ] + st.append( + np.hstack([ + i*np.ones((sum(valid_spt), 1)), + this_st[valid_spt, :2], + dvec, + angs, + ]) ) - b.append( - np.vstack( - [st[i][st[i][:, 2] == pid[j], 0:3] for i in range(len(st))] + + # ========================================================================= + # make overlap table + # ========================================================================= + # [[range_0], [range_1], ..., [range_n]] + # range_0 = [grainIDs, reflIDs, hklIDs] that are within tol + overlap_table = [] + ii = 0 + for pid in pids: + print("processing ring set %d" % ii) + start0 = time.clock() + tmp = [] + a = [] # unit diffraction vectors in sample frame + b = [] # [grainID, reflID, hklID] + c = [] # predicted angles [tth, eta, ome] + for j in range(len(pid)): + a.append( + np.vstack( + [st[i][st[i][:, 2] == pid[j], 3:6] + for i in range(len(st))] + ) ) - ) - c.append( - np.vstack( - [st[i][st[i][:, 2] == pid[j], 6:9] for i in range(len(st))] + b.append( + np.vstack( + [st[i][st[i][:, 2] == pid[j], 0:3] + for i in range(len(st))] + ) ) - ) - pass - a = np.vstack(a) - b = np.vstack(b) - c = np.vstack(c) - if len(a) > 0: - # run dbscan - core_samples, labels = dbscan( - a, - eps=eps, - min_samples=2, - metric='minkowski', p=2, - ) - - cl = np.array(labels, dtype=int) # convert to array - noise_points = cl == -1 # index for marking noise - cl += 1 # move index to 1-based instead of 0 - cl[noise_points] = -1 # re-mark noise as -1 - - # extract number of clusters - if np.any(cl == -1): - nblobs = len(np.unique(cl)) - 1 - else: - nblobs = len(np.unique(cl)) - - for i in range(1, nblobs+1): - # put in check on omega here - these_angs = c[np.where(cl == i)[0], :] - local_cl = cluster.hierarchy.fclusterdata( - these_angs[:, 1:], - eps, - criterion='distance', - metric=adist + c.append( + np.vstack( + [st[i][st[i][:, 2] == pid[j], 6:9] + for i in range(len(st))] ) - local_nblobs = len(np.unique(local_cl)) - if local_nblobs < len(these_angs): - for j in range(1, local_nblobs + 1): - npts = sum(local_cl == j) - if npts >= 2: - cl_idx = np.where(local_cl == j)[0] - #import pdb; pdb.set_trace() - tmp.append( - b[np.where(cl == i)[0][cl_idx], :] - ) - print "processing ring set %d" %ii - ii += 1 - overlap_table.append(tmp) - return overlap_table - + ) + pass + a = np.vstack(a) # unit diffraction vectors in sample frame + b = np.vstack(b) # [grainID, reflID, hklID] + c = np.vstack(c) # predicted angles [tth, eta, ome] + if len(a) > 0: + # run dbscan + core_samples, labels = dbscan( + a, + eps=eps, + min_samples=2, + metric='minkowski', p=2, + ) + cl, nblobs = postprocess_dbscan(labels) + elapsed0 = time.clock() - start0 + 
print("\tdbscan took %.2f seconds" % elapsed0) + # import pdb; pdb.set_trace() + print("\tcollapsing incidentals for %d candidates..." % nblobs) + start1 = time.clock() # time this + for i in range(1, nblobs + 1): + # put in check on omega here + these_angs = c[np.where(cl == i)[0], :] + # local_cl = cluster.hierarchy.fclusterdata( + # these_angs[:, 1:], + # eps, + # criterion='distance', + # metric=adist + # ) + # local_nblobs = len(np.unique(local_cl)) + _, local_labels = dbscan( + these_angs[:, 1:], + eps=eps, + min_samples=2, + metric=adist, + n_jobs=-1, + ) + local_cl, local_nblobs = postprocess_dbscan(local_labels) + + if local_nblobs < len(these_angs): + for j in range(1, local_nblobs + 1): + npts = sum(local_cl == j) + if npts >= 2: + cl_idx = np.where(local_cl == j)[0] + # import pdb; pdb.set_trace() + tmp.append( + b[np.where(cl == i)[0][cl_idx], :] + ) + elapsed1 = time.clock() - start1 + print("\tomega filtering took %.2f seconds" % elapsed1) + ii += 1 + overlap_table.append(tmp) + overlap_table_dict[det_key] = overlap_table + return overlap_table_dict + + def build_discrete_cmap(ngrains): - + # define the colormap - cmap = plt.cm.jet - + cmap = plt.cm.inferno + # extract all colors from the .jet map cmaplist = [cmap(i) for i in range(cmap.N)] - + # create the new map cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N) - + # define the bins and normalize bounds = np.linspace(0, ngrains, ngrains+1) norm = BoundaryNorm(bounds, cmap.N) return cmap, norm - -#%% + + +# ============================================================================= +# %% CLI +# ============================================================================= + if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Make overlap table from cfg file') + parser = argparse.ArgumentParser( + description='Make overlap table from cfg file') parser.add_argument( - 'cfg', metavar='cfg_filename', + 'cfg', metavar='cfg_filename', type=str, help='a YAML config filename') parser.add_argument( - '-m', '--multiplier', - help='multiplier on angular tolerance', + '-m', '--multiplier', + help='multiplier on angular tolerance', type=float, default=0.5) + parser.add_argument( + '-b', '--block-id', + help='block-id in config', + type=int, default=0) args = vars(parser.parse_args(sys.argv[1:])) - - cfg = config.open(args['cfg'])[0] - print "loaded config file %s" %args['cfg'] - overlap_table = build_overlap_table(cfg) - np.savez(os.path.join(cfg.analysis_dir, 'overlap_table.npz'), *overlap_table) -#%% -#fig = plt.figure() -#ax = fig.add_subplot(111, projection='3d') -# -#etas = np.radians(np.linspace(0, 359, num=360)) -#cx = np.cos(etas) -#cy = np.sin(etas) -#cz = np.zeros_like(etas) -# -#ax.plot(cx, cy, cz, c='b') -#ax.plot(cx, cz, cy, c='g') -#ax.plot(cz, cx, cy, c='r') -#ax.scatter3D(a[:, 0], a[:, 1], a[:, 2], c=b[:, 0], cmap=cmap, norm=norm, marker='o', s=20) -# -#ax.set_xlabel(r'$\mathbf{\mathrm{X}}_s$') -#ax.set_ylabel(r'$\mathbf{\mathrm{Y}}_s$') -#ax.set_zlabel(r'$\mathbf{\mathrm{Z}}_s$') -# -#ax.elev = 124 -#ax.azim = -90 -# -#ax.axis('equal') -# -##fname = "overlaps_%03d.png" -##for i in range(360): -## ax.azim += i -## fig.savefig( -## fname %i, dpi=200, facecolor='w', edgecolor='w', -## orientation='landcape') + + print("loaded config file %s" % args['cfg']) + print("will use block %d" % args['block_id']) + cfg = config.open(args['cfg'])[args['block_id']] + overlap_table = build_overlap_table(cfg, tol_mult=args['multiplier']) + np.savez(os.path.join(cfg.analysis_dir, 'overlap_table.npz'), + 
*overlap_table)
+
+
+# =============================================================================
+# %% OPTIONAL PLOTTING
+# =============================================================================
+
+if do_plots:
+    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='3d')
+
+    etas = np.radians(np.linspace(0, 359, num=360))
+    cx = np.cos(etas)
+    cy = np.sin(etas)
+    cz = np.zeros_like(etas)
+
+    ax.plot(cx, cy, cz, c='b')
+    ax.plot(cx, cz, cy, c='g')
+    ax.plot(cz, cx, cy, c='r')
+
+    # !!! need a and b
+    cmap, norm = build_discrete_cmap(len(b))
+    ax.scatter3D(a[:, 0], a[:, 1], a[:, 2], c=b[:, 0],
+                 cmap=cmap, norm=norm, marker='o', s=20)
+
+    ax.set_xlabel(r'$\mathbf{\mathrm{X}}_s$')
+    ax.set_ylabel(r'$\mathbf{\mathrm{Y}}_s$')
+    ax.set_zlabel(r'$\mathbf{\mathrm{Z}}_s$')
+
+    ax.elev = 124
+    ax.azim = -90
+
+    ax.axis('equal')
+
+    if save_fig:
+        fname = "overlaps_%03d.png"
+        for i in range(360):
+            ax.azim += i
+            fig.savefig(
+                fname % i, dpi=200, facecolor='w', edgecolor='w',
+                orientation='landscape')
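Editor's note: per build_overlap_table above, the saved table is a list with one entry per ring set, each holding arrays of [grainID, reflID, hklID] rows for reflections that cluster within tolerance; note also that the function now returns one such table per detector key, so the single np.savez(..., *overlap_table) call in the CLI plausibly needs a per-detector update. A hedged sketch of reading a saved table back:

import numpy as np

f = np.load('overlap_table.npz')  # as written by np.savez(..., *overlap_table)
keys = sorted(f.files, key=lambda k: int(k.split('_')[1]))  # arr_0, arr_1, ...
overlap_table = [f[k] for k in keys]  # one entry per ring set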
diff --git a/scripts/make_imageseriesh5.py b/scripts/make_imageseriesh5.py
new file mode 100755
index 00000000..1afddede
--- /dev/null
+++ b/scripts/make_imageseriesh5.py
@@ -0,0 +1,267 @@
+#! /usr/bin/env python
+#
+"""Make an imageseries from a list of image files
+"""
+from __future__ import print_function
+
+import sys
+import argparse
+import logging
+import time
+
+# Put this before fabio import and reset level if you
+# want to control its import warnings.
+logging.basicConfig(level=logging.INFO)
+
+import numpy
+import h5py
+import fabio
+
+# Error messages
+
+ERR_NO_FILE = 'Append specified, but could not open file'
+ERR_NO_DATA = 'Append specified, but dataset not found in file'
+ERR_OVERWRITE = 'Failed to create new dataset. Does it already exist?'
+ERR_SHAPE = 'Image shape not consistent with previous images'
+ERR_NOEMPTY = 'dark-from-empty specified, but number of empty frames not given'
+ERR_OMEGASPEC = 'Must specify both omega-min and omega-max if either is given'
+
+DSetPath = lambda f, p: "%s['%s']" % (f, p)
+
+class MakeImageSeriesError(Exception):
+    """Class for MakeImageSeriesError errors"""
+    def __init__(self, message):
+        self.message = message
+        return
+
+    def __str__(self):
+        return self.message
+
+class ImageFiles(object):
+    """List of image files in sequence"""
+    def __init__(self, a):
+        """a is a namespace from parser"""
+        self.files = a.imagefiles
+        self.nfiles = len(self.files)
+        self.nempty = a.empty
+        self.maxframes = a.max_frames
+        #self._setloglevel(a)
+
+        self._info()
+        self.h5opts = self._seth5opts(a)
+
+        self.ntowrite = numpy.min((self.maxframes, self.nframes))\
+            if self.maxframes > 0 else self.nframes
+
+        self.outfile = a.outfile
+        self.dgrppath = a.dset
+        self.dsetpath = '/'.join((a.dset, 'images'))
+
+    def _seth5opts(self, a):
+        h5d = {}
+
+        # compression type and level
+        clevel = min(a.compression_level, 9)
+        if clevel > 0:
+            h5d['compression'] = 'gzip'
+            h5d['compression_opts'] = clevel
+            logging.info('compression level: %s' % clevel)
+        else:
+            logging.info('compression off')
+
+        # chunk size (chunking automatically on with compression)
+        ckb = 1024*a.chunk_KB
+        if ckb <= 0:
+            block = self.shape
+        else:
+            # take some number of rows:
+            # * at least one
+            # * no more than number of rows
+            sh0, sh1 = self.shape
+            bpp = self.dtype.itemsize
+            nrows = min(ckb / (bpp * sh1), sh0)
+            nrows = max(nrows, 1)  # at least one row
+            block = (nrows, sh1)
+        h5d['chunks'] = (1,) + block
+        logging.info('chunk size: %s X %s' % block)
+
+        return h5d
+
+    @staticmethod
+    def _setloglevel(a):
+        # Not working: need to understand logging more
+        if a.log_level == 'debug':
+            logging.basicConfig(level=logging.DEBUG)
+        elif a.log_level == 'info':
+            logging.basicConfig(level=logging.INFO)
+
+    @staticmethod
+    def _checkvalue(v, vtest, msg):
+        """helper: ensure value is set consistently"""
+        if v is None:
+            val = vtest
+        else:
+            if vtest != v:
+                raise MakeImageSeriesError(msg)
+            else:
+                val = v
+
+        return val
+
+    def _info(self):
+        """basic info: dtype, shape, nframes, and verify consistency"""
+        cn = None
+        shp = None
+        dtp = None
+
+        nf = 0
+        for imgf in self.files:
+            img = fabio.open(imgf)
+            dat = img.data
+            shp = self._checkvalue(shp, dat.shape, "inconsistent image shapes")
+            dtp = self._checkvalue(dtp, dat.dtype, "inconsistent image dtypes")
+            cn = self._checkvalue(cn, img.classname, "inconsistent image types")
+            if img.nframes >= self.nempty:
+                nf += img.nframes - self.nempty
+            else:
+                raise MakeImageSeriesError("more empty frames than images")
+
+        self.nframes = nf
+        self.shape = shp
+        self.dtype = dtp
+        self.imagetype = cn
+
+    def describe(self):
+        print('Number of Files: %d' % self.nfiles)
+        print('... image type: %s' % self.imagetype)
+        print('... image dimensions: %d X %d' % self.shape)
+        print('... image data type: %s' % self.dtype)
+        print('... empty frames per file: %d' % self.nempty)
+        print('... number of nonempty frames: %d' % self.nframes)
+        maxf = self.maxframes if self.maxframes > 0 else 'unlimited'
+        print('... max frames requested: %s' % maxf)
+        print('...
will write: %d' % self.ntowrite) + + def opendset(self): + """open the HDF5 data set""" + # note: compression implies chunked storage + msg = 'writing to file/path: %s:%s' % (self.outfile, self.dgrppath) + logging.info(msg) + + # grab file object + f = h5py.File(self.outfile, "a") + try: + shp = (self.ntowrite,) + self.shape + chunks = (1, self.shape[0], self.shape[1]) + ds = f.create_dataset(self.dsetpath, shp, dtype=self.dtype, + **self.h5opts + ) + except Exception as e: + errmsg = '%s: %s\n... exception: ' % \ + (ERR_OVERWRITE, DSetPath(self.outfile, self.dsetpath)) + raise MakeImageSeriesError(errmsg + str(e)) + + return f, ds + + def write(self): + """write to HDF5 file""" + f, ds = self.opendset() + # + # Now add the images + # + start_time = time.clock() # time this + nframes = 0 # number completed + print_every = 1; marker = " ."; + print('Frames written (of %s):' % self.ntowrite, end="") + for i in range(self.nfiles): + if nframes >= self.ntowrite: break + + logging.debug('processing file %d of %d' % (i+1, self.nfiles)) + img_i = fabio.open(self.files[i]) + nfi = img_i.nframes + for j in range(nfi): + msg = '... file %d/image %d' % (i, j) + logging.debug(msg) + if j < self.nempty: + logging.debug('... empty frame ... skipping') + else: + ds[nframes, :, :] = img_i.data + nframes += 1 + if numpy.mod(nframes, print_every) == 0: + print(marker, nframes, end="") + print_every *= 2 + sys.stdout.flush() + logging.debug('... wrote image %s of %s' %\ + (nframes, self.ntowrite)) + if nframes >= self.ntowrite: + logging.debug('wrote last frame: stopping') + break + if j < nfi - 1: + # on last frame in file, fabio will look for next file + img_i = img_i.next() + + f.close() + print("\nTime to write: %f seconds " %(time.clock()-start_time)) + +def set_options(): + """Set options for command line""" + parser = argparse.ArgumentParser(description="imageseries builder") + + parser.add_argument("-i", "--info", help="describe the input files and quit", + action="store_true") + + # file options + parser.add_argument("-o", "--outfile", + help="name of HDF5 output file", + default="imageseries.h5") + help_d = "path to HDF5 data group" + parser.add_argument("-d", "--dset", + help=help_d, + metavar="PATH", default="/imageseries") + + # image options + parser.add_argument("imagefiles", nargs="+", help="image files") + + parser.add_argument("--empty", "--blank", + help="number of blank frames in beginning of file", + metavar="N", type=int, action="store", default=0) + parser.add_argument("--max-frames", + help="maximum number of frames to write", + metavar="M", type=int, action="store", default=0) + + # compression/chunking + help_d = "compression level for gzip (1-9); 0 or less for no compression; "\ + "above 9 sets level to 9" + parser.add_argument("-c", "--compression-level", + help=help_d, + metavar="LEVEL", type=int, action="store", default=4) + help_d = "target chunk size in KB (0 means single image size)" + parser.add_argument("--chunk-KB", + help=help_d, + metavar="K", type=int, action="store", default=0) + + return parser + +def execute(args, **kwargs): + """Main execution + + * kwargs added to allow passing further options when not called from + command line + """ + p = set_options() + a = p.parse_args(args) + # logging.info(str(a)) + + ifiles = ImageFiles(a) + + if a.info: + ifiles.describe() + else: + ifiles.write() + +if __name__ == '__main__': + # + # run + # + execute(sys.argv[1:]) diff --git a/scripts/new_simulate_nf.py b/scripts/new_simulate_nf.py new file mode 100644 index 
00000000..bde60e18 --- /dev/null +++ b/scripts/new_simulate_nf.py @@ -0,0 +1,1096 @@ +""" +Refactor of simulate_nf so that an experiment is mocked up. + +Also trying to minimize imports +""" +from __future__ import print_function + +import os +import logging + +import numpy as np +import numba +import yaml +import argparse +import time +import contextlib +import multiprocessing +import tempfile +import shutil + +# import of hexrd modules +import hexrd +from hexrd import constants +from hexrd.xrd import transforms_CAPI as xfcapi +from hexrd.xrd import xrdutil +import hexrd.xrd.material + +from skimage.morphology import dilation as ski_dilation + +beam = constants.beam_vec +Z_l = constants.lab_z +vInv_ref = constants.identity_6x1 + + +# ============================================================================== +# %% SOME SCAFFOLDING +# ============================================================================== + + +class ProcessController(object): + """This is a 'controller' that provides the necessary hooks to + track the results of the process as well as to provide clues of + the progress of the process""" + + def __init__(self, result_handler=None, progress_observer=None, ncpus=1, + chunk_size=100): + self.rh = result_handler + self.po = progress_observer + self.ncpus = ncpus + self.chunk_size = chunk_size + self.limits = {} + self.timing = [] + + # progress handling ------------------------------------------------------- + + def start(self, name, count): + self.po.start(name, count) + t = time.time() + self.timing.append((name, count, t)) + + def finish(self, name): + t = time.time() + self.po.finish() + entry = self.timing.pop() + assert name == entry[0] + total = t - entry[2] + logging.info("%s took %8.3fs (%8.6fs per item).", + entry[0], total, total/entry[1]) + + def update(self, value): + self.po.update(value) + + # result handler ---------------------------------------------------------- + + def handle_result(self, key, value): + logging.debug("handle_result (%(key)s)", locals()) + self.rh.handle_result(key, value) + + # value limitting --------------------------------------------------------- + def set_limit(self, key, limit_function): + if key in self.limits: + logging.warn("Overwritting limit funtion for '%(key)s'", locals()) + + self.limits[key] = limit_function + + def limit(self, key, value): + try: + value = self.limits[key](value) + except KeyError: + pass + except Exception: + logging.warn("Could not apply limit to '%(key)s'", locals()) + + return value + + # configuration ---------------------------------------------------------- + + def get_process_count(self): + return self.ncpus + + def get_chunk_size(self): + return self.chunk_size + + +def null_progress_observer(): + class NullProgressObserver(object): + def start(self, name, count): + pass + + def update(self, value): + pass + + def finish(self): + pass + + return NullProgressObserver() + + +def progressbar_progress_observer(): + from progressbar import ProgressBar, Percentage, Bar + + class ProgressBarProgressObserver(object): + def start(self, name, count): + self.pbar = ProgressBar(widgets=[name, Percentage(), Bar()], + maxval=count) + self.pbar.start() + + def update(self, value): + self.pbar.update(value) + + def finish(self): + self.pbar.finish() + + return ProgressBarProgressObserver() + + +def forgetful_result_handler(): + class ForgetfulResultHandler(object): + def handle_result(self, key, value): + pass # do nothing + + return ForgetfulResultHandler() + + +def saving_result_handler(filename): + 
"""returns a result handler that saves the resulting arrays into a file + with name filename""" + class SavingResultHandler(object): + def __init__(self, file_name): + self.filename = file_name + self.arrays = {} + + def handle_result(self, key, value): + self.arrays[key] = value + + def __del__(self): + logging.debug("Writing arrays in %(filename)s", self.__dict__) + try: + np.savez_compressed(open(self.filename, "wb"), **self.arrays) + except IOError: + logging.error("Failed to write %(filename)s", self.__dict__) + + return SavingResultHandler(filename) + + +def checking_result_handler(filename): + """returns a return handler that checks the results against a + reference file. + + The Check will consider a FAIL either a result not present in the + reference file (saved as a numpy savez or savez_compressed) or a + result that differs. It will consider a PARTIAL PASS if the + reference file has a shorter result, but the existing results + match. A FULL PASS will happen when all existing results match + + """ + class CheckingResultHandler(object): + def __init__(self, reference_file): + """Checks the result against those save in 'reference_file'""" + logging.info("Loading reference results from '%s'", reference_file) + self.reference_results = np.load(open(reference_file, 'rb')) + + def handle_result(self, key, value): + if key in ['experiment', 'image_stack']: + return # ignore these + + try: + reference = self.reference_results[key] + except KeyError as e: + logging.warning("%(key)s: %(e)s", locals()) + reference = None + + if reference is None: + msg = "'{0}': No reference result." + logging.warn(msg.format(key)) + + try: + if key == "confidence": + reference = reference.T + value = value.T + + check_len = min(len(reference), len(value)) + test_passed = np.allclose(value[:check_len], + reference[:check_len]) + + if not test_passed: + msg = "'{0}': FAIL" + logging.warn(msg.format(key)) + lvl = logging.WARN + elif len(value) > check_len: + msg = "'{0}': PARTIAL PASS" + lvl = logging.WARN + else: + msg = "'{0}': FULL PASS" + lvl = logging.INFO + logging.log(lvl, msg.format(key)) + except Exception as e: + msg = "%(key)s: Failure trying to check the results.\n%(e)s" + logging.error(msg, locals()) + + return CheckingResultHandler(filename) + + +# ============================================================================== +# %% SETUP FUNCTION +# ============================================================================== +def mockup_experiment(): + # user options + # each grain is provided in the form of a quaternion. + + # The following array contains the quaternions for the array. Note that the + # quaternions are in the columns, with the first row (row 0) being the real + # part w. 
We assume that we are dealing with unit quaternions + + quats = np.array([[0.91836393, 0.90869942], + [0.33952917, 0.18348350], + [0.17216207, 0.10095837], + [0.10811041, 0.36111851]]) + + n_grains = quats.shape[-1] # last dimension provides the number of grains + phis = 2.*np.arccos(quats[0, :]) # phis are the angles for the quaternion + # ns contains the rotation axis as an unit vector + ns = hexrd.matrixutil.unitVector(quats[1:, :]) + exp_maps = np.array([phis[i]*ns[:, i] for i in range(n_grains)]) + rMat_c = hexrd.xrd.rotations.rotMatOfQuat(quats) + + cvec = np.arange(-25, 26) + X, Y, Z = np.meshgrid(cvec, cvec, cvec) + + crd0 = 1e-3*np.vstack([X.flatten(), Y.flatten(), Z.flatten()]).T + crd1 = crd0 + np.r_[0.100, 0.100, 0] + crds = np.array([crd0, crd1]) + + # make grain parameters + grain_params = [] + for i in range(n_grains): + for j in range(len(crd0)): + grain_params.append( + np.hstack([exp_maps[i, :], crds[i][j, :], vInv_ref]) + ) + + # scan range and period + ome_period = (0, 2*np.pi) + ome_range = [ome_period, ] + ome_step = np.radians(1.) + nframes = 0 + for i in range(len(ome_range)): + nframes += int((ome_range[i][1]-ome_range[i][0])/ome_step) + + ome_edges = np.arange(nframes+1)*ome_step + + # instrument + with open('./retiga.yml', 'r') as fildes: + instr_cfg = yaml.load(fildes) + + tiltAngles = instr_cfg['detector']['transform']['tilt_angles'] + tVec_d = np.array(instr_cfg['detector']['transform']['t_vec_d']) + chi = instr_cfg['oscillation_stage']['chi'] + tVec_s = np.array(instr_cfg['oscillation_stage']['t_vec_s']) + rMat_d = xfcapi.makeDetectorRotMat(tiltAngles) + rMat_s = xfcapi.makeOscillRotMat([chi, 0.]) + + pixel_size = instr_cfg['detector']['pixels']['size'] + nrows = instr_cfg['detector']['pixels']['rows'] + ncols = instr_cfg['detector']['pixels']['columns'] + + col_ps = pixel_size[1] + row_ps = pixel_size[0] + + panel_dims = [(-0.5*ncols*col_ps, -0.5*nrows*row_ps), + (0.5*ncols*col_ps, 0.5*nrows*row_ps)] + + x_col_edges = col_ps * (np.arange(ncols + 1) - 0.5*ncols) + y_row_edges = row_ps * (np.arange(nrows, -1, -1) - 0.5*nrows) + rx, ry = np.meshgrid(x_col_edges, y_row_edges) + + gcrds = xfcapi.detectorXYToGvec(np.vstack([rx.flatten(), ry.flatten()]).T, + rMat_d, rMat_s, + tVec_d, tVec_s, np.zeros(3)) + + max_pixel_tth = np.amax(gcrds[0][0]) + detector_params = np.hstack([tiltAngles, tVec_d, chi, tVec_s]) + distortion = None + + # a different parametrization for the sensor + # (makes for faster quantization) + base = np.array([x_col_edges[0], + y_row_edges[0], + ome_edges[0]]) + deltas = np.array([x_col_edges[1] - x_col_edges[0], + y_row_edges[1] - y_row_edges[0], + ome_edges[1] - ome_edges[0]]) + inv_deltas = 1.0/deltas + clip_vals = np.array([ncols, nrows]) + + # dilation + max_diameter = np.sqrt(3)*0.005 + row_dilation = int(np.ceil(0.5 * max_diameter/row_ps)) + col_dilation = int(np.ceil(0.5 * max_diameter/col_ps)) + + # crystallography data + from hexrd import valunits + gold = hexrd.xrd.material.Material('gold') + gold.sgnum = 225 + gold.latticeParameters = [4.0782, ] + gold.hklMax = 200 + gold.beamEnergy = valunits.valWUnit("wavelength", "ENERGY", 52, "keV") + gold.planeData.exclusions = None + gold.planeData.tThMax = max_pixel_tth # note this comes from info in the detector + + ns = argparse.Namespace() + # grains related information + ns.n_grains = n_grains # this can be derived from other values... 
+ ns.rMat_c = rMat_c # n_grains rotation matrices (one per grain) + ns.exp_maps = exp_maps # n_grains exp_maps --angle * rotation axis-- (one per grain) + + ns.plane_data = gold.planeData + ns.detector_params = detector_params + ns.pixel_size = pixel_size + ns.ome_range = ome_range + ns.ome_period = ome_period + ns.x_col_edges = x_col_edges + ns.y_row_edges = y_row_edges + ns.ome_edges = ome_edges + ns.ncols = ncols + ns.nrows = nrows + ns.nframes = nframes # used only in simulate... + ns.rMat_d = rMat_d + ns.tVec_d = tVec_d + ns.chi = chi # note this is used to compute S... why is it needed? + ns.tVec_s = tVec_s + ns.rMat_c = rMat_c + ns.row_dilation = row_dilation + ns.col_dilation = col_dilation + ns.distortion = distortion + ns.panel_dims = panel_dims # used only in simulate... + ns.base = base + ns.inv_deltas = inv_deltas + ns.clip_vals = clip_vals + + return grain_params, ns + + +# ============================================================================= +# %% OPTIMIZED BITS +# ============================================================================= + +# Some basic 3d algebra ======================================================= +@numba.njit +def _v3_dot(a, b): + return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + + +@numba.njit +def _m33_v3_multiply(m, v, dst): + v0 = v[0] + v1 = v[1] + v2 = v[2] + dst[0] = m[0, 0]*v0 + m[0, 1]*v1 + m[0, 2]*v2 + dst[1] = m[1, 0]*v0 + m[1, 1]*v1 + m[1, 2]*v2 + dst[2] = m[2, 0]*v0 + m[2, 1]*v1 + m[2, 2]*v2 + + return dst + + +@numba.njit +def _v3_normalized(src, dst): + v0 = src[0] + v1 = src[1] + v2 = src[2] + sqr_norm = v0*v0 + v1*v1 + v2*v2 + inv_norm = 1.0 if sqr_norm == 0.0 else 1./np.sqrt(sqr_norm) + + dst[0] = v0 * inv_norm + dst[1] = v1 * inv_norm + dst[2] = v2 * inv_norm + + return dst + + +@numba.njit +def _make_binary_rot_mat(src, dst): + v0 = src[0] + v1 = src[1] + v2 = src[2] + + dst[0, 0] = 2.0*v0*v0 - 1.0 + dst[0, 1] = 2.0*v0*v1 + dst[0, 2] = 2.0*v0*v2 + dst[1, 0] = 2.0*v1*v0 + dst[1, 1] = 2.0*v1*v1 - 1.0 + dst[1, 2] = 2.0*v1*v2 + dst[2, 0] = 2.0*v2*v0 + dst[2, 1] = 2.0*v2*v1 + dst[2, 2] = 2.0*v2*v2 - 1.0 + + return dst + + +# code transcribed in numba from transforms module ============================ + +# This is equivalent to the transform module anglesToGVec, but written in +# numba. This should end in a module to share with other scripts +@numba.njit +def _anglesToGVec(angs, rMat_ss, rMat_c): + """From a set of angles return them in crystal space""" + result = np.empty_like(angs) + for i in range(len(angs)): + cx = np.cos(0.5*angs[i, 0]) + sx = np.sin(0.5*angs[i, 0]) + cy = np.cos(angs[i, 1]) + sy = np.sin(angs[i, 1]) + g0 = cx*cy + g1 = cx*sy + g2 = sx + + # with g being [cx*xy, cx*sy, sx] + # result = dot(rMat_c, dot(rMat_ss[i], g)) + t0_0 = rMat_ss[ i, 0, 0]*g0 + rMat_ss[ i, 1, 0]*g1 + rMat_ss[ i, 2, 0]*g2 + t0_1 = rMat_ss[ i, 0, 1]*g0 + rMat_ss[ i, 1, 1]*g1 + rMat_ss[ i, 2, 1]*g2 + t0_2 = rMat_ss[ i, 0, 2]*g0 + rMat_ss[ i, 1, 2]*g1 + rMat_ss[ i, 2, 2]*g2 + + result[i, 0] = rMat_c[0, 0]*t0_0 + rMat_c[ 1, 0]*t0_1 + rMat_c[ 2, 0]*t0_2 + result[i, 1] = rMat_c[0, 1]*t0_0 + rMat_c[ 1, 1]*t0_1 + rMat_c[ 2, 1]*t0_2 + result[i, 2] = rMat_c[0, 2]*t0_0 + rMat_c[ 1, 2]*t0_1 + rMat_c[ 2, 2]*t0_2 + + return result + + +# This is equivalent to the transform's module gvecToDetectorXYArray, +# but written in numba. 
+# As of now, it is not a good replacement as efficient allocation of the +# temporary arrays is not competitive with the stack allocation using in +# the C version of the code (WiP) + +# tC varies per coord +# gvec_cs, rSm varies per grain +# +# gvec_cs +@numba.jit() +def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): + """ beamVec is the beam vector: (0, 0, -1) in this case """ + ztol = xrdutil.epsf + p3_l = np.empty((3,)) + tmp_vec = np.empty((3,)) + vG_l = np.empty((3,)) + tD_l = np.empty((3,)) + norm_vG_s = np.empty((3,)) + norm_beam = np.empty((3,)) + tZ_l = np.empty((3,)) + brMat = np.empty((3,3)) + result = np.empty((len(rSn), 2)) + + _v3_normalized(beam, norm_beam) + _m33_v3_multiply(rD, Z_l, tZ_l) + + for i in xrange(len(rSn)): + _m33_v3_multiply(rSn[i], tC, p3_l) + p3_l += tS + p3_minus_p1_l = tD - p3_l + + num = _v3_dot(tZ_l, p3_minus_p1_l) + _v3_normalized(vG_sn[i], norm_vG_s) + + _m33_v3_multiply(rC, norm_vG_s, tmp_vec) + _m33_v3_multiply(rSn[i], tmp_vec, vG_l) + + bDot = -_v3_dot(norm_beam, vG_l) + + if bDot < ztol or bDot > 1.0 - ztol: + result[i, 0] = np.nan + result[i, 1] = np.nan + continue + + _make_binary_rot_mat(vG_l, brMat) + _m33_v3_multiply(brMat, norm_beam, tD_l) + denom = _v3_dot(tZ_l, tD_l) + + if denom < ztol: + result[i, 0] = np.nan + result[i, 1] = np.nan + continue + + u = num/denom + tmp_res = u*tD_l - p3_minus_p1_l + result[i, 0] = _v3_dot(tmp_res, rD[:, 0]) + result[i, 1] = _v3_dot(tmp_res, rD[:, 1]) + + return result + + +@numba.njit +def _quant_and_clip_confidence(coords, angles, image, + base, inv_deltas, clip_vals): + """quantize and clip the parametric coordinates in coords + angles + + coords - (..., 2) array: input 2d parametric coordinates + angles - (...) array: additional dimension for coordinates + base - (3,) array: base value for quantization (for each dimension) + inv_deltas - (3,) array: inverse of the quantum size (for each dimension) + clip_vals - (2,) array: clip size (only applied to coords dimensions) + + clipping is performed on ranges [0, clip_vals[0]] for x and + [0, clip_vals[1]] for y + + returns an array with the quantized coordinates, with coordinates + falling outside the clip zone filtered out. 
+ + """ + count = len(coords) + + in_sensor = 0 + matches = 0 + for i in range(count): + xf = coords[i, 0] + yf = coords[i, 1] + + xf = np.floor((xf - base[0]) * inv_deltas[0]) + if not xf >= 0.0: + continue + if not xf < clip_vals[0]: + continue + + yf = np.floor((yf - base[1]) * inv_deltas[1]) + + if not yf >= 0.0: + continue + if not yf < clip_vals[1]: + continue + + zf = np.floor((angles[i] - base[2]) * inv_deltas[2]) + + in_sensor += 1 + + x, y, z = int(xf), int(yf), int(zf) + + x_byte = x // 8 + x_off = 7 - (x % 8) + if image[z, y, x_byte] & (1 << x_off): + matches += 1 + + return 0 if in_sensor == 0 else float(matches)/float(in_sensor) + + +# ============================================================================== +# %% DIFFRACTION SIMULATION +# ============================================================================== + +def get_simulate_diffractions(grain_params, experiment, + cache_file='gold_cubes.npy', + controller=None): + """getter functions that handles the caching of the simulation""" + try: + image_stack = np.load(cache_file, mmap_mode='r', allow_pickle=False) + except Exception: + image_stack = simulate_diffractions(grain_params, experiment, + controller=controller) + np.save(cache_file, image_stack) + + controller.handle_result('image_stack', image_stack) + + return image_stack + + +def simulate_diffractions(grain_params, experiment, controller): + """actual forward simulation of the diffraction""" + + # use a packed array for the image_stack + array_dims = (experiment.nframes, + experiment.ncols, + ((experiment.nrows - 1)//8) + 1) + image_stack = np.zeros(array_dims, dtype=np.uint8) + + count = len(grain_params) + subprocess = 'simulate diffractions' + + _project = xrdutil._project_on_detector_plane + rD = experiment.rMat_d + chi = experiment.chi + tD = experiment.tVec_d + tS = experiment.tVec_s + distortion = experiment.distortion + + eta_range = [(-np.pi, np.pi), ] + ome_range = experiment.ome_range + ome_period = (-np.pi, np.pi) + + full_hkls = xrdutil._fetch_hkls_from_planedata(experiment.plane_data) + bMat = experiment.plane_data.latVecOps['B'] + wlen = experiment.plane_data.wavelength + + controller.start(subprocess, count) + for i in range(count): + rC = xfcapi.makeRotMatOfExpMap(grain_params[i][0:3]) + tC = np.ascontiguousarray(grain_params[i][3:6]) + vInv_s = np.ascontiguousarray(grain_params[i][6:12]) + ang_list = np.vstack(xfcapi.oscillAnglesOfHKLs(full_hkls[:, 1:], chi, + rC, bMat, wlen, + vInv=vInv_s)) + # hkls not needed here + all_angs, _ = xrdutil._filter_hkls_eta_ome(full_hkls, ang_list, + eta_range, ome_range) + all_angs[:, 2] = xfcapi.mapAngle(all_angs[:, 2], ome_period) + + det_xy, _ = _project(all_angs, rD, rC, chi, tD, + tC, tS, distortion) + + _write_pixels(det_xy, all_angs[:, 2], image_stack, experiment.base, + experiment.inv_deltas, experiment.clip_vals) + + controller.update(i+1) + + controller.finish(subprocess) + return image_stack + + +# This part is critical for the performance of simulate diffractions. It +# basically "renders" the "pixels". It takes the coordinates, quantizes to an +# image coordinate and writes to the appropriate image in the stack. Note +# that it also performs clipping based on inv_deltas and clip_vals. +# +# Note: This could be easily modified so that instead of using an array of +# booleans, an array of uint8 could be used so the image is stored +# with a bit per pixel. 
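+# As a quick sanity sketch (not part of the pipeline), the bit addressing
+# used throughout this file matches numpy's big-endian packbits convention;
+# pixel x lives in byte x // 8 at bit 7 - (x % 8):
+#
+#   >>> import numpy as np
+#   >>> row = np.zeros(16, dtype=np.uint8)
+#   >>> row[3] = 1                                   # "light up" pixel x = 3
+#   >>> packed = np.packbits(row)                    # 2 bytes for 16 pixels
+#   >>> bool(packed[3 // 8] & (1 << (7 - 3 % 8)))
+#   True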
+ +@numba.njit +def _write_pixels(coords, angles, image, base, inv_deltas, clip_vals): + count = len(coords) + for i in range(count): + x = int(np.floor((coords[i, 0] - base[0]) * inv_deltas[0])) + + if x < 0 or x >= clip_vals[0]: + continue + + y = int(np.floor((coords[i, 1] - base[1]) * inv_deltas[1])) + + if y < 0 or y >= clip_vals[1]: + continue + + z = int(np.floor((angles[i] - base[2]) * inv_deltas[2])) + + x_byte = x // 8 + x_off = 7 - (x % 8) + image[z, y, x_byte] |= (1 << x_off) + + +# ============================================================================== +# %% ORIENTATION TESTING +# ============================================================================== +def test_orientations(image_stack, experiment, controller): + """grand loop precomputing the grown image stack + + image-stack -- is the image stack to be tested against. + + experiment -- A bunch of experiment related parameters. + + controller -- An external object implementing the hooks to notify progress + as well as figuring out what to do with results. + """ + + # extract some information needed ========================================= + # number of grains, number of coords (maybe limited by call), projection + # function to use, chunk size to use if multiprocessing and the number + # of cpus. + n_grains = experiment.n_grains + chunk_size = controller.get_chunk_size() + ncpus = controller.get_process_count() + + # generate angles ========================================================= + # all_angles will be a list containing arrays for the different angles to + # use, one entry per grain. + # + # Note that the angle generation is driven by the exp_maps in the experiment + all_angles = evaluate_diffraction_angles(experiment, controller) + + # generate coords ========================================================= + # The grid of coords to use to test + test_crds = generate_test_grid(-0.25, 0.25, 101) + n_coords = controller.limit('coords', len(test_crds)) + + # first, perform image dilation =========================================== + # perform image dilation (using scikit_image dilation) + subprocess = 'dilate image_stack' + dilation_shape = np.ones((2*experiment.row_dilation + 1, + 2*experiment.col_dilation + 1), + dtype=np.uint8) + image_stack_dilated = np.empty_like(image_stack) + dilated = np.empty((image_stack.shape[-2], image_stack.shape[-1]<<3), + dtype=np.bool) + n_images = len(image_stack) + controller.start(subprocess, n_images) + for i_image in range(n_images): + to_dilate = np.unpackbits(image_stack[i_image], axis=-1) + ski_dilation(to_dilate, dilation_shape, + out=dilated) + image_stack_dilated[i_image] = np.packbits(dilated, axis=-1) + controller.update(i_image+1) + controller.finish(subprocess) + + # precompute per-grain stuff ============================================== + # gVec_cs and rmat_ss can be precomputed, do so. + subprocess = 'precompute gVec_cs' + controller.start(subprocess, len(all_angles)) + precomp = [] + for i, angs in enumerate(all_angles): + rmat_ss = xfcapi.makeOscillRotMatArray(experiment.chi, angs[:,2]) + gvec_cs = _anglesToGVec(angs, rmat_ss, experiment.rMat_c[i]) + precomp.append((gvec_cs, rmat_ss)) + controller.finish(subprocess) + + # grand loop ============================================================== + # The near field simulation 'grand loop'. Where the bulk of computing is + # performed. 
We are looking for a confidence matrix that has a
    # (n_grains, n_coords) shape: one row per grain, one column per tested
    # coordinate.
+    chunks = xrange(0, n_coords, chunk_size)
+    subprocess = 'grand_loop'
+    controller.start(subprocess, n_coords)
+    finished = 0
+    ncpus = min(ncpus, len(chunks))
+
+    logging.info('Checking confidence for %d coords, %d grains.',
+                 n_coords, n_grains)
+    confidence = np.empty((n_grains, n_coords))
+    if ncpus > 1:
+        global _multiprocessing_start_method
+        logging.info('Running multiprocess %d processes (%s)',
+                     ncpus, _multiprocessing_start_method)
+        with grand_loop_pool(ncpus=ncpus, state=(chunk_size,
+                                                 image_stack_dilated,
+                                                 all_angles, precomp, test_crds,
+                                                 experiment)) as pool:
+            for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop,
+                                                       chunks):
+                count = rvalues.shape[1]
+                confidence[:, rslice] = rvalues
+                finished += count
+                controller.update(finished)
+    else:
+        logging.info('Running in a single process')
+        for chunk_start in chunks:
+            chunk_stop = min(n_coords, chunk_start+chunk_size)
+            rslice, rvalues = _grand_loop_inner(image_stack_dilated, all_angles,
+                                                precomp, test_crds, experiment,
+                                                start=chunk_start,
+                                                stop=chunk_stop)
+            count = rvalues.shape[1]
+            confidence[:, rslice] = rvalues
+            finished += count
+            controller.update(finished)
+
+    controller.finish(subprocess)
+    controller.handle_result("confidence", confidence)
+
+
+def evaluate_diffraction_angles(experiment, controller=None):
+    """Uses simulateGVecs to generate the angles used for each grain.
+    returns a list containing one array per grain.
+
+    experiment -- a bag of experiment values, including the grain specs
+    and other required parameters.
+    """
+    # extract required data from experiment
+    exp_maps = experiment.exp_maps
+    plane_data = experiment.plane_data
+    detector_params = experiment.detector_params
+    pixel_size = experiment.pixel_size
+    ome_range = experiment.ome_range
+    ome_period = experiment.ome_period
+
+    panel_dims_expanded = [(-10, -10), (10, 10)]
+    subprocess = 'evaluate diffraction angles'
+    controller.start(subprocess, len(experiment.exp_maps))
+    all_angles = []
+    ref_gparams = np.array([0., 0., 0., 1., 1., 1., 0., 0., 0.])
+    for i, exp_map in enumerate(experiment.exp_maps):
+        gparams = np.hstack([exp_map, ref_gparams])
+        sim_results = xrdutil.simulateGVecs(plane_data,
+                                            detector_params,
+                                            gparams,
+                                            panel_dims=panel_dims_expanded,
+                                            pixel_pitch=pixel_size,
+                                            ome_range=ome_range,
+                                            ome_period=ome_period,
+                                            distortion=None)
+        all_angles.append(sim_results[2])
+        controller.update(i+1)
+    controller.finish(subprocess)
+
+    return all_angles
+
+
+def _grand_loop_inner(image_stack, angles, precomp,
+                      coords, experiment, start=0, stop=None):
+    """Actual simulation code for a chunk of data. It will be used both
+    in single processor and multiprocessor cases. Chunking is performed
+    on the coords.
+
+    image_stack -- the image stack from the sensors
+    angles -- the angles (grains) to test
+    coords -- all the coords to test
+    precomp -- (gvec_cs, rmat_ss) precomputed for each grain
+    experiment -- bag with experiment parameters
+    start -- chunk start offset
+    stop -- chunk end offset
+    """
+
+    t = time.time()
+    n_coords = len(coords)
+    n_angles = len(angles)
+
+    # experiment geometric layout parameters
+    rD = experiment.rMat_d
+    rCn = experiment.rMat_c
+    tD = experiment.tVec_d
+    tS = experiment.tVec_s
+
+    # experiment panel related configuration
+    base = experiment.base
+    inv_deltas = experiment.inv_deltas
+    clip_vals = experiment.clip_vals
+    distortion = experiment.distortion
+
+    _to_detector = xfcapi.gvecToDetectorXYArray
+    #_to_detector = _gvec_to_detector_array
+    stop = min(stop, n_coords) if stop is not None else n_coords
+
+    distortion_fn = None
+    if distortion is not None and len(distortion) > 0:
+        distortion_fn, distortion_args = distortion
+
+    acc_detector = 0.0
+    acc_distortion = 0.0
+    acc_quant_clip = 0.0
+    confidence = np.zeros((n_angles, stop-start))
+    grains = 0
+    crds = 0
+
+    if distortion_fn is None:
+        for igrn in xrange(n_angles):
+            angs = angles[igrn]; rC = rCn[igrn]
+            gvec_cs, rMat_ss = precomp[igrn]
+            grains += 1
+            for icrd in xrange(start, stop):
+                t0 = time.time()
+                det_xy = _to_detector(gvec_cs, rD, rMat_ss, rC,
+                                      tD, tS, coords[icrd])
+                t1 = time.time()
+                c = _quant_and_clip_confidence(det_xy, angs[:,2], image_stack,
+                                               base, inv_deltas, clip_vals)
+                t2 = time.time()
+                acc_detector += t1 - t0
+                acc_quant_clip += t2 - t1
+                crds += 1
+                confidence[igrn, icrd - start] = c
+    else:
+        for igrn in xrange(n_angles):
+            angs = angles[igrn]; rC = rCn[igrn]
+            gvec_cs, rMat_ss = precomp[igrn]
+            grains += 1
+            for icrd in xrange(start, stop):
+                t0 = time.time()
+                det_xy = _to_detector(gvec_cs, rD, rMat_ss, rC,
+                                      tD, tS, coords[icrd])
+                t1 = time.time()
+                # apply the inverse distortion to the projected coordinates
+                det_xy = distortion_fn(det_xy, distortion_args, invert=True)
+                t2 = time.time()
+                c = _quant_and_clip_confidence(det_xy, angs[:,2], image_stack,
+                                               base, inv_deltas, clip_vals)
+                t3 = time.time()
+                acc_detector += t1 - t0
+                acc_distortion += t2 - t1
+                acc_quant_clip += t3 - t2
+                crds += 1
+                confidence[igrn, icrd - start] = c
+
+    t = time.time() - t
+    return slice(start, stop), confidence
+
+
+def generate_test_grid(low, top, samples):
+    """generates a test grid of coordinates"""
+    cvec_s = np.linspace(low, top, samples)
+    Xs, Ys, Zs = np.meshgrid(cvec_s, cvec_s, cvec_s)
+    return np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T
+
+
+# Multiprocessing bits ========================================================
+#
+# The parallelized part of test_orientations uses some big arrays as part of
+# the state that needs to be communicated to the spawned processes.
+#
+# On fork platforms, take advantage of process memory inheritance.
+#
+# On non fork platforms, rely on joblib dumping the state to disk and loading
+# back in the target processes, pickling only the minimal information to load
+# state back. Pickling the big arrays directly was causing memory errors and
+# would be less efficient in memory (as joblib memmaps by default the big
+# arrays, meaning they may be shared between processes).
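+# A minimal sketch of the joblib round trip used for the spawn path (the file
+# name here is hypothetical; grand_loop_pool below does the real work):
+#
+#   >>> import os, tempfile, joblib
+#   >>> import numpy as np
+#   >>> path = os.path.join(tempfile.mkdtemp(), 'state')
+#   >>> joblib.dump((100, np.zeros((4, 4))), path)   # returns [path, ...]
+#   >>> chunk_size, image_stack = joblib.load(path)  # done in the worker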
+ +def multiproc_inner_loop(chunk): + """function to use in multiprocessing that computes the simulation over the + task's alloted chunk of data""" + + chunk_size = _mp_state[0] + n_coords = len(_mp_state[4]) + chunk_stop = min(n_coords, chunk+chunk_size) + return _grand_loop_inner(*_mp_state[1:], start=chunk, stop=chunk_stop) + + +def worker_init(id_state, id_exp): + """process initialization function. This function is only used when the + child processes are spawned (instead of forked). When using the fork model + of multiprocessing the data is just inherited in process memory.""" + import joblib + + global _mp_state + state = joblib.load(id_state) + experiment = joblib.load(id_exp) + _mp_state = state + (experiment,) + +@contextlib.contextmanager +def grand_loop_pool(ncpus, state): + """function that handles the initialization of multiprocessing. It handles + properly the use of spawned vs forked multiprocessing. The multiprocessing + can be either 'fork' or 'spawn', with 'spawn' being required in non-fork + platforms (like Windows) and 'fork' being preferred on fork platforms due + to its efficiency. + """ + # state = ( chunk_size, + # image_stack, + # angles, + # precomp, + # coords, + # experiment ) + global _multiprocessing_start_method + if _multiprocessing_start_method == 'fork': + # Use FORK multiprocessing. + + # All read-only data can be inherited in the process. So we "pass" it as + # a global that the child process will be able to see. At the end of the + # processing the global is removed. + global _mp_state + _mp_state = state + pool = multiprocessing.Pool(ncpus) + yield pool + del (_mp_state) + else: + # Use SPAWN multiprocessing. + + # As we can not inherit process data, all the required data is + # serialized into a temporary directory using joblib. The + # multiprocessing pool will have the "worker_init" as initialization + # function that takes the key for the serialized data, which will be + # used to load the parameter memory into the spawn process (also using + # joblib). In theory, joblib uses memmap for arrays if they are not + # compressed, so no compression is used for the bigger arrays. + import joblib + tmp_dir = tempfile.mkdtemp(suffix='-nf-grand-loop') + try: + # dumb dumping doesn't seem to work very well.. 
do something ad-hoc + logging.info('Using "%s" as temporary directory.', tmp_dir) + + id_exp = joblib.dump(state[-1], + os.path.join(tmp_dir, + 'grand-loop-experiment.gz'), + compress=True) + id_state = joblib.dump(state[:-1], + os.path.join(tmp_dir, 'grand-loop-data')) + pool = multiprocessing.Pool(ncpus, worker_init, + (id_state[0], id_exp[0])) + yield pool + finally: + logging.info('Deleting "%s".', tmp_dir) + shutil.rmtree(tmp_dir) + + +# ============================================================================== +# %% SCRIPT ENTRY AND PARAMETER HANDLING +# ============================================================================== +def main(args, controller): + grain_params, experiment = mockup_experiment() + controller.handle_result('experiment', experiment) + controller.handle_result('grain_params', grain_params) + image_stack = get_simulate_diffractions(grain_params, experiment, + controller=controller) + + test_orientations(image_stack, experiment, + controller=controller) + + +def parse_args(): + try: + default_ncpus = multiprocessing.cpu_count() + except NotImplementedError: + default_ncpus = 1 + + parser = argparse.ArgumentParser() + parser.add_argument("--inst-profile", action='append', default=[], + help="instrumented profile") + parser.add_argument("--generate", + help="generate file with intermediate results") + parser.add_argument("--check", + help="check against an file with intermediate results") + parser.add_argument("--limit", type=int, + help="limit the size of the run") + parser.add_argument("--ncpus", type=int, default=default_ncpus, + help="number of processes to use") + parser.add_argument("--chunk-size", type=int, default=100, + help="chunk size for use in multiprocessing/reporting") + parser.add_argument("--force-spawn-multiprocessing", action='store_true', + help="force using spawn as the multiprocessing method") + args = parser.parse_args() + + # keys = ['inst_profile', 'generate', 'check', 'limit', 'ncpus', 'chunk_size'] + # print('\n'.join([': '.join([key, str(getattr(args, key))]) for key in keys])) + + return args + + +def build_controller(args): + # builds the controller to use based on the args + + # result handle + progress_handler = progressbar_progress_observer() + + if args.check is not None: + if args.generate is not None: + logging.warn( + "generating and checking can not happen at the same time, " + + "going with checking") + + result_handler = checking_result_handler(args.check) + elif args.generate is not None: + result_handler = saving_result_handler(args.generate) + else: + result_handler = forgetful_result_handler() + + # if args.ncpus > 1 and os.name == 'nt': + # logging.warn("Multiprocessing on Windows is disabled for now") + # args.ncpus = 1 + + controller = ProcessController(result_handler, progress_handler, + ncpus=args.ncpus, + chunk_size=args.chunk_size) + if args.limit is not None: + controller.set_limit('coords', lambda x: min(x, args.limit)) + + return controller + + +# assume that if os has fork, it will be used by multiprocessing. +# note that on python > 3.4 we could use multiprocessing get_start_method and +# set_start_method for a cleaner implementation of this functionality. 
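+# For reference, on python >= 3.4 the module-level flag below could be
+# replaced with the standard library API (a sketch only; this script
+# targets python 2, so it keeps the global):
+#
+#   >>> import multiprocessing, os
+#   >>> multiprocessing.set_start_method(
+#   ...     'fork' if hasattr(os, 'fork') else 'spawn')
+#   >>> multiprocessing.get_start_method()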
+_multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn'
+
+if __name__ == '__main__':
+    FORMAT = "%(relativeCreated)12d [%(process)6d/%(thread)6d] %(levelname)8s: %(message)s"
+    logging.basicConfig(level=logging.NOTSET,
+                        format=FORMAT)
+    args = parse_args()
+
+    if len(args.inst_profile) > 0:
+        from hexrd.utils import profiler
+
+        logging.debug("Instrumenting functions")
+        profiler.instrument_all(args.inst_profile)
+
+    if args.force_spawn_multiprocessing:
+        _multiprocessing_start_method = 'spawn'
+
+    controller = build_controller(args)
+    main(args, controller)
+    del controller
+
+    if len(args.inst_profile) > 0:
+        logging.debug("Dumping profiler results")
+        profiler.dump_results(args.inst_profile)
diff --git a/scripts/nf_munge.py b/scripts/nf_munge.py
new file mode 100644
index 00000000..269c7d01
--- /dev/null
+++ b/scripts/nf_munge.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jun 19 18:43:52 2018
+
+@author: s1iduser
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+from skimage import io
+
+# %%
+img_stem = 'MapZBeam_5mmTo9mm_%s__%06d.tif'
+
+im_or = ['m90deg', '0deg', '90deg']
+im_idx = 31
+
+img_list = [io.imread(img_stem % (i, im_idx)) for i in im_or]
+
+# %%
+for i, img in enumerate(img_list):
+    fig, ax = plt.subplots()
+
+    ax.imshow(img, cmap=plt.cm.inferno, vmin=np.percentile(img, 50))
+    fig.suptitle("%s" % im_or[i])
+    ax.axis('normal')
+
+# %%
+x_range = [0, 2047]  # eyeballed
+y_range = [1990, 2040]  # eyeballed
+
+beam_img_list = [img[np.ix_(range(*y_range), range(*x_range))] for img in img_list]
+
+sinogram = np.vstack([np.sum(bimg, axis=0) for bimg in beam_img_list])
+
+
+# %%
+pix_size = 0.00148
+
+left = pix_size*np.r_[361, 2015]
+right = pix_size*np.r_[1669, 2029]
+
+diff = right - left
+incl = np.degrees(np.arctan2(diff[1], diff[0]))
\ No newline at end of file
diff --git a/scripts/post_process_stress.py b/scripts/post_process_stress.py
new file mode 100644
index 00000000..fd6d9223
--- /dev/null
+++ b/scripts/post_process_stress.py
@@ -0,0 +1,154 @@
+# ============================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License (as published by the Free Software
+# Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit http://www.gnu.org/licenses/.
+# ============================================================
+#%%
+import sys
+import cPickle as cpl
+
+import numpy as np
+
+import argparse
+
+from hexrd import matrixutil as mutil
+from hexrd.xrd import rotations as rot
+
+#%% Extract stress data from grains.out data
+
+
+def post_process_stress(grain_data,c_mat_C,schmid_T_list=None):
+    num_grains=grain_data.shape[0]
+
+    stress_S=np.zeros([num_grains,6])
+    stress_C=np.zeros([num_grains,6])
+    hydrostatic=np.zeros([num_grains,1])
+    pressure=np.zeros([num_grains,1])
+    von_mises=np.zeros([num_grains,1])
+
+    if schmid_T_list is not None:
+        num_slip_systems=schmid_T_list.shape[0]
+        RSS=np.zeros([num_grains,num_slip_systems])
+
+    for jj in np.arange(num_grains):
+
+        expMap=np.atleast_2d(grain_data[jj,3:6]).T
+        strainTmp=np.atleast_2d(grain_data[jj,15:21]).T
+
+        #Turn exponential map into an orientation matrix
+        Rsc=rot.rotMatOfExpMap(expMap)
+
+        strainTenS = np.zeros((3, 3), dtype='float64')
+        strainTenS[0, 0] = strainTmp[0]
+        strainTenS[1, 1] = strainTmp[1]
+        strainTenS[2, 2] = strainTmp[2]
+        strainTenS[1, 2] = strainTmp[3]
+        strainTenS[0, 2] = strainTmp[4]
+        strainTenS[0, 1] = strainTmp[5]
+        strainTenS[2, 1] = strainTmp[3]
+        strainTenS[2, 0] = strainTmp[4]
+        strainTenS[1, 0] = strainTmp[5]
+
+        strainTenC=np.dot(np.dot(Rsc.T,strainTenS),Rsc)
+        strainVecC = mutil.strainTenToVec(strainTenC)
+
+        #Calculate stress
+        stressVecC=np.dot(c_mat_C,strainVecC)
+        stressTenC = mutil.stressVecToTen(stressVecC)
+        stressTenS = np.dot(np.dot(Rsc,stressTenC),Rsc.T)
+        stressVecS = mutil.stressTenToVec(stressTenS)
+
+        #Calculate hydrostatic stress
+        hydrostaticStress=(stressVecS[:3].sum()/3)
+
+        #Calculate von Mises stress; note the float literals --
+        #under python 2, 3/2 evaluates to 1
+        devStressS=stressTenS-hydrostaticStress*np.identity(3)
+        vonMisesStress=np.sqrt((3./2.)*(devStressS**2).sum())
+
+        #Project on to slip systems
+        if schmid_T_list is not None:
+            for ii in np.arange(num_slip_systems):
+                RSS[jj,ii]=np.abs((stressTenC*schmid_T_list[ii,:,:]).sum())
+
+        stress_S[jj,:]=stressVecS.flatten()
+        stress_C[jj,:]=stressVecC.flatten()
+
+        hydrostatic[jj,0]=hydrostaticStress
+        pressure[jj,0]=-hydrostaticStress
+        von_mises[jj,0]=vonMisesStress
+
+    stress_data=dict()
+
+    stress_data['stress_S']=stress_S
+    stress_data['stress_C']=stress_C
+    stress_data['hydrostatic']=hydrostatic
+    stress_data['pressure']=pressure
+    stress_data['von_mises']=von_mises
+
+    if schmid_T_list is not None:
+        stress_data['RSS']=RSS
+
+    return stress_data
+
+#%% Command Line Access
+if __name__ == '__main__':
+    """
+    USAGE : python post_process_stress.py grains_file stiffness_mat_file output_file_stem [--schmid_tensors_file FILE]
+    """
+    parser = argparse.ArgumentParser(description='Post Process HEXRD Grains File To Extract Stress Tensor and Associated Quantities (Assuming Small Strain, Linear Elasticity)')
+
+    parser.add_argument('grains_file', type=str)
+    parser.add_argument('stiffness_mat_file', type=str)
+    parser.add_argument('output_file_stem', type=str)
+    parser.add_argument('--schmid_tensors_file', type=str, default=None)
+
+    args = vars(parser.parse_args(sys.argv[1:]))
+
+    grain_data=np.loadtxt(args['grains_file'])
+
+    c_mat=np.loadtxt(args['stiffness_mat_file'])
+
+    #Extract Schmid Tensors from txt file
+    if args['schmid_tensors_file'] is not None:
+        T_vec = np.atleast_2d(np.loadtxt(args['schmid_tensors_file']))
+        num_ten=T_vec.shape[0]
+        T=np.zeros([num_ten,3,3])
+        for i in np.arange(num_ten):
+            T[i,:,:]=T_vec[i,:].reshape([3,3])
+
+        stress_data=post_process_stress(grain_data,c_mat,T)
+
+    else:
+        
stress_data=post_process_stress(grain_data,c_mat) + + + cpl.dump(stress_data, open( args['output_file_stem']+'.cpl', "wb" ) ) diff --git a/scripts/process_nf_grain_map.py b/scripts/process_nf_grain_map.py new file mode 100644 index 00000000..d2f545e4 --- /dev/null +++ b/scripts/process_nf_grain_map.py @@ -0,0 +1,297 @@ +# %% IMPORTS + + +import numpy as np + +import matplotlib.pyplot as plt + +import multiprocessing as mp + +import os + +from hexrd.grainmap import nfutil +from hexrd.grainmap import tomoutil +from hexrd.grainmap import vtkutil + +# ============================================================================= +# %% FILES TO LOAD -CAN BE EDITED +# ============================================================================= +# These files are attached, retiga.yml is a detector configuration file +# The near field detector was already calibrated + +# A materials file, is a cPickle file which contains material information like +# lattice parameters necessary for the reconstruction +det_file = '/####/retiga.yml' +mat_file = '/####/materials.cpl' + +# ============================================================================= +# %% OUTPUT INFO -CAN BE EDITED +# ============================================================================= + +output_dir = '/####/' +output_stem = '/####/' + +# ============================================================================= +# %% NEAR FIELD DATA FILES -CAN BE EDITED +# ============================================================================= + +# These are the near field data files used for the reconstruction, a grains.out +# file from the far field analaysis is used as orientation guess for the grid +# that will be used for the near field reconstruction +grain_out_file = '/####/grains.out' + +# Locations of near field images +data_folder = '/###/' + +img_start = '/##/' +num_imgs = 1441 + +img_nums = np.arange(img_start, img_start + num_imgs, 1) + +# ============================================================================= +# %% TOMOGRAPHY DATA FILES -CAN BE EDITED +# ============================================================================= + +# Locations of tomography bright field images +tbf_data_folder = '/####/' + +tbf_img_start = '/##/' +tbf_num_imgs = 20 + +# Locations of tomography images +tomo_data_folder = '/####/' + +tomo_img_start = '/##/' +tomo_num_imgs = 720 + +# ============================================================================= +# %% USER OPTIONS -CAN BE EDITED +# ============================================================================= + +x_ray_energy = '/###/' # keV + +# name of the material for the reconstruction +mat_name = 'MAT_NAME' + +# reconstruction with misorientation included, for many grains, +# this will quickly make the reconstruction size unmanagable +misorientation_bnd = 0.0 # degrees +misorientation_spacing = 0.25 # degrees + +beam_stop_width = 0.55 # mm, assumed to be in the center of the detector + +ome_range_deg = [(0., 360.), ] # degrees + +# maximu bragg angle 2theta in degrees +# if -1, all peaks that will hit the detector are calculated +max_tth = -1. + +# image processing +num_for_dark = 250 # num images to use for median data +threshold = 3. + +# !!! 
DO NOT CHANGE ANY OF THESE UNLESS YOU KNOW WHAT YOU ARE DOING +num_erosions = 3 # num iterations of images erosion +num_dilations = 2 # num iterations of images erosion +ome_dilation_iter = 1 # num iterations of 3d image stack dilations +chunk_size = 500 # chunksize for multiprocessing + +# thresholds for accepting FF grains in NF reconstruction +min_completeness = 0.5 +max_chisq = 0.05 + +# tomography options +layer_row = 1024 # row of layer to use to find the specimen cross section + +# TOMO OPTIONS +# !!! Don't change these unless you know what you are doing +# this will close small holes and remove noise +recon_thresh = 0.00006 # usually varies between 0.0001 and 0.0005 +noise_obj_size = 5000 +min_hole_size = 5000 + +# cross sectional to reconstruct (should be at least 20%-30% over sample width) +cross_sectional_dim = 1.00 + +voxel_spacing = 0.005 # voxel spacing for the near field reconstruction in mm +v_bnds = [-0.005, 0.005] # vertical (y) reconstruction voxel bounds in mm + +# ============================================================================= +# %% LOAD GRAIN DATA +# ============================================================================= + +experiment, nf_to_ff_id_map = nfutil.gen_trial_exp_data( + grain_out_file, det_file, mat_file, x_ray_energy, mat_name, + max_tth, min_completeness, max_chisq, misorientation_bnd, + misorientation_spacing, ome_range_deg, num_imgs, beam_stop_width +) + +# ============================================================================= +# %% TOMO PROCESSING - GENERATE BRIGHT FIELD +# ============================================================================= + +tbf = tomoutil.gen_bright_field( + tbf_data_folder, tbf_img_start, tbf_num_imgs, + experiment.nrows, experiment.ncols +) + +# ============================================================================= +# %% TOMO PROCESSING - BUILD RADIOGRAPHS +# ============================================================================= + +rad_stack = tomoutil.gen_attenuation_rads( + tomo_data_folder, tbf, tomo_img_start, tomo_num_imgs, + experiment.nrows, experiment.ncols +) + +# ============================================================================= +# %% TOMO PROCESSING - INVERT SINOGRAM +# ============================================================================= + +reconstruction_fbp = tomoutil.tomo_reconstruct_layer( + rad_stack, cross_sectional_dim, layer_row=layer_row, + start_tomo_ang=ome_range_deg[0][0], end_tomo_ang=ome_range_deg[0][1], + tomo_num_imgs=tomo_num_imgs, center=experiment.detector_params[3] +) + +# ============================================================================= +# %% TOMO PROCESSING - VIEW RAW FILTERED BACK PROJECTION +# ============================================================================= + +plt.close('all') +plt.imshow(reconstruction_fbp, vmin=0.75e-4, vmax=2e-4) +# Use this image to view the raw reconstruction, estimate threshold levels. 
and +# figure out if the rotation axis position needs to be corrected + + +# ============================================================================= +# %% TOMO PROCESSING - CLEAN TOMO RECONSTRUCTION +# ============================================================================= + +binary_recon = tomoutil.threshold_and_clean_tomo_layer( + reconstruction_fbp, + recon_thresh, + noise_obj_size, + min_hole_size +) + +# ============================================================================= +# %% TOMO PROCESSING - RESAMPLE TOMO RECONSTRUCTION +# ============================================================================= + +tomo_mask = tomoutil.crop_and_rebin_tomo_layer( + binary_recon, recon_thresh, voxel_spacing, + experiment.pixel_size[0], cross_sectional_dim +) + +# ============================================================================= +# %% TOMO PROCESSING - VIEW TOMO_MASK FOR SAMPLE BOUNDS +# ============================================================================= + +plt.close('all') +plt.imshow(tomo_mask, interpolation='none') + +# ============================================================================= +# %% TOMO PROCESSING - CONSTRUCT DATA GRID +# ============================================================================= + +test_crds, n_crds, Xs, Ys, Zs = nfutil.gen_nf_test_grid_tomo( + tomo_mask.shape[1], tomo_mask.shape[0], + v_bnds, voxel_spacing +) + +# ============================================================================= +# %% NEAR FIELD - MAKE MEDIAN DARK +# ============================================================================= + +dark = nfutil.gen_nf_dark( + data_folder, img_nums, num_for_dark, + experiment.nrows, experiment.ncols +) + +# ============================================================================= +# %% NEAR FIELD - LOAD IMAGE DATA AND PROCESS +# ============================================================================= + +image_stack = nfutil.gen_nf_image_stack( + data_folder, img_nums, dark, + num_erosions, num_dilations, ome_dilation_iter, + threshold, experiment.nrows, experiment.ncols +) + +# ============================================================================= +# %% VIEW IMAGES FOR DEBUGGING TO LOOK AT IMAGE PROCESSING PARAMETERS +# ============================================================================= +plt.close('all') +img_to_view = 0 +plt.imshow(image_stack[img_to_view, :, :], interpolation='none') + +# ============================================================================= +# %% INSTANTIATE CONTROLLER - RUN BLOCK NO EDITING +# ============================================================================= + +progress_handler = nfutil.progressbar_progress_observer() +save_handler = nfutil.forgetful_result_handler() + +controller = nfutil.ProcessController(save_handler, progress_handler, + ncpus=mp.cpu_count(), + chunk_size=chunk_size) + +multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn' + +# ============================================================================= +# %% TEST ORIENTATIONS - RUN BLOCK NO EDITING +# ============================================================================= + +raw_confidence = nfutil.test_orientations( + image_stack, experiment, test_crds, + controller, multiprocessing_start_method +) + +# ============================================================================= +# %% POST PROCESS W WHEN TOMOGRAPHY HAS BEEN USED +# ============================================================================= + +grain_map, confidence_map 
= nfutil.process_raw_confidence( + raw_confidence, Xs.shape, + tomo_mask=tomo_mask, + id_remap=nf_to_ff_id_map +) + +# ============================================================================= +# %% SAVE RAW CONFIDENCE FILES +# =========================================================================== + +# This will be a very big file, don't save it if you don't need it +nfutil.save_raw_confidence( + output_dir, output_stem, raw_confidence, + id_remap=nf_to_ff_id_map +) + +# ============================================================================= +# %% SAVE PROCESSED GRAIN MAP DATA +# ============================================================================= + +nfutil.save_nf_data( + output_dir, output_stem, grain_map, confidence_map, + Xs, Ys, Zs, experiment.exp_maps, + id_remap=nf_to_ff_id_map +) + +# ============================================================================= +# %% PLOTTING SINGLE LAYERS FOR DEBUGGING +# ============================================================================= + +layer_no = 0 +nfutil.plot_ori_map( + grain_map, confidence_map, experiment.exp_maps, layer_no, + id_remap=nf_to_ff_id_map +) + +# ============================================================================= +# %% SAVE DATA AS VTK +# ============================================================================= + +vtkutil.output_grain_map_vtk( + output_dir, [output_stem], output_stem, 0.1 +) diff --git a/scripts/stitch_grains.py b/scripts/stitch_grains.py new file mode 100644 index 00000000..8e333053 --- /dev/null +++ b/scripts/stitch_grains.py @@ -0,0 +1,286 @@ +# ============================================================ +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on downloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License (as published by the Free Software +# Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . 
+# ============================================================
+###############################################################################
+
+#%% #Import Modules
+import os
+
+import numpy as np
+
+import copy
+
+import cPickle as cpl
+
+from hexrd import matrixutil as mutil
+from hexrd.xrd import rotations as rot
+from hexrd.xrd import symmetry as sym
+
+import shutil
+
+
+#%% Functions to preload
+
+def remove_duplicate_grains(grain_data,qsyms,dist_thresh=0.01,misorient_thresh=0.1,comp_diff=0.1):
+    total_grains=grain_data.shape[0]
+
+    all_comp=grain_data[:,1]
+    grain_quats=rot.quatOfExpMap(grain_data[:,3:6].T)
+    dup_list=np.array([])
+
+    print 'Removing duplicate grains'
+    for i in np.arange(total_grains-1):
+        # grains.out columns 6:9 hold the grain centroid (tVec_c); the
+        # spatial distance test below assumes these (3:6 are the exp map)
+        cur_pos=grain_data[i,6:9]
+        other_pos=grain_data[(i+1):,6:9]
+        xdist=cur_pos[0]-other_pos[:,0]
+        ydist=cur_pos[1]-other_pos[:,1]
+        zdist=cur_pos[2]-other_pos[:,2]
+
+        dist=np.sqrt(xdist**2.+ydist**2.+zdist**2.)
+
+        # NOTE: the original tests in this conditional were lost in
+        # transcription; what follows is a plausible reconstruction of the
+        # distance / misorientation / completeness criteria described in
+        # the user input section below
+        if np.min(dist)<dist_thresh:
+            match=np.argmin(dist)+i+1
+            mis_ang=rot.misorientation(grain_quats[:,i].reshape(4,1),
+                                       grain_quats[:,match].reshape(4,1),
+                                       (qsyms,))[0]
+            if np.degrees(mis_ang)<misorient_thresh:
+                dup_list=np.append(dup_list,match)
+                if((all_comp[match]-all_comp[i])>0):
+                    grain_data[i,:]=grain_data[match,:]
+
+    grain_data=np.delete(grain_data,dup_list,axis=0)
+
+    print 'Removed %d Grains' % (len(dup_list))
+
+    grain_data[:,0]=np.arange(grain_data.shape[0])
+
+    return grain_data,dup_list
+
+
+
+def assemble_grain_data(grain_data_list,pos_offset=None,rotation_offset=None):
+    num_grain_files=len(grain_data_list)
+
+    num_grains_list=[None]*num_grain_files
+
+    for i in np.arange(num_grain_files):
+        num_grains_list[i]=grain_data_list[i].shape[0]
+
+    num_grains=np.sum(num_grains_list)
+
+    grain_data=np.zeros([num_grains,grain_data_list[0].shape[1]])
+
+    for i in np.arange(num_grain_files):
+
+        tmp=copy.copy(grain_data_list[i])
+
+        if pos_offset is not None:
+            pos_tile=np.tile(pos_offset[:,i],[num_grains_list[i],1])
+            tmp[:,6:9]=tmp[:,6:9]+pos_tile
+        #Needs Testing
+        if rotation_offset is not None:
+            rot_tile=np.tile(np.atleast_2d(rotation_offset[:,i]).T,[1,num_grains_list[i]])
+            quat_tile=rot.quatOfExpMap(rot_tile)
+            grain_quats=rot.quatOfExpMap(tmp[:,3:6].T)
+            new_quats=rot.quatProduct(grain_quats,quat_tile)
+
+            sinang = mutil.columnNorm(new_quats[1:,:])
+            ang=2.*np.arcsin(sinang)
+            axis = mutil.unitVector(new_quats[1:,:])
+            tmp[:,3:6]=np.tile(np.atleast_2d(ang).T,[1,3])*axis.T
+
+        grain_data[int(np.sum(num_grains_list[:i])):int(np.sum(num_grains_list[:(i+1)])),:]=tmp
+
+    old_grain_numbers=copy.copy(grain_data[:,0])
+    grain_data[:,0]=np.arange(num_grains)
+    return grain_data,old_grain_numbers
+
+###############################################################################
+#%% User Input
+###############################################################################
+
+
+material_file_loc='/####/materials.cpl' # hexrd material file in cpickle format
+mat_name='####'
+
+#grain_file_locs=['/nfs/chess/aux/cycles/2017-1/f2/hurley-568-1/angquartz-1-reduction-attempt3/aquartz_%s_v0'%(load_name),\
+#                 '/nfs/chess/aux/cycles/2017-1/f2/hurley-568-1/angquartz-1-reduction-attempt3/aquartz_%s_v1'%(load_name)]#Can be more than 2
+
+grain_file_locs=['/####/layer_0',\
+                 '/####/layer_1']#Can be more than one grains file
+
+output_data=True
+output_dir='/####'
+
+
+#Position and Misorientation differences to merge grains
+dist=0.05 #mm
+misorientation=1. 
#degrees
+completeness_diff=0.1 #if two grains are matched, their completenesses are compared:
+#if the difference in completeness is within completeness_diff, the grain values are averaged;
+#if not, the data from the grain with the higher completeness is kept and the other data is discarded
+
+
+#Offsets; these can be input as arguments
+#Each dataset can have positional or rotational offsets
+#Position offsets are in mm, a 3 x n matrix where n is the number of grains.out files being stitched
+#Rotation offsets are exponential maps, also 3 x n
+
+#3 x 2 examples
+pos_offset=np.array([[0.,0.],[0.,0.],[0.,0.]])
+rot_offset=None
+
+
+low_comp_thresh=0.6
+high_chi2_thresh=0.05
+
+
+
+#vertical dispersion correction
+vd_lin=0. #vol_strain/mm
+vd_const=0. #vol_strain
+
+
+###############################################################################
+#%% Load data
+###############################################################################
+
+mat_list = cpl.load(open(material_file_loc, 'r'))
+mat_idx = np.where([mat_list[i].name == mat_name for i in range(len(mat_list))])[0]
+
+# grab plane data, and useful things hanging off of it
+pd = mat_list[mat_idx[0]].planeData
+qsyms=sym.quatOfLaueGroup(pd.getLaueGroup())
+
+
+num_grain_files=len(grain_file_locs)
+
+grain_data_list=[None]*num_grain_files
+
+for i in np.arange(num_grain_files):
+
+    grain_data_list[i]=np.loadtxt(os.path.join(grain_file_locs[i],'grains.out'))
+
+    pos_0=grain_data_list[i][:,6:9]
+    grain_data_list[i][:,15]=grain_data_list[i][:,15]-(vd_lin*pos_0[:,1]+vd_const)
+    grain_data_list[i][:,16]=grain_data_list[i][:,16]-(vd_lin*pos_0[:,1]+vd_const)
+    grain_data_list[i][:,17]=grain_data_list[i][:,17]-(vd_lin*pos_0[:,1]+vd_const)
+
+    good_comp=np.where(grain_data_list[i][:,1]>=low_comp_thresh)[0]
+    good_chi2=np.where(grain_data_list[i][:,2]<=high_chi2_thresh)[0]
+
+    to_keep=np.intersect1d(good_comp,good_chi2)
+
+    grain_data_list[i]=grain_data_list[i][to_keep,:]
+
+
+
+grain_data,old_grain_numbers=assemble_grain_data(grain_data_list,pos_offset,rot_offset)
+
+grain_data,dup_list=remove_duplicate_grains(grain_data,qsyms,dist,misorientation,completeness_diff)
+
+old_grain_numbers=np.delete(old_grain_numbers,dup_list)
+
+#split the surviving grain numbers back into one block per input grains.out;
+#a decrease in the (originally ascending) numbering marks a file boundary
+divisions=np.array(np.where(np.diff(old_grain_numbers)<0)[0]+1)
+
+num_blocks=len(divisions)+1
+old_grain_blocks=[None]*num_blocks
+
+for i in np.arange(num_blocks):
+    lo = 0 if i == 0 else divisions[i-1]
+    hi = len(old_grain_numbers) if i == (num_blocks-1) else divisions[i]
+    old_grain_blocks[i]=old_grain_numbers[lo:hi]
+
+
+###############################################################################
+#%% Write data
+###############################################################################
+
+
+
+if not os.path.exists(output_dir):
+    os.makedirs(output_dir)
+
+
+if output_data:
+    print('Writing out grain data for ' + str(grain_data.shape[0]) + ' grains')
+    f = open(os.path.join(output_dir, 'grains.out'), 'w')
+
+    header_items = (
+        'grain ID', 'completeness', 'chi2',
+        'xi[0]', 'xi[1]', 'xi[2]', 'tVec_c[0]', 'tVec_c[1]', 'tVec_c[2]',
+        'vInv_s[0]', 'vInv_s[1]', 'vInv_s[2]', 'vInv_s[4]*sqrt(2)',
+        'vInv_s[5]*sqrt(2)', 'vInv_s[6]*sqrt(2)', 'ln(V[0,0])',
+        'ln(V[1,1])', 'ln(V[2,2])', 'ln(V[1,2])', 'ln(V[0,2])', 'ln(V[0,1])',
+        )
+    len_items = []
+    for i in header_items[1:]:
+        temp = len(i)
+        len_items.append(temp if temp > 19 else 19)  # for %19.12g
+    fmtstr = '#%13s ' + ' '.join(['%%%ds' % i for i in len_items]) + '\n'
+    f.write(fmtstr % header_items)
+    for i in np.arange(grain_data.shape[0]):
+        res_items = (
+            grain_data[i,0], grain_data[i,1], grain_data[i,2], grain_data[i,3], grain_data[i,4], grain_data[i,5],
+            grain_data[i,6], grain_data[i,7], grain_data[i,8], grain_data[i,9],
+            grain_data[i,10], grain_data[i,11], grain_data[i,12], grain_data[i,13],
+            grain_data[i,14], grain_data[i,15], grain_data[i,16], grain_data[i,17], grain_data[i,18],
+            grain_data[i,19], grain_data[i,20],
+            )
+        fmtstr = (
+            '%14d ' + ' '.join(['%%%d.12g' % i for i in len_items]) + '\n'
+        )
+        f.write(fmtstr % res_items)
+
+    f.close()
+
+
+    #copy the spots files over to the stitched output, renumbering sequentially
+    counter=0
+    for i in np.arange(len(old_grain_blocks)):
+        for j in np.arange(len(old_grain_blocks[i])):
+            shutil.copy2(os.path.join(grain_file_locs[i],'spots_%05d.out' % (old_grain_blocks[i][j])),os.path.join(output_dir,'spots_%05d.out' % (counter)))
+            counter=counter+1
+
+
diff --git a/scripts/stitch_grains_files.py b/scripts/stitch_grains_files.py
new file mode 100644
index 00000000..a5b9b12c
--- /dev/null
+++ b/scripts/stitch_grains_files.py
@@ -0,0 +1,218 @@
+# ============================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License (as published by the Free Software
+# Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# ============================================================
+#%% User Input
+###############################################################################
+hexrd_script_directory='###'
+
+material_file_loc='###' # hexrd material file in cpickle format
+mat_name='###'
+
+grain_file_locs=['###',\
+                 '###'] #Can be more than 2
+
+output_data=True
+output_dir='###'
+
+
+#Position and misorientation differences used to merge grains
+dist=0.05 #mm
+misorientation=1. #degrees
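+# e.g., with dist=0.05 and misorientation=1., two grains are treated as the
+# same grain when their centroids lie within 50 microns and their orientations
+# agree to better than 1 degree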
+completeness_diff=0.1 #if two grains are matched, their completenesses are compared:
+#if the difference in completeness is within completeness_diff, the grain values are averaged;
+#if not, the data from the grain with the higher completeness is kept and the other data is discarded
+
+
+#Offsets; these can be input as arguments
+#Each dataset can have positional or rotational offsets
+#Position offsets are in mm, a 3 x n matrix where n is the number of grains.out files being stitched
+#Rotation offsets are exponential maps, also 3 x n
+
+#3 x 2 examples
+#pos_offset=np.array([[0.,0.],[0.,0.],[0.,0.]])
+#rot_offset=np.array([[0.,0.],[0.,0.],[0.,0.]])
+pos_offset=None
+rot_offset=None
+
+###############################################################################
+
+#%% #Import Modules
+import os
+
+import numpy as np
+
+import copy
+
+import cPickle as cpl
+
+from hexrd import matrixutil as mutil
+from hexrd.xrd import rotations as rot
+from hexrd.xrd import symmetry as sym
+
+
+
+#%%
+
+def remove_duplicate_grains(grain_data,qsyms,dist_thresh=0.01,misorient_thresh=0.1,comp_diff=0.1):
+    total_grains=grain_data.shape[0]
+
+    all_comp=grain_data[:,1]
+    grain_quats=rot.quatOfExpMap(grain_data[:,3:6].T)
+    dup_list=np.array([],dtype=int)
+
+    print 'Removing duplicate grains'
+    for i in np.arange(total_grains-1):
+        cur_pos=grain_data[i,6:9]   #tVec_c columns; 3:6 are the exp map
+        other_pos=grain_data[(i+1):,6:9]
+        xdist=cur_pos[0]-other_pos[:,0]
+        ydist=cur_pos[1]-other_pos[:,1]
+        zdist=cur_pos[2]-other_pos[:,2]
+
+        dist=np.sqrt(xdist**2.+ydist**2.+zdist**2.)
+
+        #NOTE: the matching/merging block below is a reconstruction of the rule
+        #described in the header comments; treat the details as an assumption
+        if np.min(dist) < dist_thresh:
+            j = int(np.argmin(dist)) + i + 1
+            ang, mis = rot.misorientation(grain_quats[:, i].reshape(4, 1),
+                                          grain_quats[:, j].reshape(4, 1),
+                                          (qsyms,))
+            if np.degrees(ang) < misorient_thresh:
+                dup_list = np.append(dup_list, j)
+                if np.abs(all_comp[i] - all_comp[j]) < comp_diff:
+                    #comparable completeness: average the two matched rows
+                    grain_data[i,:] = 0.5*(grain_data[i,:] + grain_data[j,:])
+                elif (all_comp[j] - all_comp[i]) > 0:
+                    #keep the data from the grain with the higher completeness
+                    grain_data[i,:]=grain_data[j,:]
+
+    grain_data=np.delete(grain_data,dup_list,axis=0)
+
+    print 'Removed %d Grains' % (len(dup_list))
+
+    grain_data[:,0]=np.arange(grain_data.shape[0])
+
+    return grain_data
+
+
+
+
+#%%
+
+
+def assemble_grain_data(grain_data_list,pos_offset=None,rotation_offset=None):
+    num_grain_files=len(grain_data_list)
+
+    num_grains_list=[None]*num_grain_files
+
+    for i in np.arange(num_grain_files):
+        num_grains_list[i]=grain_data_list[i].shape[0]
+
+    num_grains=np.sum(num_grains_list)
+
+    grain_data=np.zeros([num_grains,grain_data_list[0].shape[1]])
+
+    for i in np.arange(num_grain_files):
+
+        tmp=copy.copy(grain_data_list[i])
+
+        if pos_offset is not None:
+            pos_tile=np.tile(pos_offset[:,i],[num_grains_list[i],1])
+            tmp[:,6:9]=tmp[:,6:9]+pos_tile
+        #Needs Testing
+        if rotation_offset is not None:
+            rot_tile=np.tile(np.atleast_2d(rotation_offset[:,i]).T,[1,num_grains_list[i]])
+            quat_tile=rot.quatOfExpMap(rot_tile)
+            grain_quats=rot.quatOfExpMap(tmp[:,3:6].T)
+            new_quats=rot.quatProduct(grain_quats,quat_tile)
+
+            sinang = mutil.columnNorm(new_quats[1:,:])
+            ang=2.*np.arcsin(sinang)
+            axis = mutil.unitVector(new_quats[1:,:])
+            tmp[:,3:6]=np.tile(np.atleast_2d(ang).T,[1,3])*axis.T
+
+
+        grain_data[int(np.sum(num_grains_list[:i])):int(np.sum(num_grains_list[:(i+1)])),:]=tmp
+
+
+    grain_data[:,0]=np.arange(num_grains)
+    return grain_data
+
+
+
+#%% Load data
+
+mat_list = cpl.load(open(material_file_loc, 'r'))
+mat_idx = np.where([mat_list[i].name == mat_name for i in range(len(mat_list))])[0]
+
+# grab plane data, and useful things hanging off of it
+pd = mat_list[mat_idx[0]].planeData
+qsyms=sym.quatOfLaueGroup(pd.getLaueGroup())
+
+
+num_grain_files=len(grain_file_locs)
+
+grain_data_list=[None]*num_grain_files
+
+for i in np.arange(num_grain_files):
+
+    grain_data_list[i]=np.loadtxt(os.path.join(grain_file_locs[i],'grains.out'))
+
+grain_data=assemble_grain_data(grain_data_list,pos_offset,rot_offset)
+
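+# grains.out column convention assumed by the slicing above:
+#   0: grain ID, 1: completeness, 2: chi^2, 3:6 orientation (exp map),
+#   6:9 centroid tVec_c (mm), 9:15 vInv_s components, 15:21 ln(V) strains
+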
+grain_data=remove_duplicate_grains(grain_data,qsyms,dist,misorientation,completeness_diff)
+
+
+#%% Write data
+
+if output_data:
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    f = open(os.path.join(output_dir, 'grains.out'), 'w')
+
+    header_items = (
+        'grain ID', 'completeness', 'chi2',
+        'xi[0]', 'xi[1]', 'xi[2]', 'tVec_c[0]', 'tVec_c[1]', 'tVec_c[2]',
+        'vInv_s[0]', 'vInv_s[1]', 'vInv_s[2]', 'vInv_s[4]*sqrt(2)',
+        'vInv_s[5]*sqrt(2)', 'vInv_s[6]*sqrt(2)', 'ln(V[0,0])',
+        'ln(V[1,1])', 'ln(V[2,2])', 'ln(V[1,2])', 'ln(V[0,2])', 'ln(V[0,1])',
+        )
+    len_items = []
+    for i in header_items[1:]:
+        temp = len(i)
+        len_items.append(temp if temp > 19 else 19)  # for %19.12g
+    fmtstr = '#%13s ' + ' '.join(['%%%ds' % i for i in len_items]) + '\n'
+    f.write(fmtstr % header_items)
+    for i in np.arange(grain_data.shape[0]):
+        res_items = (
+            grain_data[i,0], grain_data[i,1], grain_data[i,2], grain_data[i,3], grain_data[i,4], grain_data[i,5],
+            grain_data[i,6], grain_data[i,7], grain_data[i,8], grain_data[i,9],
+            grain_data[i,10], grain_data[i,11], grain_data[i,12], grain_data[i,13],
+            grain_data[i,14], grain_data[i,15], grain_data[i,16], grain_data[i,17], grain_data[i,18],
+            grain_data[i,19], grain_data[i,20],
+            )
+        fmtstr = (
+            '%14d ' + ' '.join(['%%%d.12g' % i for i in len_items]) + '\n'
+        )
+        f.write(fmtstr % res_items)
+
+    f.close()
diff --git a/scripts/strength_extraction.py b/scripts/strength_extraction.py
new file mode 100644
index 00000000..d334e426
--- /dev/null
+++ b/scripts/strength_extraction.py
@@ -0,0 +1,159 @@
+# ============================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License (as published by the Free Software
+# Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# ============================================================
+#%% User Input
+###############################################################################
+hexrd_script_directory='###' #Needs post_process_stress.py from the scripts directory
+
+c_mat_C_file='###' # text file containing the stiffness matrix in the crystal coordinate system (6x6)
+
+schmid_tensor_directory='###'
+schmid_tensor_files=['###','###']
+
+num_load_steps=###
+
+processed_data_directory='###'
+analysis_stem='###'
+
+
+
+###############################################################################
+#%% #Import Modules
+import os, sys
+
+import numpy as np
+
+import copy
+
+from matplotlib import pyplot as plt
+
+import hexrd.fitting.fitpeak as fitpeaks
+
+from scipy.stats import gaussian_kde
+
+sys.path.append(hexrd_script_directory)
+import post_process_stress as stress_proc
+
+
+
+#%% Function For Extracting Strength
+###############################################################################
+
+
+
+def extract_strength(stress_data,ss_bnds,completeness_mat=None,threshold=0.8):
+
+    tau_star=np.zeros(len(stress_data))
+    w_tau=np.zeros(len(stress_data))
+    for i in np.arange(len(stress_data)):
+
+        if completeness_mat is not None:
+            grains_to_use=np.where(completeness_mat[i]>threshold)[0]
+            max_rss=np.max(stress_data[i]['RSS'][grains_to_use,ss_bnds[0]:ss_bnds[-1]],1)
+        else:
+            max_rss=np.max(stress_data[i]['RSS'][:,ss_bnds[0]:ss_bnds[-1]],1)
+
+        tau=np.linspace(0., 1.5*np.max(max_rss), num=2000)
+
+        G = gaussian_kde(max_rss)
+        tau_pdf = G.evaluate(tau)
+
+
+        maxPt=np.argmax(tau_pdf)
+        tmp_pdf=copy.copy(tau_pdf)
+        tmp_pdf[:maxPt]=np.max(tau_pdf)
+
+        pfit=fitpeaks.fit_pk_parms_1d([np.max(tau_pdf),tau[maxPt]+1e7,45e6],tau,tmp_pdf,pktype='tanh_stepdown')
+
+        tau_star[i]=pfit[1]
+        w_tau[i]=pfit[2]
+
+    return tau_star, w_tau
+
+
+#%% Function For Plotting Strength Curves
+
+def plot_strength_curve(tau_star,w_tau,macro_strain=None,plot_color='blue'):
+    if macro_strain is None:
+        macro_strain=np.arange(len(tau_star))
+
+    strain_fine=np.linspace(macro_strain[0],macro_strain[-1],1000)
+    interp_tau_star=np.interp(strain_fine,macro_strain,tau_star)
+    interp_w_tau=np.interp(strain_fine,macro_strain,w_tau)
+
+
+    plt.errorbar(strain_fine,interp_tau_star,yerr=interp_w_tau,color=plot_color, capthick=0)
+    plt.plot(macro_strain,tau_star,'s--',markerfacecolor=plot_color,markeredgecolor='k',markeredgewidth=1,color='k')
+    plt.plot(strain_fine,interp_tau_star+interp_w_tau,'k--',linewidth=2)
+    plt.plot(strain_fine,interp_tau_star-interp_w_tau,'k--',linewidth=2)
+
+    plt.grid()
+
+
+#%% Loading Data
+###############################################################################
+
+#Load Stiffness Matrix
+c_mat_C=np.loadtxt(c_mat_C_file)
+
+#Load Schmid Tensors
+num_ss_fams=len(schmid_tensor_files)
+num_per_sys=[None]*num_ss_fams
+T_vecs=[None]*num_ss_fams
+for i in np.arange(num_ss_fams):
+    T_vecs[i] = np.atleast_2d(np.loadtxt(os.path.join(schmid_tensor_directory,schmid_tensor_files[i])))
+    num_per_sys[i]=T_vecs[i].shape[0]
+
+num_ten=int(np.sum(num_per_sys))
+T=np.zeros([num_ten,3,3])
+counter=0
+for j in np.arange(num_ss_fams):
+    for i in np.arange(num_per_sys[j]):
+        T[counter,:,:]=T_vecs[j][i,:].reshape([3,3])
+        counter+=1
+
+#Load and Process Stress Data
+stress_data=[None]*(num_load_steps)
+completeness=[None]*(num_load_steps)
+
+for i in np.arange(num_load_steps):
+    print('Processing Load ' + str(i))
+    grain_data=np.loadtxt(os.path.join(processed_data_directory,analysis_stem + '%03d'%(i),'grains.out'))
+    completeness[i]=grain_data[:,1]
+    stress_data[i]=stress_proc.post_process_stress(grain_data,c_mat_C,T)
+
+
+
+#%% Extract Strengths from Different Slip System Families
+tau_star=[None]*num_ss_fams
+w_tau=[None]*num_ss_fams
+for i in np.arange(num_ss_fams):
+    tau_star[i], w_tau[i] = extract_strength(stress_data,[int(np.sum(num_per_sys[:i])),int(np.sum(num_per_sys[:(i+1)]))],completeness,0.8)
+
+#%% Plot Slip System Strength Curves
+plt.close('all')
+#NOTE: the index selects which slip-system family to plot; requires num_ss_fams > 2
+plot_strength_curve(tau_star[2],w_tau[2],macro_strain=None,plot_color='blue')
+
diff --git a/scripts/virtual_diffractometer.py b/scripts/virtual_diffractometer.py
new file mode 100644
index 00000000..2c6517e9
--- /dev/null
+++ b/scripts/virtual_diffractometer.py
@@ -0,0 +1,267 @@
+# ============================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License (as published by the Free Software
+# Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# ============================================================
+#%% User Input
+###############################################################################
+
+#File Locations
+grains_file='###' #HEXRD grains.out file
+matl_file='###' #HEXRD materials cpl
+active_matl='###'
+cfg_file='###' #HEXRD cfg yml
+instr_file='###' #HEXRD instrument yml
+
+
+#Output Image Location
+output_location='###'
+output_name='test' #Frame Cache Name
+
+#Script Options
+det_psf_fwhm=2.
+cts_per_event=1000.
+delta_ome = 0.25
+min_I=5.
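+
+#With these defaults the full 360 deg rotation is simulated in
+#int(360./delta_ome) = 1440 frames of delta_ome = 0.25 deg each
+#(nframes is computed exactly this way below)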
+
+
+###############################################################################
+#%%
+
+import os
+
+
+import cPickle as cpl
+
+import numpy as np
+import scipy as sp
+
+import yaml
+
+from hexrd import config
+
+from hexrd.xrd import distortion as dFuncs
+from hexrd.xrd import transforms_CAPI as xfcapi
+
+
+from hexrd.gridutil import cellIndices
+
+
+from hexrd.xrd.xrdutil import simulateGVecs
+
+#%% File Loading
+cfg = config.open(cfg_file)[0] # NOTE: always a list of cfg objects
+
+instr_cfg = yaml.load(open(instr_file, 'r'))
+
+mat_list = cpl.load(open(matl_file, 'r'))
+
+grain_params_list=np.loadtxt(grains_file)
+
+#%% Extract Quantities from Loaded data
+
+#Instrument Info
+# stack into array for later
+detector_params = np.hstack([
+    instr_cfg['detector']['transform']['tilt_angles'],
+    instr_cfg['detector']['transform']['t_vec_d'],
+    instr_cfg['oscillation_stage']['chi'],
+    instr_cfg['oscillation_stage']['t_vec_s'],
+    ])
+
+# pixel pitches
+pixel_pitch = cfg.instrument.detector.pixels.size
+
+# panel dimensions calculated from pixel pitches
+row_dim = pixel_pitch[0]*cfg.instrument.detector.pixels.rows
+col_dim = pixel_pitch[1]*cfg.instrument.detector.pixels.columns
+
+# panel is ( (xmin, ymin), (xmax, ymax) )
+panel_dims = (
+    (-0.5*col_dim, -0.5*row_dim),
+    ( 0.5*col_dim,  0.5*row_dim),
+    )
+
+detector_x_edges = np.arange(cfg.instrument.detector.pixels.columns+1)*pixel_pitch[1] + panel_dims[0][0]
+detector_y_edges = np.arange(cfg.instrument.detector.pixels.rows+1)*pixel_pitch[0] + panel_dims[0][1]
+
+# UGH! hard-coded distortion... still needs fixing when detector is rewritten
+if instr_cfg['detector']['distortion']['function_name'] == 'GE_41RT':
+    distortion = (dFuncs.GE_41RT,
+                  instr_cfg['detector']['distortion']['parameters'],
+                  )
+else:
+    distortion = None
+
+#Image Info
+nrows = int((panel_dims[1][1] - panel_dims[0][1]) / float(pixel_pitch[0]))
+ncols = int((panel_dims[1][0] - panel_dims[0][0]) / float(pixel_pitch[1]))
+row_edges = (np.arange(nrows+1)*pixel_pitch[0] + panel_dims[0][1])[::-1]
+col_edges = np.arange(ncols+1)*pixel_pitch[1] + panel_dims[0][0]
+
+
+nframes = int(360./float(delta_ome))
+ome_edges = np.arange(nframes + 1)*delta_ome - 180.
+
+
+#extract transform objects; rotations and translations
+# detector first, rotation, then translation
+# - rotation takes comps from det frame to lab
+rMat_d = xfcapi.makeDetectorRotMat(detector_params[:3])
+tVec_d = np.r_[detector_params[3:6]]
+
+# rotation stage (omega)
+# - chi is ccw tilt about lab X; rMat_s is omega dependent
+# - takes comps in sample to lab frame
+chi = detector_params[6]
+tVec_s = np.zeros((3,1))
+
+# crystal; this will be a list of things, computed from quaternions
+# - trivial case here...
+rMat_c = np.eye(3)
+tVec_c = np.zeros((3,1))
+
+
+
+#Material Info
+mat_name = cfg.material.active # str that is the material name in database
+
+# need to find the index of the active material
+mat_idx = np.where([mat_list[i].name == mat_name for i in range(len(mat_list))])[0]
+
+# grab plane data, and useful things hanging off of it
+plane_data = mat_list[mat_idx[0]].planeData  #first match for the active material
+plane_data.tThMax=np.radians(20)
+plane_data.set_exclusions(np.zeros(len(plane_data.exclusions), dtype=bool))
+
+
+#%% Filters For Point Spread
+def make_gaussian_filter(size,fwhm):
+    sigma=fwhm/(2.*np.sqrt(2.*np.log(2.)))
+#    size=[5,5]
+#    sigma=1.
+    gaussFilter=np.zeros(size)
+    cenRow=size[0]/2.
+    cenCol=size[1]/2.
+
+    pixRowCens=np.arange(size[0])+0.5
+    pixColCens=np.arange(size[1])+0.5
+
+    y=cenRow-pixRowCens
+    x=pixColCens-cenCol
+
+    xv, yv = np.meshgrid(x, y, sparse=False)
+
+    r=np.sqrt(xv**2.+yv**2.)
+    gaussFilter=np.exp(-r**2./(2*sigma**2))
+    gaussFilter=gaussFilter/gaussFilter.sum()
+
+
+    return gaussFilter
+
+def make_lorentzian_filter(size,fwhm):
+
+    gamma=fwhm/2.
+
+    lorentzianFilter=np.zeros(size)
+    cenRow=size[0]/2.
+    cenCol=size[1]/2.
+
+    pixRowCens=np.arange(size[0])+0.5
+    pixColCens=np.arange(size[1])+0.5
+
+    y=cenRow-pixRowCens
+    x=pixColCens-cenCol
+
+    xv, yv = np.meshgrid(x, y, sparse=False)
+
+    r=np.sqrt(xv**2.+yv**2.)
+    lorentzianFilter=gamma**2 / ((r)**2 + gamma**2)
+    lorentzianFilter=lorentzianFilter/lorentzianFilter.sum()
+
+
+    return lorentzianFilter
+
+#%%
+#Calculate Intercepts for diffraction events from grains
+
+pixel_data = []
+
+for ii in np.arange(grain_params_list.shape[0]):
+    print "processing grain %d..." %ii
+
+    simg = simulateGVecs(plane_data, detector_params, grain_params_list[ii,3:15],distortion=None)  #NOTE: the 'distortion' computed above is not applied here
+
+    valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps = simg
+
+    #ax.plot(valid_xy[:, 0], valid_xy[:, 1], 'b.', ms=2)
+    this_frame = sp.sparse.coo_matrix((nrows, ncols), dtype=np.uint16)
+    frame_indices = cellIndices(ome_edges, np.degrees(valid_ang[:, 2]))
+    i_row = cellIndices(row_edges, valid_xy[:, 1])
+    j_col = cellIndices(col_edges, valid_xy[:, 0])
+    pixel_data.append(np.vstack([i_row, j_col, frame_indices]))
+
+
+pixd = np.hstack(pixel_data)
+
+
+
+frame_cache_data=[sp.sparse.coo_matrix((nrows, ncols), dtype='uint16')]*nframes
+
+filter_size=int(np.round(det_psf_fwhm*5))
+if filter_size % 2 == 0:
+    filter_size+=1
+
+psf_filter=make_gaussian_filter([filter_size,filter_size],det_psf_fwhm)
+
+#Make pad for fast Fourier transform
+filterPad=np.zeros((nrows, ncols), dtype=float)
+filterPad[:psf_filter.shape[0],:psf_filter.shape[1]]=psf_filter
+filterPadTransform=np.fft.fft2(filterPad)
+
+
+#Build images and apply point spread
+for i in np.arange(nframes):
+    print "processing frame %d of %d" % (i,nframes)
+
+    this_frame = np.zeros((nrows, ncols), dtype=float)
+    these_ij = pixd[:2, pixd[2, :] == i]
+
+    this_frame[these_ij[0], these_ij[1]] += cts_per_event
+
+
+    this_frame_transform=np.fft.fft2(this_frame)
+    this_frame_convolved=np.real(np.fft.ifft2(this_frame_transform*filterPadTransform))
+    tmp=np.where(this_frame_convolved= tth_hi)
+    pd.exclusions = excl
+
+panel_id = instr_cfg['detectors'].keys()[0]
+
+d = instr.detectors[panel_id]
+
+pangs, pxys = d.make_powder_rings([tth_avg, ])
+
+#tth, peta = d.pixel_angles
+#Y, X = d.pixel_coords
+#xy = np.vstack([X.flatten(), Y.flatten()]).T
+aps = d.angularPixelSize(pxys[0])
+
+print "min angular pixel sizes: %.4f, %.4f" \
+    %(np.degrees(np.min(aps[:, 0])), np.degrees(np.min(aps[:, 1])))
+
+#%% set from looking at GUI
+tth_size = np.degrees(np.min(aps[:, 0]))
+eta_size = np.degrees(np.min(aps[:, 1]))
+
+tth0 = np.degrees(tth_avg)
+eta0 = 0.
+
+tth_range = np.degrees(tth_hi - tth_lo)
+eta_range = 360.
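+
+# For scale (hypothetical numbers): an angular pixel size of eta_size = 0.05 deg
+# over the full eta_range of 360 deg gives neta = int(360./0.05) = 7200 rows in
+# the eta/two-theta map built below, so fine detectors produce large arrays.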
+ +ntth = int(tth_range/tth_size) +neta = int(eta_range/eta_size) + +tth_vec = tth_size*(np.arange(ntth) - 0.5*ntth - 1) + tth0 +eta_vec = eta_size*(np.arange(neta) - 0.5*neta - 1) + eta0 + +angpts = np.meshgrid(eta_vec, tth_vec, indexing='ij') +gpts = xfc.anglesToGVec( + np.vstack([ + np.radians(angpts[1].flatten()), + np.radians(angpts[0].flatten()), + np.zeros(neta*ntth) + ]).T, bHat_l=d.bvec) + +xypts = xfc.gvecToDetectorXY( + gpts, + d.rmat, np.eye(3), np.eye(3), + d.tvec, np.zeros(3), np.zeros(3), + beamVec=d.bvec) + +img2 = d.interpolate_bilinear(xypts, average_frame).reshape(neta, ntth) +img3 = copy.deepcopy(img2) +borders = np.isnan(img2) +img2[borders] = 0. +img3[borders] = 0. +img3 += np.min(img3) + 1 +img3 = np.log(img3) +img3[borders] = np.nan + +extent = ( + np.min(angpts[1]), np.max(angpts[1]), + np.min(angpts[0]), np.max(angpts[0]) +) + +fig, ax = plt.subplots(2, 1, sharex=True, sharey=False) +ax[0].imshow(img3.reshape(neta, ntth), + interpolation='nearest', + cmap=cm.plasma, vmax=None, + extent=extent, + origin='lower') +ax[1].plot(angpts[1][0, :], np.sum(img2, axis=0)/img2.size) +ax[0].axis('tight') +ax[0].grid(True) +ax[1].grid(True) +ax[0].set_ylabel(r'$\eta$ [deg]', size=18) +ax[1].set_xlabel(r'$2\theta$ [deg]', size=18) +ax[1].set_ylabel(r'Intensity (arbitrary)', size=18) + +plt.show() + + + +#%% Multipeak Kludge + +def fit_pk_obj_1d_mpeak(p,x,f0,pktype,num_pks): + + f=np.zeros(len(x)) + p=np.reshape(p,[num_pks,p.shape[0]/num_pks]) + for ii in np.arange(num_pks): + if pktype == 'gaussian': + f=f+pkfuncs._gaussian1d_no_bg(p[ii],x) + elif pktype == 'lorentzian': + f=f+pkfuncs._lorentzian1d_no_bg(p[ii],x) + elif pktype == 'pvoigt': + f=f+pkfuncs._pvoigt1d_no_bg(p[ii],x) + elif pktype == 'split_pvoigt': + f=f+pkfuncs._split_pvoigt1d_no_bg(p[ii],x) + + + resd = f-f0 + return resd + + + +#%% +#plt.close('all') + +num_tth=len(pd.getTTh()) + +x=angpts[1][0, :] +f=np.sum(img2, axis=0)/img2.size +pktype='pvoigt' +num_pks=num_tth + +ftol=1e-6 +xtol=1e-6 + +fitArgs=(x,f,pktype,num_pks) + +tth=matl.planeData.getTTh()*180./np.pi + + +p0=np.zeros([num_tth,4]) + +for ii in np.arange(num_tth): + pt=np.argmin(np.abs(x-tth[ii])) + + p0[ii,:]=[f[pt],tth[ii],0.1,0.5] + + + +p, outflag = optimize.leastsq(fit_pk_obj_1d_mpeak, p0, args=fitArgs,ftol=ftol,xtol=xtol) + +p=np.reshape(p,[num_pks,p.shape[0]/num_pks]) +f_fit=np.zeros(len(x)) + +for ii in np.arange(num_pks): + f_fit=f_fit+pkfuncs._pvoigt1d_no_bg(p[ii],x) + + +#plt.plot(x,f,'x') +#plt.hold('true') +#plt.plot(x,f_fit) +ax[1].plot(x, f_fit, 'm+', ms=1) + +#%% +fit_tths = p[:, 1] +fit_dsps = 0.5*wlen/np.sin(0.5*np.radians(fit_tths)) +nrml_strains = fit_dsps/pd.getPlaneSpacings() - 1. 
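+# The two lines above are just Bragg's law: lambda = 2*d*sin(theta), so
+# d = 0.5*wlen/sin(0.5*two_theta), and the normal strain per ring follows as
+# d/d0 - 1 with d0 = pd.getPlaneSpacings()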
+
+print nrml_strains
+print "avg normal strain: %.3e" %np.average(nrml_strains)
\ No newline at end of file
diff --git a/share/example_calibration.yml b/share/example_calibration.yml
index 0c3f6ae2..30ef1db8 100644
--- a/share/example_calibration.yml
+++ b/share/example_calibration.yml
@@ -1,23 +1,25 @@
+beam:
+  energy: 71.6760000000000
+  vector: {azimuth: 90.0, polar_angle: 90.0}
 calibration_crystal:
   grain_id: 0
-  inv_stretch: [0.9999910690173981, 0.9999801645520292, 1.0000532478330082, -7.6206360506652305e-06,
-    2.3167627263591607e-06, -5.3587098260888435e-06]
+  inv_stretch: [1.0, 1.0, 1.0, 0.0, 0.0, 0.0]
   orientation: [0.6691581915988989, -0.9864605537384611, 0.7367040542122328]
-  position: [6.724776361571607e-05, 1.576112881919417e-05, 0.002666490586467067]
-detector:
-  distortion:
-    function_name: GE_41RT
-    parameters: [-1.19340383e-05, -9.11913398e-05, -0.000511540815, 2.0, 2.0, 2.0]
-  id: GE
-  pixels:
-    columns: 2048
-    rows: 2048
-    size: [0.2, 0.2]
-  saturation_level: 14000.0
-  transform:
-    t_vec_d: [-1.4481037291976095, -3.233712371397272, -1050.649899464198]
-    tilt_angles: [0.0005634301421575493, -0.003161010511305945, -0.00230772467063694]
+  position: [6.724776361571607e-05, 0.0, 0.002666490586467067]
+detectors:
+  ge3:
+    distortion:
+      function_name: GE_41RT
+      parameters: [-1.19340383e-05, -9.11913398e-05, -0.000511540815, 2.0, 2.0, 2.0]
+    pixels:
+      columns: 2048
+      rows: 2048
+      size: [0.2, 0.2]
+    saturation_level: 14000.0
+    transform:
+      t_vec_d: [-9.42714847e-01, -6.56494503e-01, -9.53912093e+02]
+      tilt_angles: [6.87105395e-03, 4.28568797e-04, 0.00000000e+00]
 oscillation_stage:
-  chi: -0.0009114106199393461
+  chi: 0.0
   t_vec_s: [0.0, 0.0, 0.0]
diff --git a/share/pp_dexela.py b/share/pp_dexela.py
new file mode 100644
index 00000000..4bbce863
--- /dev/null
+++ b/share/pp_dexela.py
@@ -0,0 +1,101 @@
+import time
+import os
+
+from hexrd import imageseries
+
+PIS = imageseries.process.ProcessedImageSeries
+
+class PP_Dexela(object):
+    """PP_Dexela"""
+    PROCFMT = 'frame-cache'
+    RAWFMT = 'hdf5'
+    RAWPATH = '/imageseries'
+    DARKPCTILE = 50
+
+    def __init__(self, fname, omw, flips, frame_start=0):
+        """Constructor for PP_Dexela"""
+        #
+        self.fname = fname
+        self.omwedges = omw
+        self.flips = flips
+        self.frame_start = frame_start
+        self.use_frame_list = (self.frame_start > 0)
+        self.raw = imageseries.open(self.fname, self.RAWFMT, path=self.RAWPATH)
+        self._dark = None
+
+        print 'On Init: ', self.nframes, self.fname, self.omwedges.nframes,\
+            len(self.raw)
+        return
+
+    @property
+    def oplist(self):
+        return [('dark', self.dark)] + self.flips
+
+    @property
+    def framelist(self):
+        return range(self.frame_start, self.nframes)
+    #
+    # ============================== API
+    #
+    @property
+    def nframes(self):
+        return self.omwedges.nframes
+
+    def omegas(self):
+        return self.omwedges.omegas
+
+    def save_omegas(self, fname):
+        self.omwedges.save_omegas(fname)
+
+    def processed(self):
+        kw = {}
+        if self.use_frame_list:
+            kw = dict(frame_list=self.framelist)
+
+        return PIS(self.raw, self.oplist, **kw)
+
+    @property
+    def dark(self):
+        """build and return dark image"""
+        nframes = 50  # property getters take no arguments; frame budget fixed here
+        if self._dark is None:
+            usenframes = min(nframes, self.nframes)
+            print "building dark image using %s frames (may take a while) ..."\
+                % usenframes
" % usenframes + start = time.clock() + self._dark = imageseries.stats.percentile( + self.raw, self.DARKPCTILE, nframes=usenframes + ) + elapsed = (time.clock() - start) + print "done building background (dakr) image: elapsed time is %f seconds" \ + % elapsed + + return self._dark + + def save_processed(self, name, threshold): + dname = '%s-fcache-dir' % name + tcname = '%s-fcache-tmp.yml' % name + fcname = '%s-fcache.yml' % name + cache = '%s-cachefile.npz' % name + omname = '%s-omegas.npy' % name + + pname = lambda s: os.path.join(dname, s) # prepend fc directory + + os.mkdir(dname) + + # Steps: + # * write frame cache with no omegas to temporary file + # * write omegas to file + # * modify temporary file to include omegas + imageseries.write(self.processed(), pname(tcname), self.PROCFMT, + threshold=threshold, + cache_file=cache) + self.save_omegas(pname(omname)) + # modify yaml + with open(pname(tcname), 'r') as f: + s = f.read() + m0 = 'meta: {}' + m1 = 'meta:\n omega: ! load-numpy-array %s' % omname + with open(pname(fcname), 'w') as f: + f.write(s.replace(m0, m1)) + os.remove(pname(tcname)) + + pass # end class diff --git a/share/pp_init.py b/share/pp_init.py new file mode 100644 index 00000000..3c2cf66f --- /dev/null +++ b/share/pp_init.py @@ -0,0 +1,50 @@ +import os + +from hexrd.imageseries import omega +from pp_dexela import PP_Dexela + +CHESS_BASE = '/nfs/chess/raw/current/f2/shade-560-1/LSHR-6' +CHESS_TMPL = '%d/ff/ff2_%05d.h5' + +def h5name(scan, file, base=CHESS_BASE): + path = CHESS_TMPL % (scan, file) + return os.path.join(base, path) + +# ==================== Inputs (should not need to alter above this line) + +## Room temp +#raw_scannumber = 32 +#raw_filenumber = 35 + +# ROOM TEMP +raw_scannumber = 81 +raw_filenumber = 45 + +## 100C +#raw_scannumber = 82 +#raw_filenumber = 46 + +## 300C +#raw_scannumber = 83 +#raw_filenumber = 47 + +flips = [('flip', 't'), ('flip', 'hv') ] + +nframes = 100 + +ostart = 0 +ostep = 0.25 +fstart = 5 +threshold = 150 + +# ==================== End Inputs (should not need to alter below this line) + +input_name = h5name(raw_scannumber, raw_filenumber) +output_name = input_name.split('/')[-1].split('.')[0] + +ostop = ostart + nframes*ostep +omw = omega.OmegaWedges(nframes) +omw.addwedge(ostart, ostop, nframes) + +ppd = PP_Dexela(input_name, omw, flips, frame_start=fstart) +ppd.save_processed(output_name, threshold) diff --git a/versioneer.py b/versioneer.py index 96d30a62..c010f63e 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,5 +1,5 @@ -# Version: 0.12 +# Version: 0.15 """ The Versioneer @@ -10,8 +10,12 @@ * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy - -[![Build Status](https://travis-ci.org/warner/python-versioneer.png?branch=master)](https://travis-ci.org/warner/python-versioneer) +* [![Latest Version] +(https://pypip.in/version/versioneer/badge.svg?style=flat) +](https://pypi.python.org/pypi/versioneer/) +* [![Build Status] +(https://travis-ci.org/warner/python-versioneer.png?branch=master) +](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. 
The goal is to remove the tedious and error-prone "update @@ -23,8 +27,8 @@ ## Quick Install * `pip install versioneer` to somewhere to your $PATH -* run `versioneer-installer` in your source tree: this installs `versioneer.py` -* follow the instructions below (also in the `versioneer.py` docstring) +* add a `[versioneer]` section to your setup.cfg (see below) +* run `versioneer install` in your source tree, commit the results ## Version Identifiers @@ -53,7 +57,7 @@ enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, -for example 'git describe --tags --dirty --always' reports things like +for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. @@ -67,16 +71,19 @@ Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. However, -when you use "setup.py build" or "setup.py sdist", `_version.py` in the new -copy is replaced by a small static file that contains just the generated -version data. +dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name -during the "git archive" command. As a result, generated tarballs will +during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. +To allow `setup.py` to compute a version too, a `versioneer.py` is added to +the top level of your source tree, next to `setup.py` and the `setup.cfg` +that configures it. This overrides several distutils/setuptools commands to +compute the version when invoked, and changes `setup.py build` and `setup.py +sdist` to replace `_version.py` with a small static file that contains just +the generated version data. ## Installation @@ -84,6 +91,10 @@ * `VCS`: the version control system you use. Currently accepts "git". +* `style`: the style of version string to be produced. See "Styles" below for + details. Defaults to "pep440", which looks like + `TAG[+DISTANCE.gSHORTHASH[.dirty]]`. + * `versionfile_source`: A project-relative pathname into which the generated version strings should @@ -91,7 +102,7 @@ `__init__.py` file, so it can be imported at runtime. If your project uses `src/myproject/__init__.py`, this should be `src/myproject/_version.py`. This file should be checked in to your VCS as usual: the copy created below - by `setup.py versioneer` will include code that parses expanded VCS + by `setup.py setup_versioneer` will include code that parses expanded VCS keywords in generated tarballs. The 'build' and 'sdist' commands will replace it with a copy that has just the calculated version string. @@ -99,11 +110,11 @@ therefore never import `_version.py`), since "setup.py sdist" -based trees still need somewhere to record the pre-calculated version strings. Anywhere in the source tree should do. 
If there is a `__init__.py` next to your - `_version.py`, the `setup.py versioneer` command (described below) will - append some `__version__`-setting assignments, if they aren't already + `_version.py`, the `setup.py setup_versioneer` command (described below) + will append some `__version__`-setting assignments, if they aren't already present. -* `versionfile_build`: +* `versionfile_build`: Like `versionfile_source`, but relative to the build directory instead of the source directory. These will differ when your setup.py uses @@ -127,41 +138,53 @@ * `parentdir_prefix`: - a string, frequently the same as tag_prefix, which appears at the start of - all unpacked tarball filenames. If your tarball unpacks into - 'myproject-1.2.0', this should be 'myproject-'. + a optional string, frequently the same as tag_prefix, which appears at the + start of all unpacked tarball filenames. If your tarball unpacks into + 'myproject-1.2.0', this should be 'myproject-'. To disable this feature, + just omit the field from your `setup.cfg`. -This tool provides one script, named `versioneer-installer`. That script does -one thing: write a copy of `versioneer.py` into the current directory. +This tool provides one script, named `versioneer`. That script has one mode, +"install", which writes a copy of `versioneer.py` into the current directory +and runs `versioneer.py setup` to finish the installation. To versioneer-enable your project: -* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your - source tree. +* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and + populating it with the configuration values you decided earlier (note that + the option names are not case-sensitive): + + ```` + [versioneer] + VCS = git + style = pep440 + versionfile_source = src/myproject/_version.py + versionfile_build = myproject/_version.py + tag_prefix = "" + parentdir_prefix = myproject- + ```` + +* 2: Run `versioneer install`. This will do the following: -* 2: add the following lines to the top of your `setup.py`, with the - configuration values you decided earlier: + * copy `versioneer.py` into the top of your source tree + * create `_version.py` in the right place (`versionfile_source`) + * modify your `__init__.py` (if one exists next to `_version.py`) to define + `__version__` (by calling a function from `_version.py`) + * modify your `MANIFEST.in` to include both `versioneer.py` and the + generated `_version.py` in sdist tarballs - import versioneer - versioneer.VCS = 'git' - versioneer.versionfile_source = 'src/myproject/_version.py' - versioneer.versionfile_build = 'myproject/_version.py' - versioneer.tag_prefix = '' # tags are like 1.2.0 - versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0' + `versioneer install` will complain about any problems it finds with your + `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all + the problems. -* 3: add the following arguments to the setup() call in your setup.py: +* 3: add a `import versioneer` to your setup.py, and add the following + arguments to the setup() call: version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), -* 4: now run `setup.py versioneer`, which will create `_version.py`, and will - modify your `__init__.py` (if one exists next to `_version.py`) to define - `__version__` (by calling a function from `_version.py`). It will also - modify your `MANIFEST.in` to include both `versioneer.py` and the generated - `_version.py` in sdist tarballs. 
- -* 5: commit these changes to your VCS. To make sure you won't forget, - `setup.py versioneer` will mark everything it touched for addition. +* 4: commit these changes to your VCS. To make sure you won't forget, + `versioneer install` will mark everything it touched for addition using + `git add`. Don't forget to add `setup.py` and `setup.cfg` too. ## Post-Installation Usage @@ -181,9 +204,8 @@ * 1: git tag 1.0 * 2: git push; git push --tags -Currently, all version strings must be based upon a tag. Versioneer will -report "unknown" until your tree has at least one tag in its history. This -restriction will be fixed eventually (see issue #12). +Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at +least one tag in its history. ## Version-String Flavors @@ -192,64 +214,109 @@ `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. -Both functions return a dictionary with different keys for different flavors -of the version string: +Both functions return a dictionary with different flavors of version +information: + +* `['version']`: A condensed version string, rendered using the selected + style. This is the most commonly used value for the project's version + string. The default "pep440" style yields strings like `0.11`, + `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section + below for alternative styles. + +* `['full-revisionid']`: detailed revision identifier. For Git, this is the + full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". -* `['version']`: condensed tag+distance+shortid+dirty identifier. For git, - this uses the output of `git describe --tags --dirty --always` but strips - the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree - is like the "1076c97" commit but has uncommitted changes ("-dirty"), and - that this commit is two revisions ("-2-") beyond the "0.11" tag. For - released software (exactly equal to a known tag), the identifier will only - contain the stripped tag, e.g. "0.11". +* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that + this is only accurate if run in a VCS checkout, otherwise it is likely to + be False or None -* `['full']`: detailed revision identifier. For Git, this is the full SHA1 - commit id, followed by "-dirty" if the tree contains uncommitted changes, - e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty". +* `['error']`: if the version string could not be computed, this will be set + to a string describing the problem, otherwise it will be None. It may be + useful to throw an exception in setup.py if this is set, to avoid e.g. + creating tarballs with a version string of "unknown". -Some variants are more useful than others. Including `full` in a bug report -should allow developers to reconstruct the exact code being tested (or -indicate the presence of local changes that should be shared with the +Some variants are more useful than others. Including `full-revisionid` in a +bug report should allow developers to reconstruct the exact code being tested +(or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. -In the future, this will also include a -[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor -(e.g. `1.2.post0.dev123`). 
This loses a lot of information (and has no room -for a hash-based revision id), but is safe to use in a `setup.py` -"`version=`" argument. It also enables tools like *pip* to compare version -strings and evaluate compatibility constraint declarations. - -The `setup.py versioneer` command adds the following text to your -`__init__.py` to place a basic version in `YOURPROJECT.__version__`: +The installer adds the following text to your `__init__.py` to place a basic +version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions +## Styles + +The setup.cfg `style=` configuration controls how the VCS information is +rendered into a version string. + +The default style, "pep440", produces a PEP440-compliant string, equal to the +un-prefixed tag name for actual releases, and containing an additional "local +version" section with more detail for in-between builds. For Git, this is +TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags +--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the +tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and +that this commit is two revisions ("+2") beyond the "0.11" tag. For released +software (exactly equal to a known tag), the identifier will only contain the +stripped tag, e.g. "0.11". + +Other styles are available. See details.md in the Versioneer source tree for +descriptions. + +## Debugging + +Versioneer tries to avoid fatal errors: if something goes wrong, it will tend +to return a version of "0+unknown". To investigate the problem, run `setup.py +version`, which will run the version-lookup code in a verbose mode, and will +display the full contents of `get_versions()` (including the `error` string, +which may help identify what went wrong). + ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) -* re-run `versioneer-installer` in your source tree to replace your copy of - `versioneer.py` -* edit `setup.py`, if necessary, to include any new configuration settings +* edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes -* re-run `setup.py versioneer` to replace `SRC/_version.py` +* re-run `versioneer install` in your source tree, to replace + `SRC/_version.py` * commit any changed files -### Upgrading from 0.10 to 0.11 +### Upgrading to 0.15 -You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running -`setup.py versioneer`. This will enable the use of additional version-control -systems (SVN, etc) in the future. +Starting with this version, Versioneer is configured with a `[versioneer]` +section in your `setup.cfg` file. Earlier versions required the `setup.py` to +set attributes on the `versioneer` module immediately after import. The new +version will refuse to run (raising an exception during import) until you +have provided the necessary `setup.cfg` section. + +In addition, the Versioneer package provides an executable named +`versioneer`, and the installation process is driven by running `versioneer +install`. In 0.14 and earlier, the executable was named +`versioneer-installer` and was run without an argument. + +### Upgrading to 0.14 + +0.14 changes the format of the version string. 0.13 and earlier used +hyphen-separated strings like "0.11-2-g1076c97-dirty". 
0.14 and beyond use a +plus-separated "local version" section strings, with dot-separated +components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old +format, but should be ok with the new one. ### Upgrading from 0.11 to 0.12 Nothing special. +### Upgrading from 0.10 to 0.11 + +You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running +`setup.py setup_versioneer`. This will enable the use of additional +version-control systems (SVN, etc) in the future. + ## Future Directions This tool is designed to make it easily extended to other version-control @@ -272,27 +339,106 @@ """ -import os, sys, re, subprocess, errno -from setuptools import Command -from setuptools.command.sdist import sdist as _sdist -from setuptools.command.build_py import build_py as _build_py +from __future__ import print_function +try: + import configparser +except ImportError: + import ConfigParser as configparser +import errno +import json +import os +import re +import subprocess +import sys -# these configuration settings will be overridden by setup.py after it -# imports us -versionfile_source = None -versionfile_build = None -tag_prefix = None -parentdir_prefix = None -VCS = None + +class VersioneerConfig: + pass + + +def get_root(): + # we require that all commands are run from the project root, i.e. the + # directory that contains setup.py, setup.cfg, and versioneer.py . + root = os.path.realpath(os.path.abspath(os.getcwd())) + setup_py = os.path.join(root, "setup.py") + versioneer_py = os.path.join(root, "versioneer.py") + if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + # allow 'python path/to/setup.py COMMAND' + root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) + setup_py = os.path.join(root, "setup.py") + versioneer_py = os.path.join(root, "versioneer.py") + if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + err = ("Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND').") + raise VersioneerBadRootError(err) + try: + # Certain runtime workflows (setup.py install/develop in a setuptools + # tree) execute all dependencies in a single python process, so + # "versioneer" may be imported multiple times, and python's shared + # module-import table will cache the first one. So we can't use + # os.path.dirname(__file__), as that will find whichever + # versioneer.py was first imported, even in later projects. + me = os.path.realpath(os.path.abspath(__file__)) + if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: + print("Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py)) + except NameError: + pass + return root + + +def get_config_from_root(root): + # This might raise EnvironmentError (if setup.cfg is missing), or + # configparser.NoSectionError (if it lacks a [versioneer] section), or + # configparser.NoOptionError (if it lacks "VCS="). See the docstring at + # the top of versioneer.py for instructions on writing your setup.cfg . 
+ setup_cfg = os.path.join(root, "setup.cfg") + parser = configparser.SafeConfigParser() + with open(setup_cfg, "r") as f: + parser.readfp(f) + VCS = parser.get("versioneer", "VCS") # mandatory + + def get(parser, name): + if parser.has_option("versioneer", name): + return parser.get("versioneer", name) + return None + cfg = VersioneerConfig() + cfg.VCS = VCS + cfg.style = get(parser, "style") or "" + cfg.versionfile_source = get(parser, "versionfile_source") + cfg.versionfile_build = get(parser, "versionfile_build") + cfg.tag_prefix = get(parser, "tag_prefix") + cfg.parentdir_prefix = get(parser, "parentdir_prefix") + cfg.verbose = get(parser, "verbose") + return cfg + + +class NotThisMethod(Exception): + pass # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + def decorate(f): + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: + dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr @@ -303,7 +449,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): if e.errno == errno.ENOENT: continue if verbose: - print("unable to run %s" % args[0]) + print("unable to run %s" % dispcmd) print(e) return None else: @@ -311,14 +457,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() - if sys.version >= '3': + if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: - print("unable to run %s (error)" % args[0]) + print("unable to run %s (error)" % dispcmd) return None return stdout - LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag @@ -327,24 +472,66 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): # that just contains the computed version number. # This file is released into the public domain. Generated by -# versioneer-0.12 (https://github.com/warner/python-versioneer) +# versioneer-0.15 (https://github.com/warner/python-versioneer) + +import errno +import os +import re +import subprocess +import sys + + +def get_keywords(): + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" + git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" + keywords = {"refnames": git_refnames, "full": git_full} + return keywords + -# these strings will be replaced by git during git-archive -git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" -git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" +class VersioneerConfig: + pass -# these strings are filled in when 'setup.py versioneer' creates _version.py -tag_prefix = "%(TAG_PREFIX)s" -parentdir_prefix = "%(PARENTDIR_PREFIX)s" -versionfile_source = "%(VERSIONFILE_SOURCE)s" -import os, sys, re, subprocess, errno +def get_config(): + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "%(STYLE)s" + cfg.tag_prefix = "%(TAG_PREFIX)s" + cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" + cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + pass + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + def decorate(f): + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: + dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr @@ -355,7 +542,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): if e.errno == errno.ENOENT: continue if verbose: - print("unable to run %%s" %% args[0]) + print("unable to run %%s" %% dispcmd) print(e) return None else: @@ -363,26 +550,30 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to find command, tried %%s" %% (commands,)) return None stdout = p.communicate()[0].strip() - if sys.version >= '3': + if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: - print("unable to run %%s (error)" %% args[0]) + print("unable to run %%s (error)" %% dispcmd) return None return stdout -def versions_from_parentdir(parentdir_prefix, root, verbose=False): +def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %% - (root, dirname, parentdir_prefix)) - return None - return {"version": dirname[len(parentdir_prefix):], "full": ""} + print("guessing rootdir is '%%s', but '%%s' doesn't start with " + "prefix '%%s'" %% (root, dirname, parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None} + +@register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, @@ -390,7 +581,7 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs,"r") + f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -405,14 +596,16 @@ def git_get_keywords(versionfile_abs): pass return keywords -def git_versions_from_keywords(keywords, tag_prefix, verbose=False): + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: - return {} # keyword-finding function failed to find keywords + raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") - return {} # unexpanded, so not in an unpacked git-archive tarball + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. @@ -437,16 +630,20 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) - return { "version": r, - "full": keywords["full"].strip() } - # no suitable tags, so we use the full revision id + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None + } + # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: - print("no suitable tags, using full revision id") - return { "version": keywords["full"].strip(), - "full": keywords["full"].strip() } + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags"} -def git_versions_from_vcs(tag_prefix, root, verbose=False): +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. 
This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, @@ -455,55 +652,282 @@ def git_versions_from_vcs(tag_prefix, root, verbose=False): if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %%s" %% root) - return {} + raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"], - cwd=root) - if stdout is None: - return {} - if not stdout.startswith(tag_prefix): - if verbose: - print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix)) - return {} - tag = stdout[len(tag_prefix):] - stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if stdout is None: - return {} - full = stdout.strip() - if tag.endswith("-dirty"): - full += "-dirty" - return {"version": tag, "full": full} - - -def get_versions(default={"version": "unknown", "full": ""}, verbose=False): + # if there is a tag, this yields TAG-NUM-gHEX[-dirty] + # if there are no tags, this yields HEX[-dirty] (no NUM) + describe_out = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long"], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%%s'" + %% describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%%s' doesn't start with prefix '%%s'" + print(fmt %% (full_tag, tag_prefix)) + pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" + %% (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + return pieces + + +def plus_or_dot(pieces): + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + # now build up version string, with post-release "local version + # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + # exceptions: + # 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + # TAG[.post.devDISTANCE] . No -dirty + + # exceptions: + # 1: no tags. 0.post.devDISTANCE + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%%d" %% pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%%d" %% pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that + # .dev0 sorts backwards (a dirty tree will appear "older" than the + # corresponding clean one), but you shouldn't be releasing software with + # -dirty anyways. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + return rendered + + +def render_pep440_old(pieces): + # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty + # --always' + + # exceptions: + # 1: no tags. HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty + # --always -long'. The distance/hash is unconditional. + + # exceptions: + # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"]} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%%s'" %% style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None} + + +def get_versions(): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. - keywords = { "refnames": git_refnames, "full": git_full } - ver = git_versions_from_keywords(keywords, tag_prefix, verbose) - if ver: - return ver + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass try: - root = os.path.abspath(__file__) + root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. - for i in range(len(versionfile_source.split(os.sep))): + for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: - return default + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree"} - return (git_versions_from_vcs(tag_prefix, root, verbose) - or versions_from_parentdir(parentdir_prefix, root, verbose) - or default) + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version"} ''' + +@register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, @@ -511,7 +935,7 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs,"r") + f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -526,14 +950,16 @@ def git_get_keywords(versionfile_abs): pass return keywords -def git_versions_from_keywords(keywords, tag_prefix, verbose=False): + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: - return {} # keyword-finding function failed to find keywords + raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") - return {} # unexpanded, so not in an unpacked git-archive tarball + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. @@ -558,16 +984,20 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) - return { "version": r, - "full": keywords["full"].strip() } - # no suitable tags, so we use the full revision id + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None + } + # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: - print("no suitable tags, using full revision id") - return { "version": keywords["full"].strip(), - "full": keywords["full"].strip() } + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags"} -def git_versions_from_vcs(tag_prefix, root, verbose=False): +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. 
This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, @@ -576,27 +1006,76 @@ def git_versions_from_vcs(tag_prefix, root, verbose=False): if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) - return {} + raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"], - cwd=root) - if stdout is None: - return {} - if not stdout.startswith(tag_prefix): - if verbose: - print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) - return {} - tag = stdout[len(tag_prefix):] - stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if stdout is None: - return {} - full = stdout.strip() - if tag.endswith("-dirty"): - full += "-dirty" - return {"version": tag, "full": full} + # if there is a tag, this yields TAG-NUM-gHEX[-dirty] + # if there are no tags, this yields HEX[-dirty] (no NUM) + describe_out = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long"], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): @@ -631,183 +1110,462 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) -def versions_from_parentdir(parentdir_prefix, root, verbose=False): + +def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. 
dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % - (root, dirname, parentdir_prefix)) - return None - return {"version": dirname[len(parentdir_prefix):], "full": ""} + print("guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None} SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.12) from +# This file was generated by 'versioneer.py' (0.15) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. -version_version = '%(version)s' -version_full = '%(full)s' -def get_versions(default={}, verbose=False): - return {'version': version_version, 'full': version_full} +import json +import sys + +version_json = ''' +%s +''' # END VERSION_JSON + +def get_versions(): + return json.loads(version_json) """ -DEFAULT = {"version": "unknown", "full": "unknown"} def versions_from_file(filename): - versions = {} try: with open(filename) as f: - for line in f.readlines(): - mo = re.match("version_version = '([^']+)'", line) - if mo: - versions["version"] = mo.group(1) - mo = re.match("version_full = '([^']+)'", line) - if mo: - versions["full"] = mo.group(1) + contents = f.read() except EnvironmentError: - return {} + raise NotThisMethod("unable to read _version.py") + mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + raise NotThisMethod("no version_json in _version.py") + return json.loads(mo.group(1)) - return versions def write_to_version_file(filename, versions): + os.unlink(filename) + contents = json.dumps(versions, sort_keys=True, + indent=1, separators=(",", ": ")) with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % versions) + f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) -def get_root(): - try: - return os.path.dirname(os.path.abspath(__file__)) - except NameError: - return os.path.dirname(os.path.abspath(sys.argv[0])) +def plus_or_dot(pieces): + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + # now build up version string, with post-release "local version + # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + # exceptions: + # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + # TAG[.post.devDISTANCE] . No -dirty + + # exceptions: + # 1: no tags. 
0.post.devDISTANCE + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that + # .dev0 sorts backwards (a dirty tree will appear "older" than the + # corresponding clean one), but you shouldn't be releasing software with + # -dirty anyways. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. + + # exceptions: + # 1: no tags. 0.postDISTANCE[.dev0] + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + -def vcs_function(vcs, suffix): - return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None) +def render_git_describe(pieces): + # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty + # --always' -def get_versions(default=DEFAULT, verbose=False): + # exceptions: + # 1: no tags. HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty + # --always -long'. The distance/hash is unconditional. + + # exceptions: + # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"]} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None} + + +class VersioneerBadRootError(Exception): + pass + + +def get_versions(verbose=False): # returns dict with two keys: 'version' and 'full' - assert versionfile_source is not None, "please set versioneer.versionfile_source" - assert tag_prefix is not None, "please set versioneer.tag_prefix" - assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix" - assert VCS is not None, "please set versioneer.VCS" - - # I am in versioneer.py, which must live at the top of the source tree, - # which we use to compute the root directory. py2exe/bbfreeze/non-CPython - # don't have __file__, in which case we fall back to sys.argv[0] (which - # ought to be the setup.py script). We prefer __file__ since that's more - # robust in cases where setup.py was invoked in some weird way (e.g. pip) + + if "versioneer" in sys.modules: + # see the discussion in cmdclass.py:get_cmdclass() + del sys.modules["versioneer"] + root = get_root() - versionfile_abs = os.path.join(root, versionfile_source) + cfg = get_config_from_root(root) + + assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" + handlers = HANDLERS.get(cfg.VCS) + assert handlers, "unrecognized VCS '%s'" % cfg.VCS + verbose = verbose or cfg.verbose + assert cfg.versionfile_source is not None, \ + "please set versioneer.versionfile_source" + assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - # extract version from first of _version.py, VCS command (e.g. 'git + versionfile_abs = os.path.join(root, cfg.versionfile_source) + + # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
- get_keywords_f = vcs_function(VCS, "get_keywords") - versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords") - if get_keywords_f and versions_from_keywords_f: - vcs_keywords = get_keywords_f(versionfile_abs) - ver = versions_from_keywords_f(vcs_keywords, tag_prefix) - if ver: - if verbose: print("got version from expanded keyword %s" % ver) + get_keywords_f = handlers.get("get_keywords") + from_keywords_f = handlers.get("keywords") + if get_keywords_f and from_keywords_f: + try: + keywords = get_keywords_f(versionfile_abs) + ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) + if verbose: + print("got version from expanded keyword %s" % ver) return ver + except NotThisMethod: + pass - ver = versions_from_file(versionfile_abs) - if ver: - if verbose: print("got version from file %s %s" % (versionfile_abs,ver)) + try: + ver = versions_from_file(versionfile_abs) + if verbose: + print("got version from file %s %s" % (versionfile_abs, ver)) return ver + except NotThisMethod: + pass + + from_vcs_f = handlers.get("pieces_from_vcs") + if from_vcs_f: + try: + pieces = from_vcs_f(cfg.tag_prefix, root, verbose) + ver = render(pieces, cfg.style) + if verbose: + print("got version from VCS %s" % ver) + return ver + except NotThisMethod: + pass - versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs") - if versions_from_vcs_f: - ver = versions_from_vcs_f(tag_prefix, root, verbose) - if ver: - if verbose: print("got version from VCS %s" % ver) + try: + if cfg.parentdir_prefix: + ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + if verbose: + print("got version from parentdir %s" % ver) return ver + except NotThisMethod: + pass - ver = versions_from_parentdir(parentdir_prefix, root, verbose) - if ver: - if verbose: print("got version from parentdir %s" % ver) - return ver + if verbose: + print("unable to compute version") - if verbose: print("got version from default %s" % default) - return default + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, "error": "unable to compute version"} -def get_version(verbose=False): - return get_versions(verbose=verbose)["version"] -class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - def initialize_options(self): - pass - def finalize_options(self): - pass - def run(self): - ver = get_version(verbose=True) - print("Version is currently: %s" % ver) - - -class cmd_build_py(_build_py): - def run(self): - versions = get_versions(verbose=True) - _build_py.run(self) - # now locate _version.py in the new build/ directory and replace it - # with an updated value - if versionfile_build: - target_versionfile = os.path.join(self.build_lib, versionfile_build) - print("UPDATING %s" % target_versionfile) - os.unlink(target_versionfile) - with open(target_versionfile, "w") as f: - f.write(SHORT_VERSION_PY % versions) +def get_version(): + return get_versions()["version"] -if 'cx_Freeze' in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - class cmd_build_exe(_build_exe): +def get_cmdclass(): + if "versioneer" in sys.modules: + del sys.modules["versioneer"] + # this fixes the "python setup.py develop" case (also 'install' and + # 'easy_install .'), in which subdependencies of the main project are + # built (using setup.py bdist_egg) in the same python process. Assume + # a main project A and a dependency B, which use different versions + # of Versioneer. 
A's setup.py imports A's Versioneer, leaving it in + # sys.modules by the time B's setup.py is executed, causing B to run + # with the wrong versioneer. Setuptools wraps the sub-dep builds in a + # sandbox that restores sys.modules to it's pre-build state, so the + # parent is protected against the child's "import versioneer". By + # removing ourselves from sys.modules here, before the child build + # happens, we protect the child from the parent's versioneer too. + # Also see https://github.com/warner/python-versioneer/issues/52 + + cmds = {} + + # we add "version" to both distutils and setuptools + from distutils.core import Command + + class cmd_version(Command): + description = "report generated version string" + user_options = [] + boolean_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + vers = get_versions(verbose=True) + print("Version: %s" % vers["version"]) + print(" full-revisionid: %s" % vers.get("full-revisionid")) + print(" dirty: %s" % vers.get("dirty")) + if vers["error"]: + print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version + + # we override "build_py" in both distutils and setuptools + # + # most invocation pathways end up running build_py: + # distutils/build -> build_py + # distutils/install -> distutils/build ->.. + # setuptools/bdist_wheel -> distutils/install ->.. + # setuptools/bdist_egg -> distutils/install_lib -> build_py + # setuptools/install -> bdist_egg ->.. + # setuptools/develop -> ? + + from distutils.command.build_py import build_py as _build_py + + class cmd_build_py(_build_py): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_py.run(self) + # now locate _version.py in the new build/ directory and replace + # it with an updated value + if cfg.versionfile_build: + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_build) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py + + if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
+ from cx_Freeze.dist import build_exe as _build_exe + + class cmd_build_exe(_build_exe): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _build_exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["build_exe"] = cmd_build_exe + del cmds["build_py"] + + # we override different "sdist" commands for both environments + if "setuptools" in sys.modules: + from setuptools.command.sdist import sdist as _sdist + else: + from distutils.command.sdist import sdist as _sdist + + class cmd_sdist(_sdist): def run(self): - versions = get_versions(verbose=True) - target_versionfile = versionfile_source + versions = get_versions() + self._versioneer_generated_versions = versions + # unless we update this, the command will keep using the old + # version + self.distribution.metadata.version = versions["version"] + return _sdist.run(self) + + def make_release_tree(self, base_dir, files): + root = get_root() + cfg = get_config_from_root(root) + _sdist.make_release_tree(self, base_dir, files) + # now locate _version.py in the new base_dir directory + # (remembering that it may be a hardlink) and replace it with an + # updated value + target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - os.unlink(target_versionfile) - with open(target_versionfile, "w") as f: - f.write(SHORT_VERSION_PY % versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(versionfile_source, "w") as f: - assert VCS is not None, "please set versioneer.VCS" - LONG = LONG_VERSION_PY[VCS] - f.write(LONG % {"DOLLAR": "$", - "TAG_PREFIX": tag_prefix, - "PARENTDIR_PREFIX": parentdir_prefix, - "VERSIONFILE_SOURCE": versionfile_source, - }) - -class cmd_sdist(_sdist): - def run(self): - versions = get_versions(verbose=True) - self._versioneer_generated_versions = versions - # unless we update this, the command will keep using the old version - self.distribution.metadata.version = versions["version"] - return _sdist.run(self) - - def make_release_tree(self, base_dir, files): - _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory (remembering - # that it may be a hardlink) and replace it with an updated value - target_versionfile = os.path.join(base_dir, versionfile_source) - print("UPDATING %s" % target_versionfile) - os.unlink(target_versionfile) - with open(target_versionfile, "w") as f: - f.write(SHORT_VERSION_PY % self._versioneer_generated_versions) + write_to_version_file(target_versionfile, + self._versioneer_generated_versions) + cmds["sdist"] = cmd_sdist + + return cmds + + +CONFIG_ERROR = """ +setup.cfg is missing the necessary Versioneer configuration. You need +a section like: + + [versioneer] + VCS = git + style = pep440 + versionfile_source = src/myproject/_version.py + versionfile_build = myproject/_version.py + tag_prefix = "" + parentdir_prefix = myproject- + +You will also need to edit your setup.py to use the results: + + import versioneer + setup(version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), ...) 
+ +Please read the docstring in ./versioneer.py for configuration instructions, +edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. +""" + +SAMPLE_CONFIG = """ +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. + +[versioneer] +#VCS = git +#style = pep440 +#versionfile_source = +#versionfile_build = +#tag_prefix = +#parentdir_prefix = + +""" INIT_PY_SNIPPET = """ from ._version import get_versions @@ -815,87 +1573,127 @@ def make_release_tree(self, base_dir, files): del get_versions """ -class cmd_update_files(Command): - description = "install/upgrade Versioneer files: __init__.py SRC/_version.py" - user_options = [] - boolean_options = [] - def initialize_options(self): - pass - def finalize_options(self): - pass - def run(self): - print(" creating %s" % versionfile_source) - with open(versionfile_source, "w") as f: - assert VCS is not None, "please set versioneer.VCS" - LONG = LONG_VERSION_PY[VCS] - f.write(LONG % {"DOLLAR": "$", - "TAG_PREFIX": tag_prefix, - "PARENTDIR_PREFIX": parentdir_prefix, - "VERSIONFILE_SOURCE": versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - except EnvironmentError: - old = "" - if INIT_PY_SNIPPET not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(get_root(), "MANIFEST.in") - simple_includes = set() + +def do_setup(): + root = get_root() + try: + cfg = get_config_from_root(root) + except (EnvironmentError, configparser.NoSectionError, + configparser.NoOptionError) as e: + if isinstance(e, (EnvironmentError, configparser.NoSectionError)): + print("Adding sample versioneer config to setup.cfg", + file=sys.stderr) + with open(os.path.join(root, "setup.cfg"), "a") as f: + f.write(SAMPLE_CONFIG) + print(CONFIG_ERROR, file=sys.stderr) + return 1 + + print(" creating %s" % cfg.versionfile_source) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), + "__init__.py") + if os.path.exists(ipy): try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) + with open(ipy, "r") as f: + old = f.read() except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. 
- if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % versionfile_source) + old = "" + if INIT_PY_SNIPPET not in old: + print(" appending to %s" % ipy) + with open(ipy, "a") as f: + f.write(INIT_PY_SNIPPET) else: - print(" versionfile_source already in MANIFEST.in") + print(" %s unmodified" % ipy) + else: + print(" %s doesn't exist, ok" % ipy) + ipy = None + + # Make sure both the top-level "versioneer.py" and versionfile_source + # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so + # they'll be copied into source distributions. Pip won't be able to + # install the package without this. + manifest_in = os.path.join(root, "MANIFEST.in") + simple_includes = set() + try: + with open(manifest_in, "r") as f: + for line in f: + if line.startswith("include "): + for include in line.split()[1:]: + simple_includes.add(include) + except EnvironmentError: + pass + # That doesn't cover everything MANIFEST.in can do + # (http://docs.python.org/2/distutils/sourcedist.html#commands), so + # it might give some false negatives. Appending redundant 'include' + # lines is safe, though. + if "versioneer.py" not in simple_includes: + print(" appending 'versioneer.py' to MANIFEST.in") + with open(manifest_in, "a") as f: + f.write("include versioneer.py\n") + else: + print(" 'versioneer.py' already in MANIFEST.in") + if cfg.versionfile_source not in simple_includes: + print(" appending versionfile_source ('%s') to MANIFEST.in" % + cfg.versionfile_source) + with open(manifest_in, "a") as f: + f.write("include %s\n" % cfg.versionfile_source) + else: + print(" versionfile_source already in MANIFEST.in") - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-time keyword - # substitution. - do_vcs_install(manifest_in, versionfile_source, ipy) + # Make VCS-specific changes. For git, this means creating/changing + # .gitattributes to mark _version.py for export-time keyword + # substitution. + do_vcs_install(manifest_in, cfg.versionfile_source, ipy) + return 0 -def get_cmdclass(): - cmds = {'version': cmd_version, - 'versioneer': cmd_update_files, - 'build_py': cmd_build_py, - 'sdist': cmd_sdist, - } - if 'cx_Freeze' in sys.modules: # cx_freeze enabled? - cmds['build_exe'] = cmd_build_exe - del cmds['build'] - return cmds +def scan_setup_py(): + found = set() + setters = False + errors = 0 + with open("setup.py", "r") as f: + for line in f.readlines(): + if "import versioneer" in line: + found.add("import") + if "versioneer.get_cmdclass()" in line: + found.add("cmdclass") + if "versioneer.get_version()" in line: + found.add("get_version") + if "versioneer.VCS" in line: + setters = True + if "versioneer.versionfile_source" in line: + setters = True + if len(found) != 3: + print("") + print("Your setup.py appears to be missing some important items") + print("(but I might be wrong). 
Please make sure it has something") + print("roughly like the following:") + print("") + print(" import versioneer") + print(" setup( version=versioneer.get_version(),") + print(" cmdclass=versioneer.get_cmdclass(), ...)") + print("") + errors += 1 + if setters: + print("You should remove lines like 'versioneer.VCS = ' and") + print("'versioneer.versionfile_source = ' . This configuration") + print("now lives in setup.cfg, and should be removed from setup.py") + print("") + errors += 1 + return errors + +if __name__ == "__main__": + cmd = sys.argv[1] + if cmd == "setup": + errors = do_setup() + errors += scan_setup_py() + if errors: + sys.exit(1)
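The core of the upgraded machinery in the patch above is `git_pieces_from_vcs()`, which replaces the old single-pass tag parse with a decomposition of `git describe --tags --dirty --always --long` output into a `pieces` dict. A minimal standalone sketch of that parse follows; the tag `v0.6.0`, the hash `abc1234`, and the `tag_prefix="v"` default are made-up illustration values, not taken from the patch:

```python
import re

def parse_describe(describe_out, tag_prefix="v"):
    # Sketch of the decomposition done by git_pieces_from_vcs() above,
    # for 'git describe --tags --dirty --always --long' output.
    pieces = {"error": None, "closest-tag": None}
    # strip a trailing "-dirty" first; the tag itself may contain hyphens
    pieces["dirty"] = describe_out.endswith("-dirty")
    if pieces["dirty"]:
        describe_out = describe_out[:describe_out.rindex("-dirty")]
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', describe_out)
    if not mo:
        # no tags reachable from HEAD: describe emitted a bare hex id
        pieces["short"] = describe_out
        return pieces
    full_tag = mo.group(1)
    if not full_tag.startswith(tag_prefix):
        pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                           % (full_tag, tag_prefix))
        return pieces
    pieces["closest-tag"] = full_tag[len(tag_prefix):]
    pieces["distance"] = int(mo.group(2))  # commits since the closest tag
    pieces["short"] = mo.group(3)          # abbreviated commit hash
    return pieces

print(parse_describe("v0.6.0-12-gabc1234-dirty"))
# {'error': None, 'closest-tag': '0.6.0', 'dirty': True,
#  'distance': 12, 'short': 'abc1234'}
```

Stripping `-dirty` before matching matters because tag names may themselves contain hyphens; the regex then anchors on the final `-NUM-gHEX` pair.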
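For one and the same `pieces` dict, the six `style` values accepted by the new `render()` produce differently formatted version strings. The expected outputs below follow the per-style comments in the patch; the tag, distance, and hash are invented for illustration and the strings are derived by hand, so treat them as a sketch rather than authoritative output:

```python
# illustrative only: expected render() output per style for one
# dirty working tree, 12 commits past tag v0.6.0 at commit abc1234
pieces = {"closest-tag": "0.6.0", "distance": 12, "short": "abc1234",
          "dirty": True, "long": "abc1234" + "0" * 33, "error": None}

expected = {
    "pep440":            "0.6.0+12.gabc1234.dirty",
    "pep440-pre":        "0.6.0.post.dev12",            # ignores dirty
    "pep440-post":       "0.6.0.post12.dev0+gabc1234",  # ".dev0" flags dirty
    "pep440-old":        "0.6.0.post12.dev0",
    "git-describe":      "0.6.0-12-gabc1234-dirty",
    "git-describe-long": "0.6.0-12-gabc1234-dirty",
}
for style in sorted(expected):
    print("%-18s %s" % (style, expected[style]))
```

The "pep440" style is the default and encodes distance and dirtiness as a PEP 440 local version identifier, which is why a tagged-then-dirtied build renders as `TAG+0.gHEX.dirty`.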
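Both copies of `get_versions()` (the one embedded in the `_version.py` template and the one in `versioneer.py` itself) now try a fixed sequence of strategies, each raising `NotThisMethod` to hand off to the next: expanded `git archive` keywords, a previously written version file, live `git describe`, the parent-directory name, and finally the `0+unknown` fallback. From a source checkout this chain can be exercised directly; the snippet below is a usage sketch, assuming it is run from the repository root where `setup.cfg` and `versioneer.py` live:

```python
# run from the repo root; a usage sketch, not part of the patch
import versioneer

info = versioneer.get_versions(verbose=True)
print(info["version"])           # e.g. "0.6.0+12.gabc1234.dirty"
print(info["full-revisionid"])   # 40-char commit hash, or None
print(info["dirty"], info["error"])
```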
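`scan_setup_py()` checks that `setup.py` wires in both entry points and flags the old module-level setters (`versioneer.VCS = ...`, `versioneer.versionfile_source = ...`) as errors, since that configuration now lives in the `[versioneer]` section of `setup.cfg`. The expected shape, sketched for this repo:

```python
# setup.py -- the shape scan_setup_py() looks for
from setuptools import setup
import versioneer

setup(
    name="hexrd",
    version=versioneer.get_version(),    # rendered string, e.g. "0.6.0+12.gabc1234"
    cmdclass=versioneer.get_cmdclass(),  # adds/overrides version, build_py, sdist
    # (build_exe replaces build_py when cx_Freeze is in sys.modules)
)
```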
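At runtime the package never imports `versioneer.py`; `do_setup()` appends `INIT_PY_SNIPPET` to the package `__init__.py`, which reads the version out of the generated `_version.py`. The middle of the snippet falls between hunks in the diff above; reconstructed from the surrounding `get_versions()` API, it expands to roughly the following (a sketch, with the middle line an inference rather than quoted from the patch):

```python
# appended to hexrd/__init__.py by 'python versioneer.py setup'
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
```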