Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions .flake8
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
[flake8]
exclude =
.git,
__pycache__,
build,
dist,
versioneer.py,
pyxrf/_version.py,
docs/conf.py
# Some checks conflict with the formatting produced by 'black' and are therefore unavoidable
ignore = E203, W503
max-line-length = 115
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@ history.sqlite
pid
security
static
*.tgz
backups*
*.remove
*.bak

# https://github.com/bluesky/databroker/blob/master/.gitignore

Expand Down
11 changes: 11 additions & 0 deletions profile_/Users/gbischof/dama/profile_collection_pta/startup/README
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
This is the IPython startup directory

.py and .ipy files in this directory will be run *prior* to any code or files specified
via the exec_lines or exec_files configurables whenever you load this profile.

Files will be run in lexicographical order, so you can control the execution order of files
with a prefix, e.g.::

00-first.py
50-middle.py
99-last.ipy
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ line-length = 115
target-version = ['py37']
include = '\.pyi?$'
exclude = '''

(
/(
\.eggs # exclude a few common directories in the
Expand Down
15 changes: 15 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
appdirs==1.4.4
bluesky==1.10.0
caproto==1.0.0
ipython==8.9.0
lmfit==1.1.0
msgpack_numpy==0.4.8
msgpack_python==0.5.6
nslsii==0.9.0
ophyd==1.7.0
pandas==1.5.3
pyOlog==4.5.0
pyepics
requests==2.28.2
scipy==1.10.0
zict==2.2.0
99 changes: 1 addition & 98 deletions startup/.cms_config

Large diffs are not rendered by default.

200 changes: 25 additions & 175 deletions startup/00-startup.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,29 @@
# import logging
# import caproto
# handler = logging.FileHandler('pilatus-trigger-log.txt')
# from caproto._log import LogFormatter, color_log_format, log_date_format
# handler.setFormatter(
# LogFormatter(color_log_format, datefmt=log_date_format))
# caproto_log = logging.getLogger('caproto')
# caproto_log.handlers.clear()
# caproto_log.addHandler(handler)
# logging.getLogger('caproto.ch').setLevel('DEBUG')
import nslsii

nslsii.configure_base(get_ipython().user_ns, "cms", publish_documents_with_kafka=True)
print(f"Loading {__file__!r} ...")

import nslsii
import os
from bluesky.magics import BlueskyMagics
from bluesky.preprocessors import pchain
from bluesky.utils import PersistentDict
from pyOlog.ophyd_tools import *

# At the end of every run, verify that files were saved and
# print a confirmation message.
from bluesky.callbacks.broker import verify_files_saved


# Added this variable temporarily to bypass some code that doesn't run without the beamline.
# This can be set when starting bsui like this: `BS_MODE=test bsui`

testing = os.environ.get("BS_MODE", False) == "test"

if testing:
nslsii.configure_base(get_ipython().user_ns, "temp", publish_documents_with_kafka=False)
else:
nslsii.configure_base(get_ipython().user_ns, "cms", publish_documents_with_kafka=True)

# RE.subscribe(post_run(verify_files_saved), 'stop')

from pyOlog.ophyd_tools import *

# Uncomment the following lines to turn on verbose messages for
# debugging.
Expand All @@ -36,169 +38,17 @@
#
# RE.subscribe(print_scan_ids, 'start')

# - HACK #1 - patch EpicsSignal.get to retry when timeouts happen stolen from HXN
import ophyd


def _epicssignal_get(self, *, as_string=None, connection_timeout=1.0, **kwargs):
"""Get the readback value through an explicit call to EPICS
Parameters
----------
count : int, optional
Explicitly limit count for array data
as_string : bool, optional
Get a string representation of the value, defaults to as_string
from this signal, optional
as_numpy : bool
Use numpy array as the return type for array data.
timeout : float, optional
maximum time to wait for value to be received.
(default = 0.5 + log10(count) seconds)
use_monitor : bool, optional
to use value from latest monitor callback or to make an
explicit CA call for the value. (default: True)
connection_timeout : float, optional
If not already connected, allow up to `connection_timeout` seconds
for the connection to complete.
"""
if as_string is None:
as_string = self._string

with self._metadata_lock:
if not self._read_pv.connected:
if not self._read_pv.wait_for_connection(connection_timeout):
raise TimeoutError("Failed to connect to %s" % self._read_pv.pvname)

ret = None
attempts = 0
max_attempts = 4
while ret is None and attempts < max_attempts:
attempts += 1
ret = self._read_pv.get(as_string=as_string, **kwargs)
if ret is None:
print(f"*** PV GET TIMED OUT {self._read_pv.pvname} *** attempt #{attempts}/{max_attempts}")
if ret is None:
print(f"*** PV GET TIMED OUT {self._read_pv.pvname} *** return `None` as value :(")
# TODO we really want to raise TimeoutError here, but that may cause more
# issues in the codebase than we have the time to fix...
# If this causes issues, remove it to keep the old functionality...
raise TimeoutError("Failed to get %s after %d attempts" % (self._read_pv.pvname, attempts))
if attempts > 1:
print(f"*** PV GET succeeded {self._read_pv.pvname} on attempt #{attempts}")

if as_string:
return ophyd.signal.waveform_to_string(ret)

return ret


from ophyd import EpicsSignal
from ophyd import EpicsSignalRO

# from ophyd import EpicsSignalBase

from ophyd.areadetector import EpicsSignalWithRBV

# Increase the timeout for EpicsSignal.get()
# This beamline was occasionally getting ReadTimeoutErrors
# EpicsSignal.set_defaults(timeout=10)
# EpicsSignalRO.set_defaults(timeout=10)
ophyd.signal.EpicsSignalBase.set_defaults(timeout=15)


# We have commented this because we would like to identify the PVs that are causing problems.
# Then the controls group can investigate why it is not working as expected.
# Increasing the get() timeout argument is the preferred way to work around this problem.
# EpicsSignal.get = _epicssignal_get
# EpicsSignalRO.get = _epicssignal_get
# EpicsSignalWithRBV.get = _epicssignal_get

from pathlib import Path

import appdirs


# Prefer the upstream implementation; fall back to a local copy for older
# bluesky versions that do not ship PersistentDict.
try:
    from bluesky.utils import PersistentDict
except ImportError:
    import msgpack
    import msgpack_numpy
    import zict

    class PersistentDict(zict.Func):
        """
        A MutableMapping which syncs its contents to disk.
        The contents are stored as msgpack-serialized files, with one file per item
        in the mapping.
        Note that when an item is *mutated* it is not immediately synced:
        >>> d['sample'] = {"color": "red"}  # immediately synced
        >>> d['sample']['shape'] = 'bar'  # not immediately synced
        but that the full contents are synced to disk when the PersistentDict
        instance is garbage collected.
        """

        def __init__(self, directory):
            # Directory on disk holding one msgpack file per mapping item.
            self._directory = directory
            self._file = zict.File(directory)
            # In-memory mirror of the mapping; reads are served from here,
            # writes go to both the cache and the on-disk store.
            self._cache = {}
            super().__init__(self._dump, self._load, self._file)
            self.reload()

            # Similar to flush() or _do_update(), but without reference to self
            # to avoid circular reference preventing collection.
            # NOTE: This still doesn't guarantee call on delete or gc.collect()!
            # Explicitly call flush() if immediate write to disk required.
            def finalize(zfile, cache, dump):
                zfile.update((k, dump(v)) for k, v in cache.items())

            import weakref

            self._finalizer = weakref.finalize(self, finalize, self._file, self._cache, PersistentDict._dump)

        @property
        def directory(self):
            # Read-only access to the backing directory path.
            return self._directory

        def __setitem__(self, key, value):
            # Write-through: update the cache and persist to disk immediately.
            self._cache[key] = value
            super().__setitem__(key, value)

        def __getitem__(self, key):
            # Reads come from the in-memory cache only (disk is not re-read).
            return self._cache[key]

        def __delitem__(self, key):
            # Remove from both the cache and the on-disk store.
            del self._cache[key]
            super().__delitem__(key)

        def __repr__(self):
            return f"<{self.__class__.__name__} {dict(self)!r}>"

        @staticmethod
        def _dump(obj):
            "Encode as msgpack using numpy-aware encoder."
            # See https://github.com/msgpack/msgpack-python#string-and-binary-type
            # for more on use_bin_type.
            return msgpack.packb(obj, default=msgpack_numpy.encode, use_bin_type=True)

        @staticmethod
        def _load(file):
            # Decode msgpack bytes with numpy-aware decoding; raw=False yields str keys.
            return msgpack.unpackb(file, object_hook=msgpack_numpy.decode, raw=False)

        def flush(self):
            """Force a write of the current state to disk"""
            for k, v in self.items():
                super().__setitem__(k, v)

        def reload(self):
            """Force a reload from disk, overwriting current cache"""
            self._cache = dict(super().items())


# runengine_metadata_dir = appdirs.user_data_dir(appname="bluesky") / Path("runengine-metadata")
runengine_metadata_dir = "/nsls2/data/cms/shared/config/runengine-metadata"
if testing:
runengine_metadata_dir = "/tmp/runingine-metadata"
else:
runengine_metadata_dir = "/nsls2/data/cms/shared/config/runengine-metadata"


# PersistentDict will create the directory if it does not exist
RE.md = PersistentDict(runengine_metadata_dir)

print("a new version of bsui")
print("sth is happening")
# The following plan stubs are automatically imported in global namespace by 'nslsii.configure_base',
# but have signatures that are not compatible with the Queue Server. They should not exist in the global
# namespace, but can be accessed as 'bps.one_1d_step' etc. from other plans.
del one_1d_step, one_nd_step, one_shot
28 changes: 7 additions & 21 deletions startup/01-ad33_tmp.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
from ophyd.areadetector.base import (
ADComponent as C,
ad_group,
EpicsSignalWithRBV as SignalWithRBV,
)
print(f"Loading {__file__!r} ...")

from ophyd.areadetector.base import ADComponent as C, ad_group, EpicsSignalWithRBV as SignalWithRBV
from ophyd.areadetector.plugins import PluginBase
from ophyd.device import DynamicDeviceComponent as DDC, Staged
from ophyd.signal import EpicsSignalRO, EpicsSignal
Expand Down Expand Up @@ -74,26 +72,17 @@ class StatsPluginV33(PluginBase):
)
net = C(EpicsSignalRO, "Net_RBV")
profile_average = DDC(
ad_group(
EpicsSignalRO,
(("x", "ProfileAverageX_RBV"), ("y", "ProfileAverageY_RBV")),
),
ad_group(EpicsSignalRO, (("x", "ProfileAverageX_RBV"), ("y", "ProfileAverageY_RBV"))),
doc="Profile average in XY",
default_read_attrs=("x", "y"),
)
profile_centroid = DDC(
ad_group(
EpicsSignalRO,
(("x", "ProfileCentroidX_RBV"), ("y", "ProfileCentroidY_RBV")),
),
ad_group(EpicsSignalRO, (("x", "ProfileCentroidX_RBV"), ("y", "ProfileCentroidY_RBV"))),
doc="Profile centroid in XY",
default_read_attrs=("x", "y"),
)
profile_cursor = DDC(
ad_group(
EpicsSignalRO,
(("x", "ProfileCursorX_RBV"), ("y", "ProfileCursorY_RBV")),
),
ad_group(EpicsSignalRO, (("x", "ProfileCursorX_RBV"), ("y", "ProfileCursorY_RBV"))),
doc="Profile cursor in XY",
default_read_attrs=("x", "y"),
)
Expand All @@ -103,10 +92,7 @@ class StatsPluginV33(PluginBase):
default_read_attrs=("x", "y"),
)
profile_threshold = DDC(
ad_group(
EpicsSignalRO,
(("x", "ProfileThresholdX_RBV"), ("y", "ProfileThresholdY_RBV")),
),
ad_group(EpicsSignalRO, (("x", "ProfileThresholdX_RBV"), ("y", "ProfileThresholdY_RBV"))),
doc="Profile threshold in XY",
default_read_attrs=("x", "y"),
)
Expand Down
Loading